Posted to commits@hive.apache.org by om...@apache.org on 2015/11/30 20:15:12 UTC

[01/27] hive git commit: HIVE-12329: Turn on limit pushdown optimization by default (Ashutosh Chauhan via Prasanth J)

Repository: hive
Updated Branches:
  refs/heads/master-fixed 1918735ed -> e325eac9f

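Context for the diffs below (editorial note, not part of the patch): each hunk adds a
"TopN Hash Memory Usage: 0.1" line to a Reduce Output Operator in the golden EXPLAIN
output. That annotation reflects the hive.limit.pushdown.memory.usage setting, which this
change enables by default at 0.1, so any ORDER BY ... LIMIT plan now advertises the
top-N hash and the fraction of memory it may use. A minimal illustrative query (the
alltypesorc table and columns come from the existing q-test data; the exact plan text may
differ by test) would be:

    -- assumed to be the new default after this change; shown only for clarity
    set hive.limit.pushdown.memory.usage=0.1;

    explain
    select cint, cdouble
    from alltypesorc
    order by cint
    limit 10;

    -- the Reduce Output Operator in the plan is then expected to carry:
    --   TopN Hash Memory Usage: 0.1

The optimization keeps only the current top-N keys in a map-side hash (bounded to that
fraction of available memory) instead of shuffling every row to the reducer, which is why
only the plan annotation changes in these .q.out files and not the query results.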

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/tez/vector_cast_constant.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_cast_constant.q.java1.7.out b/ql/src/test/results/clientpositive/tez/vector_cast_constant.q.java1.7.out
index 12920d2..bcbdf06 100644
--- a/ql/src/test/results/clientpositive/tez/vector_cast_constant.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/tez/vector_cast_constant.q.java1.7.out
@@ -161,6 +161,7 @@ STAGE PLANS:
                   key expressions: _col0 (type: int)
                   sort order: +
                   Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: decimal(14,4))
         Reducer 3 
             Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/tez/vector_char_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_char_2.q.out b/ql/src/test/results/clientpositive/tez/vector_char_2.q.out
index 8545608..f88ee91 100644
--- a/ql/src/test/results/clientpositive/tez/vector_char_2.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_char_2.q.out
@@ -106,6 +106,7 @@ STAGE PLANS:
                   key expressions: _col0 (type: char(20))
                   sort order: +
                   Statistics: Num rows: 250 Data size: 49500 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: bigint), _col2 (type: bigint)
         Reducer 3 
             Execution mode: vectorized
@@ -234,6 +235,7 @@ STAGE PLANS:
                   key expressions: _col0 (type: char(20))
                   sort order: -
                   Statistics: Num rows: 250 Data size: 49500 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: bigint), _col2 (type: bigint)
         Reducer 3 
             Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/tez/vector_char_simple.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_char_simple.q.out b/ql/src/test/results/clientpositive/tez/vector_char_simple.q.out
index be38775..617620c 100644
--- a/ql/src/test/results/clientpositive/tez/vector_char_simple.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_char_simple.q.out
@@ -79,6 +79,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: char(10))
                       sort order: +
                       Statistics: Num rows: 500 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col1 (type: char(20))
             Execution mode: vectorized
         Reducer 2 
@@ -179,6 +180,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: char(10))
                       sort order: -
                       Statistics: Num rows: 500 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col1 (type: char(20))
             Execution mode: vectorized
         Reducer 2 
@@ -282,6 +284,7 @@ STAGE PLANS:
                       Reduce Output Operator
                         sort order: 
                         Statistics: Num rows: 10 Data size: 2150 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
                         value expressions: _col0 (type: int)
             Execution mode: vectorized
         Reducer 2 

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/tez/vector_coalesce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_coalesce.q.out b/ql/src/test/results/clientpositive/tez/vector_coalesce.q.out
index c492113..1142485 100644
--- a/ql/src/test/results/clientpositive/tez/vector_coalesce.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_coalesce.q.out
@@ -41,6 +41,7 @@ STAGE PLANS:
                         key expressions: null (type: double), _col1 (type: string), _col2 (type: int), _col3 (type: float), _col4 (type: smallint), _col5 (type: string)
                         sort order: ++++++
                         Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
             Execution mode: vectorized
         Reducer 2 
             Execution mode: vectorized
@@ -131,6 +132,7 @@ STAGE PLANS:
                         key expressions: null (type: tinyint), _col1 (type: double), _col2 (type: int), _col3 (type: double)
                         sort order: ++++
                         Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
             Execution mode: vectorized
         Reducer 2 
             Execution mode: vectorized
@@ -219,6 +221,7 @@ STAGE PLANS:
                         key expressions: null (type: float), null (type: bigint), 0.0 (type: float)
                         sort order: +++
                         Statistics: Num rows: 3072 Data size: 660491 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
             Execution mode: vectorized
         Reducer 2 
             Execution mode: vectorized
@@ -309,6 +312,7 @@ STAGE PLANS:
                         key expressions: _col0 (type: timestamp), _col1 (type: timestamp), _col2 (type: timestamp)
                         sort order: +++
                         Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
             Execution mode: vectorized
         Reducer 2 
             Execution mode: vectorized
@@ -397,6 +401,7 @@ STAGE PLANS:
                         key expressions: null (type: float), null (type: bigint), null (type: float)
                         sort order: +++
                         Statistics: Num rows: 3072 Data size: 660491 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
             Execution mode: vectorized
         Reducer 2 
             Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/tez/vector_data_types.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_data_types.q.out b/ql/src/test/results/clientpositive/tez/vector_data_types.q.out
index 8a21697..4197666 100644
--- a/ql/src/test/results/clientpositive/tez/vector_data_types.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_data_types.q.out
@@ -125,6 +125,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int)
                       sort order: +++
                       Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: boolean), _col7 (type: string), _col8 (type: timestamp), _col9 (type: decimal(4,2)), _col10 (type: binary)
         Reducer 2 
             Reduce Operator Tree:
@@ -216,6 +217,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int)
                       sort order: +++
                       Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: boolean), _col7 (type: string), _col8 (type: timestamp), _col9 (type: decimal(4,2)), _col10 (type: binary)
             Execution mode: vectorized
         Reducer 2 

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out
index 08c3ae9..7532969 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out
@@ -47,6 +47,7 @@ STAGE PLANS:
                         key expressions: _col0 (type: decimal(25,14)), _col1 (type: decimal(26,14)), _col2 (type: double), _col3 (type: double), _col4 (type: decimal(12,10)), _col5 (type: int), _col6 (type: smallint), _col7 (type: tinyint), _col8 (type: bigint), _col9 (type: boolean), _col10 (type: double), _col11 (type: float), _col12 (type: string), _col13 (type: timestamp)
                         sort order: ++++++++++++++
                         Statistics: Num rows: 228 Data size: 39491 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
             Execution mode: vectorized
         Reducer 2 
             Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/tez/vector_groupby_reduce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_groupby_reduce.q.out b/ql/src/test/results/clientpositive/tez/vector_groupby_reduce.q.out
index 9942629..4322072 100644
--- a/ql/src/test/results/clientpositive/tez/vector_groupby_reduce.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_groupby_reduce.q.out
@@ -257,6 +257,7 @@ STAGE PLANS:
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
                         Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
             Execution mode: vectorized
         Reducer 2 
             Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/tez/vector_mr_diff_schema_alias.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_mr_diff_schema_alias.q.out b/ql/src/test/results/clientpositive/tez/vector_mr_diff_schema_alias.q.out
index 3e839a1..f60a584 100644
--- a/ql/src/test/results/clientpositive/tez/vector_mr_diff_schema_alias.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_mr_diff_schema_alias.q.out
@@ -363,6 +363,7 @@ STAGE PLANS:
                   key expressions: _col0 (type: string)
                   sort order: +
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: bigint)
         Reducer 5 
             Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/tez/vector_non_string_partition.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_non_string_partition.q.out b/ql/src/test/results/clientpositive/tez/vector_non_string_partition.q.out
index 28b5f72..15e36b5 100644
--- a/ql/src/test/results/clientpositive/tez/vector_non_string_partition.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_non_string_partition.q.out
@@ -58,6 +58,7 @@ STAGE PLANS:
                         key expressions: _col0 (type: int)
                         sort order: +
                         Statistics: Num rows: 1024 Data size: 113013 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
                         value expressions: _col1 (type: tinyint)
             Execution mode: vectorized
         Reducer 2 
@@ -135,6 +136,7 @@ STAGE PLANS:
                         key expressions: _col0 (type: int), _col1 (type: string)
                         sort order: ++
                         Statistics: Num rows: 1024 Data size: 113013 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
             Execution mode: vectorized
         Reducer 2 
             Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/tez/vector_partitioned_date_time.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_partitioned_date_time.q.out b/ql/src/test/results/clientpositive/tez/vector_partitioned_date_time.q.out
index 778e080..500e63e 100644
--- a/ql/src/test/results/clientpositive/tez/vector_partitioned_date_time.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_partitioned_date_time.q.out
@@ -281,6 +281,7 @@ STAGE PLANS:
                       key expressions: _col5 (type: int), _col2 (type: date)
                       sort order: ++
                       Statistics: Num rows: 137 Data size: 39456 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col0 (type: string), _col1 (type: string), _col3 (type: timestamp), _col4 (type: float)
             Execution mode: vectorized
         Reducer 2 
@@ -297,6 +298,7 @@ STAGE PLANS:
                     key expressions: _col5 (type: int), _col2 (type: date)
                     sort order: ++
                     Statistics: Num rows: 25 Data size: 7200 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
                     value expressions: _col0 (type: string), _col1 (type: string), _col3 (type: timestamp), _col4 (type: float)
         Reducer 3 
             Execution mode: vectorized
@@ -1037,6 +1039,7 @@ STAGE PLANS:
                       key expressions: _col4 (type: int), _col5 (type: date)
                       sort order: ++
                       Statistics: Num rows: 137 Data size: 31776 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: timestamp), _col3 (type: float)
             Execution mode: vectorized
         Reducer 2 
@@ -1053,6 +1056,7 @@ STAGE PLANS:
                     key expressions: _col4 (type: int), _col5 (type: date)
                     sort order: ++
                     Statistics: Num rows: 25 Data size: 5775 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
                     value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: timestamp), _col3 (type: float)
         Reducer 3 
             Execution mode: vectorized
@@ -1841,6 +1845,7 @@ STAGE PLANS:
                       key expressions: _col4 (type: int), _col5 (type: timestamp)
                       sort order: ++
                       Statistics: Num rows: 137 Data size: 33968 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: date), _col3 (type: float)
             Execution mode: vectorized
         Reducer 2 
@@ -1857,6 +1862,7 @@ STAGE PLANS:
                     key expressions: _col4 (type: int), _col5 (type: timestamp)
                     sort order: ++
                     Statistics: Num rows: 25 Data size: 6175 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
                     value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: date), _col3 (type: float)
         Reducer 3 
             Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out b/ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out
index 337d83f..ec382db 100644
--- a/ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out
@@ -70,6 +70,7 @@ STAGE PLANS:
                   key expressions: _col0 (type: int), _col1 (type: double), _col2 (type: decimal(20,10)), _col3 (type: decimal(23,14))
                   sort order: ++++
                   Statistics: Num rows: 763 Data size: 180068 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col4 (type: decimal(20,10))
         Reducer 3 
             Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/tez/vector_string_concat.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_string_concat.q.out b/ql/src/test/results/clientpositive/tez/vector_string_concat.q.out
index 95aedfb..1f498a8 100644
--- a/ql/src/test/results/clientpositive/tez/vector_string_concat.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_string_concat.q.out
@@ -323,6 +323,7 @@ STAGE PLANS:
                   key expressions: _col0 (type: string)
                   sort order: +
                   Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
         Reducer 3 
             Execution mode: vectorized
             Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/tez/vector_varchar_simple.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_varchar_simple.q.out b/ql/src/test/results/clientpositive/tez/vector_varchar_simple.q.out
index 4419c25..cdcc2b0 100644
--- a/ql/src/test/results/clientpositive/tez/vector_varchar_simple.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_varchar_simple.q.out
@@ -79,6 +79,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: varchar(10))
                       sort order: +
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col1 (type: varchar(20))
             Execution mode: vectorized
         Reducer 2 
@@ -179,6 +180,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: varchar(10))
                       sort order: -
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col1 (type: varchar(20))
             Execution mode: vectorized
         Reducer 2 
@@ -282,6 +284,7 @@ STAGE PLANS:
                       Reduce Output Operator
                         sort order: 
                         Statistics: Num rows: 10 Data size: 2150 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
                         value expressions: _col0 (type: int)
             Execution mode: vectorized
         Reducer 2 

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/tez/vectorization_13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_13.q.out b/ql/src/test/results/clientpositive/tez/vectorization_13.q.out
index ac33341..04b474e 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_13.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_13.q.out
@@ -121,6 +121,7 @@ STAGE PLANS:
                     key expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), _col5 (type: tinyint), _col6 (type: tinyint), _col7 (type: tinyint), _col8 (type: double), _col9 (type: double), _col10 (type: double), _col11 (type: double), _col12 (type: double), _col13 (type: double), _col14 (type: double), _col15 (type: double), _col16 (type: double), _col17 (type: double), _col18 (type: float), _col19 (type: double), _col20 (type: tinyint)
                     sort order: +++++++++++++++++++++
                     Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
         Reducer 3 
             Execution mode: vectorized
             Reduce Operator Tree:
@@ -374,6 +375,7 @@ STAGE PLANS:
                     key expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), _col5 (type: tinyint), _col6 (type: tinyint), _col7 (type: tinyint), _col8 (type: double), _col9 (type: double), _col10 (type: double), _col11 (type: double), _col12 (type: double), _col13 (type: double), _col14 (type: double), _col15 (type: double), _col16 (type: double), _col17 (type: double), _col18 (type: float), _col19 (type: double), _col20 (type: tinyint)
                     sort order: +++++++++++++++++++++
                     Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
         Reducer 3 
             Execution mode: vectorized
             Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/tez/vectorization_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_7.q.out b/ql/src/test/results/clientpositive/tez/vectorization_7.q.out
index ca78b0f..897b131 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_7.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_7.q.out
@@ -83,6 +83,7 @@ STAGE PLANS:
                         key expressions: _col0 (type: boolean), _col1 (type: bigint), _col2 (type: smallint), _col3 (type: tinyint), _col4 (type: timestamp), _col5 (type: string), _col6 (type: bigint), _col7 (type: int), _col8 (type: smallint), _col9 (type: tinyint), _col10 (type: int), _col11 (type: bigint), _col12 (type: int), _col13 (type: tinyint), _col14 (type: tinyint)
                         sort order: +++++++++++++++
                         Statistics: Num rows: 7281 Data size: 1565441 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
             Execution mode: vectorized
         Reducer 2 
             Execution mode: vectorized
@@ -272,6 +273,7 @@ STAGE PLANS:
                         key expressions: _col0 (type: boolean), _col1 (type: bigint), _col2 (type: smallint), _col3 (type: tinyint), _col4 (type: timestamp), _col5 (type: string), _col6 (type: bigint), _col7 (type: int), _col8 (type: smallint), _col9 (type: tinyint), _col10 (type: int), _col11 (type: bigint), _col12 (type: int), _col13 (type: tinyint), _col14 (type: tinyint)
                         sort order: +++++++++++++++
                         Statistics: Num rows: 7281 Data size: 1565441 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
             Execution mode: vectorized
         Reducer 2 
             Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/tez/vectorization_8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_8.q.out b/ql/src/test/results/clientpositive/tez/vectorization_8.q.out
index 0ab8181..b3a3ce3 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_8.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_8.q.out
@@ -79,6 +79,7 @@ STAGE PLANS:
                         key expressions: _col0 (type: timestamp), _col1 (type: double), _col2 (type: boolean), _col3 (type: string), _col4 (type: float), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: float), _col9 (type: double), _col10 (type: double), _col11 (type: double), _col12 (type: float), _col13 (type: double)
                         sort order: ++++++++++++++
                         Statistics: Num rows: 9216 Data size: 1981473 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
             Execution mode: vectorized
         Reducer 2 
             Execution mode: vectorized
@@ -255,6 +256,7 @@ STAGE PLANS:
                         key expressions: _col0 (type: timestamp), _col1 (type: double), _col2 (type: boolean), _col3 (type: string), _col4 (type: float), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: float), _col9 (type: double), _col10 (type: double), _col11 (type: double), _col12 (type: float), _col13 (type: double)
                         sort order: ++++++++++++++
                         Statistics: Num rows: 9216 Data size: 1981473 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
             Execution mode: vectorized
         Reducer 2 
             Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/tez/vectorization_div0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_div0.q.out b/ql/src/test/results/clientpositive/tez/vectorization_div0.q.out
index 892fca3..3472ba1 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_div0.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_div0.q.out
@@ -172,6 +172,7 @@ STAGE PLANS:
                         key expressions: _col0 (type: bigint), _col1 (type: double)
                         sort order: ++
                         Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
                         value expressions: _col2 (type: double)
             Execution mode: vectorized
         Reducer 2 
@@ -347,6 +348,7 @@ STAGE PLANS:
                         key expressions: _col0 (type: double), _col1 (type: double)
                         sort order: ++
                         Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
                         value expressions: _col2 (type: double), _col4 (type: double), _col5 (type: double)
             Execution mode: vectorized
         Reducer 2 

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/tez/vectorization_part_project.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_part_project.q.out b/ql/src/test/results/clientpositive/tez/vectorization_part_project.q.out
index d8c4b06..a61f391 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_part_project.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_part_project.q.out
@@ -74,6 +74,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: double)
                       sort order: +
                       Statistics: Num rows: 200 Data size: 54496 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
             Execution mode: vectorized
         Reducer 2 
             Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
index 5c2ece6..db0e78b 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
@@ -949,6 +949,7 @@ STAGE PLANS:
                         key expressions: _col0 (type: int), _col1 (type: double), _col2 (type: timestamp), _col3 (type: string), _col4 (type: boolean), _col5 (type: tinyint), _col6 (type: float), _col7 (type: timestamp), _col8 (type: smallint), _col9 (type: bigint), _col10 (type: bigint), _col11 (type: int), _col12 (type: double), _col13 (type: smallint), _col14 (type: smallint), _col15 (type: smallint), _col16 (type: double), _col17 (type: double), _col18 (type: float), _col19 (type: double), _col20 (type: double), _col21 (type: tinyint), _col22 (type: double)
                         sort order: +++++++++++++++++++++++
                         Statistics: Num rows: 9898 Data size: 2128105 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
             Execution mode: vectorized
         Reducer 2 
             Execution mode: vectorized
@@ -1207,6 +1208,7 @@ STAGE PLANS:
                         key expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: boolean), _col4 (type: float), _col5 (type: double), _col6 (type: timestamp), _col7 (type: smallint), _col8 (type: string), _col9 (type: boolean), _col10 (type: double), _col11 (type: double), _col12 (type: double), _col13 (type: double), _col14 (type: float), _col15 (type: float), _col16 (type: float), _col17 (type: double), _col18 (type: double), _col19 (type: bigint), _col20 (type: double), _col21 (type: smallint), _col22 (type: bigint), _col23 (type: double), _col24 (type: smallint)
                         sort order: +++++++++++++++++++++++++
                         Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
             Execution mode: vectorized
         Reducer 2 
             Execution mode: vectorized
@@ -1414,6 +1416,7 @@ STAGE PLANS:
                         key expressions: _col8 (type: boolean), _col1 (type: string), _col3 (type: timestamp), _col5 (type: float), _col6 (type: bigint), _col1 (type: string), _col4 (type: double), _col0 (type: int), _col7 (type: smallint), _col4 (type: double), _col9 (type: int), _col10 (type: bigint), _col11 (type: bigint), _col12 (type: float), _col13 (type: bigint), _col14 (type: double), _col15 (type: double), _col16 (type: bigint), _col17 (type: double), _col18 (type: double), _col19 (type: double), _col20 (type: smallint), _col21 (type: int)
                         sort order: +++++++++++++++++++++++
                         Statistics: Num rows: 10922 Data size: 2348269 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
                         value expressions: _col2 (type: boolean)
             Execution mode: vectorized
         Reducer 2 
@@ -1680,6 +1683,7 @@ STAGE PLANS:
                         key expressions: _col5 (type: smallint), _col1 (type: string), _col2 (type: double), _col3 (type: float), _col4 (type: bigint), _col6 (type: double), _col7 (type: int), _col8 (type: float), _col9 (type: double), _col10 (type: double), _col11 (type: double), _col12 (type: float), _col13 (type: int), _col14 (type: double), _col15 (type: double)
                         sort order: +++++++++++++++
                         Statistics: Num rows: 3868 Data size: 831633 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
                         value expressions: _col0 (type: timestamp)
             Execution mode: vectorized
         Reducer 2 
@@ -1913,6 +1917,7 @@ STAGE PLANS:
                     key expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: double), _col3 (type: double), _col4 (type: bigint), _col5 (type: double), _col6 (type: int), _col7 (type: double), _col8 (type: int), _col9 (type: bigint), _col10 (type: bigint)
                     sort order: +++++++++++
                     Statistics: Num rows: 1251 Data size: 268968 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
         Reducer 3 
             Execution mode: vectorized
             Reduce Operator Tree:
@@ -2375,6 +2380,7 @@ STAGE PLANS:
                     key expressions: _col0 (type: timestamp), _col1 (type: string), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: bigint), _col9 (type: bigint), _col10 (type: double), _col11 (type: tinyint), _col12 (type: double), _col13 (type: double), _col14 (type: double), _col15 (type: double), _col16 (type: double), _col17 (type: double), _col18 (type: double), _col19 (type: double), _col20 (type: double), _col21 (type: double), _col22 (type: double), _col23 (type: double), _col24 (type: double), _col25 (type: double), _col26 (type: double), _col27 (type: tinyint), _col28 (type: double), _col29 (type: double), _col30 (type: double), _col31 (type: double), _col32 (type: double), _col33 (type: double), _col34 (type: bigint), _col35 (type: double), _col36 (type: bigint), _col37 (type: bigint), _col38 (type: double)
                     sort order: +++++++++++++++++++++++++++++++++++++++
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
         Reducer 3 
             Execution mode: vectorized
             Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/udf_case_column_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/udf_case_column_pruning.q.out b/ql/src/test/results/clientpositive/udf_case_column_pruning.q.out
index a9bfd24..7a78b83 100644
--- a/ql/src/test/results/clientpositive/udf_case_column_pruning.q.out
+++ b/ql/src/test/results/clientpositive/udf_case_column_pruning.q.out
@@ -85,6 +85,7 @@ STAGE PLANS:
               key expressions: _col0 (type: int)
               sort order: +
               Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/union3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union3.q.out b/ql/src/test/results/clientpositive/union3.q.out
index 1cb211c..911bd70 100644
--- a/ql/src/test/results/clientpositive/union3.q.out
+++ b/ql/src/test/results/clientpositive/union3.q.out
@@ -59,6 +59,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                  TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Limit
           Number of rows: 1
@@ -136,6 +137,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                  TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Limit
           Number of rows: 1
@@ -165,6 +167,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                  TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Limit
           Number of rows: 1
@@ -194,6 +197,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                  TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Limit
           Number of rows: 1

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/union_remove_25.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_remove_25.q.out b/ql/src/test/results/clientpositive/union_remove_25.q.out
index 37d6a53..c98d4c8 100644
--- a/ql/src/test/results/clientpositive/union_remove_25.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_25.q.out
@@ -291,6 +291,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 500 Data size: 5000 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: string), _col1 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -341,6 +342,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 500 Data size: 5000 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: string), _col1 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -473,6 +475,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 1000 Data size: 10000 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -524,6 +527,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 1000 Data size: 10000 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/union_top_level.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_top_level.q.out b/ql/src/test/results/clientpositive/union_top_level.q.out
index 2773ad8..bdf95c0 100644
--- a/ql/src/test/results/clientpositive/union_top_level.q.out
+++ b/ql/src/test/results/clientpositive/union_top_level.q.out
@@ -45,6 +45,7 @@ STAGE PLANS:
                   Reduce Output Operator
                     sort order: 
                     Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
                     value expressions: _col0 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -118,6 +119,7 @@ STAGE PLANS:
                   Reduce Output Operator
                     sort order: 
                     Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
                     value expressions: _col0 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -157,6 +159,7 @@ STAGE PLANS:
                   Reduce Output Operator
                     sort order: 
                     Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
                     value expressions: _col0 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -291,6 +294,7 @@ STAGE PLANS:
             Reduce Output Operator
               sort order: 
               Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col0 (type: string), _col1 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -395,6 +399,7 @@ STAGE PLANS:
             Reduce Output Operator
               sort order: 
               Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col0 (type: string), _col1 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -501,6 +506,7 @@ STAGE PLANS:
                   Reduce Output Operator
                     sort order: 
                     Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
                     value expressions: _col0 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -634,6 +640,7 @@ STAGE PLANS:
                   Reduce Output Operator
                     sort order: 
                     Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
                     value expressions: _col0 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -673,6 +680,7 @@ STAGE PLANS:
                   Reduce Output Operator
                     sort order: 
                     Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
                     value expressions: _col0 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -787,6 +795,7 @@ STAGE PLANS:
                   Reduce Output Operator
                     sort order: 
                     Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
                     value expressions: _col0 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -915,6 +924,7 @@ STAGE PLANS:
                   Reduce Output Operator
                     sort order: 
                     Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
                     value expressions: _col0 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -954,6 +964,7 @@ STAGE PLANS:
                   Reduce Output Operator
                     sort order: 
                     Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
                     value expressions: _col0 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -1060,6 +1071,7 @@ STAGE PLANS:
                   Reduce Output Operator
                     sort order: 
                     Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
                     value expressions: _col0 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -1188,6 +1200,7 @@ STAGE PLANS:
                   Reduce Output Operator
                     sort order: 
                     Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
                     value expressions: _col0 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -1227,6 +1240,7 @@ STAGE PLANS:
                   Reduce Output Operator
                     sort order: 
                     Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
                     value expressions: _col0 (type: string)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/vector_cast_constant.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_cast_constant.q.java1.7.out b/ql/src/test/results/clientpositive/vector_cast_constant.q.java1.7.out
index e5d56ec..1c2adfe 100644
--- a/ql/src/test/results/clientpositive/vector_cast_constant.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/vector_cast_constant.q.java1.7.out
@@ -166,6 +166,7 @@ STAGE PLANS:
               key expressions: _col0 (type: int)
               sort order: +
               Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: decimal(14,4))
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/vector_char_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_char_2.q.out b/ql/src/test/results/clientpositive/vector_char_2.q.out
index 880dd42..e2d4707 100644
--- a/ql/src/test/results/clientpositive/vector_char_2.q.out
+++ b/ql/src/test/results/clientpositive/vector_char_2.q.out
@@ -110,6 +110,7 @@ STAGE PLANS:
               key expressions: _col0 (type: char(20))
               sort order: +
               Statistics: Num rows: 250 Data size: 49500 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col1 (type: bigint), _col2 (type: bigint)
       Reduce Operator Tree:
         Select Operator
@@ -240,6 +241,7 @@ STAGE PLANS:
               key expressions: _col0 (type: char(20))
               sort order: -
               Statistics: Num rows: 250 Data size: 49500 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col1 (type: bigint), _col2 (type: bigint)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/vector_char_simple.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_char_simple.q.out b/ql/src/test/results/clientpositive/vector_char_simple.q.out
index a61cbc8..a67836d 100644
--- a/ql/src/test/results/clientpositive/vector_char_simple.q.out
+++ b/ql/src/test/results/clientpositive/vector_char_simple.q.out
@@ -74,6 +74,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: char(10))
                 sort order: +
                 Statistics: Num rows: 500 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
                 value expressions: _col1 (type: char(20))
       Execution mode: vectorized
       Reduce Operator Tree:
@@ -167,6 +168,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: char(10))
                 sort order: -
                 Statistics: Num rows: 500 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
                 value expressions: _col1 (type: char(20))
       Execution mode: vectorized
       Reduce Operator Tree:
@@ -262,6 +264,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 10 Data size: 2150 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: int)
       Execution mode: vectorized
       Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/vector_coalesce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_coalesce.q.out b/ql/src/test/results/clientpositive/vector_coalesce.q.out
index 327ff58..681a877 100644
--- a/ql/src/test/results/clientpositive/vector_coalesce.q.out
+++ b/ql/src/test/results/clientpositive/vector_coalesce.q.out
@@ -36,6 +36,7 @@ STAGE PLANS:
                   key expressions: null (type: double), _col1 (type: string), _col2 (type: int), _col3 (type: float), _col4 (type: smallint), _col5 (type: string)
                   sort order: ++++++
                   Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
@@ -119,6 +120,7 @@ STAGE PLANS:
                   key expressions: null (type: tinyint), _col1 (type: double), _col2 (type: int), _col3 (type: double)
                   sort order: ++++
                   Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
@@ -200,6 +202,7 @@ STAGE PLANS:
                   key expressions: null (type: float), null (type: bigint), 0.0 (type: float)
                   sort order: +++
                   Statistics: Num rows: 3072 Data size: 660491 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
@@ -283,6 +286,7 @@ STAGE PLANS:
                   key expressions: _col0 (type: timestamp), _col1 (type: timestamp), _col2 (type: timestamp)
                   sort order: +++
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
@@ -364,6 +368,7 @@ STAGE PLANS:
                   key expressions: null (type: float), null (type: bigint), null (type: float)
                   sort order: +++
                   Statistics: Num rows: 3072 Data size: 660491 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/vector_data_types.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_data_types.q.out b/ql/src/test/results/clientpositive/vector_data_types.q.out
index 86f1677..e857e37 100644
--- a/ql/src/test/results/clientpositive/vector_data_types.q.out
+++ b/ql/src/test/results/clientpositive/vector_data_types.q.out
@@ -120,6 +120,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int)
                 sort order: +++
                 Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
                 value expressions: _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: boolean), _col7 (type: string), _col8 (type: timestamp), _col9 (type: decimal(4,2)), _col10 (type: binary)
       Reduce Operator Tree:
         Select Operator
@@ -205,6 +206,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int)
                 sort order: +++
                 Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
                 value expressions: _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: boolean), _col7 (type: string), _col8 (type: timestamp), _col9 (type: decimal(4,2)), _col10 (type: binary)
       Execution mode: vectorized
       Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out b/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
index e57d6c1..41094e5 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
@@ -42,6 +42,7 @@ STAGE PLANS:
                   key expressions: _col0 (type: decimal(25,14)), _col1 (type: decimal(26,14)), _col2 (type: double), _col3 (type: double), _col4 (type: decimal(12,10)), _col5 (type: int), _col6 (type: smallint), _col7 (type: tinyint), _col8 (type: bigint), _col9 (type: boolean), _col10 (type: double), _col11 (type: float), _col12 (type: string), _col13 (type: timestamp)
                   sort order: ++++++++++++++
                   Statistics: Num rows: 228 Data size: 39491 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/vector_groupby_reduce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_groupby_reduce.q.out b/ql/src/test/results/clientpositive/vector_groupby_reduce.q.out
index 3e7077e..db5c96b 100644
--- a/ql/src/test/results/clientpositive/vector_groupby_reduce.q.out
+++ b/ql/src/test/results/clientpositive/vector_groupby_reduce.q.out
@@ -252,6 +252,7 @@ STAGE PLANS:
                   sort order: +
                   Map-reduce partition columns: _col0 (type: int)
                   Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
       Execution mode: vectorized
       Reduce Operator Tree:
         Group By Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/vector_mr_diff_schema_alias.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_mr_diff_schema_alias.q.out b/ql/src/test/results/clientpositive/vector_mr_diff_schema_alias.q.out
index a3b2e59..81724dc 100644
--- a/ql/src/test/results/clientpositive/vector_mr_diff_schema_alias.q.out
+++ b/ql/src/test/results/clientpositive/vector_mr_diff_schema_alias.q.out
@@ -380,6 +380,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string)
               sort order: +
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/vector_non_string_partition.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_non_string_partition.q.out b/ql/src/test/results/clientpositive/vector_non_string_partition.q.out
index bf2e432..7e02b6f 100644
--- a/ql/src/test/results/clientpositive/vector_non_string_partition.q.out
+++ b/ql/src/test/results/clientpositive/vector_non_string_partition.q.out
@@ -53,6 +53,7 @@ STAGE PLANS:
                   key expressions: _col0 (type: int)
                   sort order: +
                   Statistics: Num rows: 1024 Data size: 113013 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: tinyint)
       Execution mode: vectorized
       Reduce Operator Tree:
@@ -123,6 +124,7 @@ STAGE PLANS:
                   key expressions: _col0 (type: int), _col1 (type: string)
                   sort order: ++
                   Statistics: Num rows: 1024 Data size: 113013 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/vector_partitioned_date_time.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_partitioned_date_time.q.out b/ql/src/test/results/clientpositive/vector_partitioned_date_time.q.out
index a2762a3..98363c7 100644
--- a/ql/src/test/results/clientpositive/vector_partitioned_date_time.q.out
+++ b/ql/src/test/results/clientpositive/vector_partitioned_date_time.q.out
@@ -276,6 +276,7 @@ STAGE PLANS:
                 key expressions: _col5 (type: int), _col2 (type: date)
                 sort order: ++
                 Statistics: Num rows: 137 Data size: 39456 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
                 value expressions: _col0 (type: string), _col1 (type: string), _col3 (type: timestamp), _col4 (type: float)
       Execution mode: vectorized
       Reduce Operator Tree:
@@ -301,6 +302,7 @@ STAGE PLANS:
               key expressions: _col5 (type: int), _col2 (type: date)
               sort order: ++
               Statistics: Num rows: 25 Data size: 7200 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col0 (type: string), _col1 (type: string), _col3 (type: timestamp), _col4 (type: float)
       Reduce Operator Tree:
         Select Operator
@@ -1029,6 +1031,7 @@ STAGE PLANS:
                 key expressions: _col4 (type: int), _col5 (type: date)
                 sort order: ++
                 Statistics: Num rows: 137 Data size: 31776 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
                 value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: timestamp), _col3 (type: float)
       Execution mode: vectorized
       Reduce Operator Tree:
@@ -1054,6 +1057,7 @@ STAGE PLANS:
               key expressions: _col4 (type: int), _col5 (type: date)
               sort order: ++
               Statistics: Num rows: 25 Data size: 5775 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: timestamp), _col3 (type: float)
       Reduce Operator Tree:
         Select Operator
@@ -1830,6 +1834,7 @@ STAGE PLANS:
                 key expressions: _col4 (type: int), _col5 (type: timestamp)
                 sort order: ++
                 Statistics: Num rows: 137 Data size: 33968 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
                 value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: date), _col3 (type: float)
       Execution mode: vectorized
       Reduce Operator Tree:
@@ -1855,6 +1860,7 @@ STAGE PLANS:
               key expressions: _col4 (type: int), _col5 (type: timestamp)
               sort order: ++
               Statistics: Num rows: 25 Data size: 6175 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: date), _col3 (type: float)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out b/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out
index 5352885..8c58aa5 100644
--- a/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out
+++ b/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out
@@ -74,6 +74,7 @@ STAGE PLANS:
               key expressions: _col0 (type: int), _col1 (type: double), _col2 (type: decimal(20,10)), _col3 (type: decimal(23,14))
               sort order: ++++
               Statistics: Num rows: 763 Data size: 180068 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col4 (type: decimal(20,10))
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/vector_string_concat.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_string_concat.q.out b/ql/src/test/results/clientpositive/vector_string_concat.q.out
index 072d837..547d0b6 100644
--- a/ql/src/test/results/clientpositive/vector_string_concat.q.out
+++ b/ql/src/test/results/clientpositive/vector_string_concat.q.out
@@ -343,6 +343,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string)
               sort order: +
               Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/vector_varchar_simple.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_varchar_simple.q.out b/ql/src/test/results/clientpositive/vector_varchar_simple.q.out
index 252d45d..6afedef 100644
--- a/ql/src/test/results/clientpositive/vector_varchar_simple.q.out
+++ b/ql/src/test/results/clientpositive/vector_varchar_simple.q.out
@@ -74,6 +74,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: varchar(10))
                 sort order: +
                 Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
                 value expressions: _col1 (type: varchar(20))
       Execution mode: vectorized
       Reduce Operator Tree:
@@ -167,6 +168,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: varchar(10))
                 sort order: -
                 Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
                 value expressions: _col1 (type: varchar(20))
       Execution mode: vectorized
       Reduce Operator Tree:
@@ -262,6 +264,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 10 Data size: 2150 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: int)
       Execution mode: vectorized
       Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/vectorization_13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_13.q.out b/ql/src/test/results/clientpositive/vectorization_13.q.out
index 38966d1..cffe994 100644
--- a/ql/src/test/results/clientpositive/vectorization_13.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_13.q.out
@@ -126,6 +126,7 @@ STAGE PLANS:
               key expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), _col5 (type: tinyint), _col6 (type: tinyint), _col7 (type: tinyint), _col8 (type: double), _col9 (type: double), _col10 (type: double), _col11 (type: double), _col12 (type: double), _col13 (type: double), _col14 (type: double), _col15 (type: double), _col16 (type: double), _col17 (type: double), _col18 (type: float), _col19 (type: double), _col20 (type: tinyint)
               sort order: +++++++++++++++++++++
               Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: boolean), KEY.reducesinkkey1 (type: tinyint), KEY.reducesinkkey2 (type: timestamp), KEY.reducesinkkey3 (type: float), KEY.reducesinkkey4 (type: string), KEY.reducesinkkey5 (type: tinyint), KEY.reducesinkkey6 (type: tinyint), KEY.reducesinkkey7 (type: tinyint), KEY.reducesinkkey8 (type: double), KEY.reducesinkkey9 (type: double), KEY.reducesinkkey10 (type: double), KEY.reducesinkkey11 (type: double), KEY.reducesinkkey12 (type: double), KEY.reducesinkkey10 (type: double), KEY.reducesinkkey14 (type: double), KEY.reducesinkkey15 (type: double), KEY.reducesinkkey16 (type: double), KEY.reducesinkkey17 (type: double), KEY.reducesinkkey18 (type: float), KEY.reducesinkkey19 (type: double), KEY.reducesinkkey20 (type: tinyint)
@@ -382,6 +383,7 @@ STAGE PLANS:
               key expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), _col5 (type: tinyint), _col6 (type: tinyint), _col7 (type: tinyint), _col8 (type: double), _col9 (type: double), _col10 (type: double), _col11 (type: double), _col12 (type: double), _col13 (type: double), _col14 (type: double), _col15 (type: double), _col16 (type: double), _col17 (type: double), _col18 (type: float), _col19 (type: double), _col20 (type: tinyint)
               sort order: +++++++++++++++++++++
               Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: boolean), KEY.reducesinkkey1 (type: tinyint), KEY.reducesinkkey2 (type: timestamp), KEY.reducesinkkey3 (type: float), KEY.reducesinkkey4 (type: string), KEY.reducesinkkey5 (type: tinyint), KEY.reducesinkkey6 (type: tinyint), KEY.reducesinkkey7 (type: tinyint), KEY.reducesinkkey8 (type: double), KEY.reducesinkkey9 (type: double), KEY.reducesinkkey10 (type: double), KEY.reducesinkkey11 (type: double), KEY.reducesinkkey12 (type: double), KEY.reducesinkkey10 (type: double), KEY.reducesinkkey14 (type: double), KEY.reducesinkkey15 (type: double), KEY.reducesinkkey16 (type: double), KEY.reducesinkkey17 (type: double), KEY.reducesinkkey18 (type: float), KEY.reducesinkkey19 (type: double), KEY.reducesinkkey20 (type: tinyint)

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/vectorization_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_7.q.out b/ql/src/test/results/clientpositive/vectorization_7.q.out
index 6e2a0ea..c3179fb 100644
--- a/ql/src/test/results/clientpositive/vectorization_7.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_7.q.out
@@ -78,6 +78,7 @@ STAGE PLANS:
                   key expressions: _col0 (type: boolean), _col1 (type: bigint), _col2 (type: smallint), _col3 (type: tinyint), _col4 (type: timestamp), _col5 (type: string), _col6 (type: bigint), _col7 (type: int), _col8 (type: smallint), _col9 (type: tinyint), _col10 (type: int), _col11 (type: bigint), _col12 (type: int), _col13 (type: tinyint), _col14 (type: tinyint)
                   sort order: +++++++++++++++
                   Statistics: Num rows: 7281 Data size: 1565441 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
@@ -260,6 +261,7 @@ STAGE PLANS:
                   key expressions: _col0 (type: boolean), _col1 (type: bigint), _col2 (type: smallint), _col3 (type: tinyint), _col4 (type: timestamp), _col5 (type: string), _col6 (type: bigint), _col7 (type: int), _col8 (type: smallint), _col9 (type: tinyint), _col10 (type: int), _col11 (type: bigint), _col12 (type: int), _col13 (type: tinyint), _col14 (type: tinyint)
                   sort order: +++++++++++++++
                   Statistics: Num rows: 7281 Data size: 1565441 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/vectorization_8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_8.q.out b/ql/src/test/results/clientpositive/vectorization_8.q.out
index c38fad1..0f98f54 100644
--- a/ql/src/test/results/clientpositive/vectorization_8.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_8.q.out
@@ -74,6 +74,7 @@ STAGE PLANS:
                   key expressions: _col0 (type: timestamp), _col1 (type: double), _col2 (type: boolean), _col3 (type: string), _col4 (type: float), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: float), _col9 (type: double), _col10 (type: double), _col11 (type: double), _col12 (type: float), _col13 (type: double)
                   sort order: ++++++++++++++
                   Statistics: Num rows: 9216 Data size: 1981473 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
@@ -243,6 +244,7 @@ STAGE PLANS:
                   key expressions: _col0 (type: timestamp), _col1 (type: double), _col2 (type: boolean), _col3 (type: string), _col4 (type: float), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: float), _col9 (type: double), _col10 (type: double), _col11 (type: double), _col12 (type: float), _col13 (type: double)
                   sort order: ++++++++++++++
                   Statistics: Num rows: 9216 Data size: 1981473 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/vectorization_div0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_div0.q.out b/ql/src/test/results/clientpositive/vectorization_div0.q.out
index 9cd35d3..59ee5ad 100644
--- a/ql/src/test/results/clientpositive/vectorization_div0.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_div0.q.out
@@ -183,6 +183,7 @@ STAGE PLANS:
                   key expressions: _col0 (type: bigint), _col1 (type: double)
                   sort order: ++
                   Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col2 (type: double)
       Execution mode: vectorized
       Reduce Operator Tree:
@@ -351,6 +352,7 @@ STAGE PLANS:
                   key expressions: _col0 (type: double), _col1 (type: double)
                   sort order: ++
                   Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col2 (type: double), _col4 (type: double), _col5 (type: double)
       Execution mode: vectorized
       Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/vectorization_part_project.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_part_project.q.out b/ql/src/test/results/clientpositive/vectorization_part_project.q.out
index f98e79c..6fbc37c 100644
--- a/ql/src/test/results/clientpositive/vectorization_part_project.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_part_project.q.out
@@ -69,6 +69,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: double)
                 sort order: +
                 Statistics: Num rows: 200 Data size: 54496 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/vectorization_short_regress.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/vectorization_short_regress.q.out
index 570e649..4ddfa6e 100644
--- a/ql/src/test/results/clientpositive/vectorization_short_regress.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_short_regress.q.out
@@ -920,6 +920,7 @@ STAGE PLANS:
                   key expressions: _col0 (type: int), _col1 (type: double), _col2 (type: timestamp), _col3 (type: string), _col4 (type: boolean), _col5 (type: tinyint), _col6 (type: float), _col7 (type: timestamp), _col8 (type: smallint), _col9 (type: bigint), _col10 (type: bigint), _col11 (type: int), _col12 (type: double), _col13 (type: smallint), _col14 (type: smallint), _col15 (type: smallint), _col16 (type: double), _col17 (type: double), _col18 (type: float), _col19 (type: double), _col20 (type: double), _col21 (type: tinyint), _col22 (type: double)
                   sort order: +++++++++++++++++++++++
                   Statistics: Num rows: 9898 Data size: 2128105 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
@@ -1171,6 +1172,7 @@ STAGE PLANS:
                   key expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: boolean), _col4 (type: float), _col5 (type: double), _col6 (type: timestamp), _col7 (type: smallint), _col8 (type: string), _col9 (type: boolean), _col10 (type: double), _col11 (type: double), _col12 (type: double), _col13 (type: double), _col14 (type: float), _col15 (type: float), _col16 (type: float), _col17 (type: double), _col18 (type: double), _col19 (type: bigint), _col20 (type: double), _col21 (type: smallint), _col22 (type: bigint), _col23 (type: double), _col24 (type: smallint)
                   sort order: +++++++++++++++++++++++++
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
@@ -1371,6 +1373,7 @@ STAGE PLANS:
                   key expressions: _col8 (type: boolean), _col1 (type: string), _col3 (type: timestamp), _col5 (type: float), _col6 (type: bigint), _col1 (type: string), _col4 (type: double), _col0 (type: int), _col7 (type: smallint), _col4 (type: double), _col9 (type: int), _col10 (type: bigint), _col11 (type: bigint), _col12 (type: float), _col13 (type: bigint), _col14 (type: double), _col15 (type: double), _col16 (type: bigint), _col17 (type: double), _col18 (type: double), _col19 (type: double), _col20 (type: smallint), _col21 (type: int)
                   sort order: +++++++++++++++++++++++
                   Statistics: Num rows: 10922 Data size: 2348269 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col2 (type: boolean)
       Execution mode: vectorized
       Reduce Operator Tree:
@@ -1630,6 +1633,7 @@ STAGE PLANS:
                   key expressions: _col5 (type: smallint), _col1 (type: string), _col2 (type: double), _col3 (type: float), _col4 (type: bigint), _col6 (type: double), _col7 (type: int), _col8 (type: float), _col9 (type: double), _col10 (type: double), _col11 (type: double), _col12 (type: float), _col13 (type: int), _col14 (type: double), _col15 (type: double)
                   sort order: +++++++++++++++
                   Statistics: Num rows: 3868 Data size: 831633 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: timestamp)
       Execution mode: vectorized
       Reduce Operator Tree:
@@ -1866,6 +1870,7 @@ STAGE PLANS:
               key expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: double), _col3 (type: double), _col4 (type: bigint), _col5 (type: double), _col6 (type: int), _col7 (type: double), _col8 (type: int), _col9 (type: bigint), _col10 (type: bigint)
               sort order: +++++++++++
               Statistics: Num rows: 1251 Data size: 268968 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: smallint), KEY.reducesinkkey1 (type: int), KEY.reducesinkkey2 (type: double), KEY.reducesinkkey3 (type: double), KEY.reducesinkkey4 (type: bigint), KEY.reducesinkkey5 (type: double), KEY.reducesinkkey6 (type: int), KEY.reducesinkkey7 (type: double), KEY.reducesinkkey8 (type: int), KEY.reducesinkkey9 (type: bigint), KEY.reducesinkkey10 (type: bigint)
@@ -2334,6 +2339,7 @@ STAGE PLANS:
               key expressions: _col0 (type: timestamp), _col1 (type: string), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: bigint), _col9 (type: bigint), _col10 (type: double), _col11 (type: tinyint), _col12 (type: double), _col13 (type: double), _col14 (type: double), _col15 (type: double), _col16 (type: double), _col17 (type: double), _col18 (type: double), _col19 (type: double), _col20 (type: double), _col21 (type: double), _col22 (type: double), _col23 (type: double), _col24 (type: double), _col25 (type: double), _col26 (type: double), _col27 (type: tinyint), _col28 (type: double), _col29 (type: double), _col30 (type: double), _col31 (type: double), _col32 (type: double), _col33 (type: double), _col34 (type: bigint), _col35 (type: double), _col36 (type: bigint), _col37 (type: bigint), _col38 (type: double)
               sort order: +++++++++++++++++++++++++++++++++++++++
               Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: timestamp), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: double), KEY.reducesinkkey3 (type: double), KEY.reducesinkkey4 (type: double), KEY.reducesinkkey5 (type: double), KEY.reducesinkkey4 (type: double), KEY.reducesinkkey7 (type: double), KEY.reducesinkkey8 (type: bigint), KEY.reducesinkkey9 (type: bigint), KEY.reducesinkkey10 (type: double), KEY.reducesinkkey11 (type: tinyint), KEY.reducesinkkey12 (type: double), KEY.reducesinkkey13 (type: double), KEY.reducesinkkey14 (type: double), KEY.reducesinkkey15 (type: double), KEY.reducesinkkey16 (type: double), KEY.reducesinkkey17 (type: double), KEY.reducesinkkey18 (type: double), KEY.reducesinkkey19 (type: double), KEY.reducesinkkey20 (type: double), KEY.reducesinkkey21 (type: double), KEY.reducesinkkey22 (type: double), KEY.reducesinkkey23 (type: double), KEY.reducesinkkey24 (type: double), KEY.reducesinkkey25 (type: double), KEY.reducesinkkey26 (type: double), KEY.redu
 cesinkkey27 (type: tinyint), KEY.reducesinkkey28 (type: double), KEY.reducesinkkey29 (type: double), KEY.reducesinkkey30 (type: double), KEY.reducesinkkey31 (type: double), KEY.reducesinkkey32 (type: double), KEY.reducesinkkey33 (type: double), KEY.reducesinkkey34 (type: bigint), KEY.reducesinkkey35 (type: double), KEY.reducesinkkey36 (type: bigint), KEY.reducesinkkey8 (type: bigint), KEY.reducesinkkey38 (type: double)


[27/27] hive git commit: HIVE-12338: Add webui to HiveServer2 (Jimmy, reviewed by Mohit, Szehon, Lefty)

Posted by om...@apache.org.
HIVE-12338: Add webui to HiveServer2 (Jimmy, reviewed by Mohit, Szehon, Lefty)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/eb1b80d9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/eb1b80d9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/eb1b80d9

Branch: refs/heads/master-fixed
Commit: eb1b80d9fd0c578cc155fd48b969324c93b8c66a
Parents: f2e46a2
Author: Jimmy Xiang <jx...@apache.org>
Authored: Thu Nov 19 08:10:29 2015 -0800
Committer: Owen O'Malley <om...@apache.org>
Committed: Mon Nov 30 11:14:38 2015 -0800

----------------------------------------------------------------------
 common/pom.xml                                  |   5 +
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   5 +
 .../hive/http/AdminAuthorizedServlet.java       |  45 ++
 .../java/org/apache/hive/http/ConfServlet.java  | 101 +++++
 .../java/org/apache/hive/http/HttpServer.java   | 316 ++++++++++++++
 .../org/apache/hive/http/JMXJsonServlet.java    | 412 +++++++++++++++++++
 pom.xml                                         |   1 +
 ql/pom.xml                                      |   6 +
 service/pom.xml                                 |  56 +++
 .../hive/service/cli/operation/Operation.java   |   2 +-
 .../service/cli/operation/OperationManager.java |  26 +-
 .../service/cli/operation/SQLOperation.java     |   8 +-
 .../service/cli/session/HiveSessionBase.java    |   4 +
 .../service/cli/session/HiveSessionImpl.java    |  12 +
 .../service/cli/session/SessionManager.java     |  16 +-
 .../apache/hive/service/server/HiveServer2.java |  47 +++
 .../hive-webapps/hiveserver2/hiveserver2.jsp    | 186 +++++++++
 .../hive-webapps/hiveserver2/index.html         |  20 +
 .../static/css/bootstrap-theme.min.css          |  10 +
 .../hive-webapps/static/css/bootstrap.min.css   |   9 +
 .../resources/hive-webapps/static/css/hive.css  |  24 ++
 .../fonts/glyphicons-halflings-regular.eot      | Bin 0 -> 14079 bytes
 .../fonts/glyphicons-halflings-regular.svg      | 228 ++++++++++
 .../fonts/glyphicons-halflings-regular.ttf      | Bin 0 -> 29512 bytes
 .../fonts/glyphicons-halflings-regular.woff     | Bin 0 -> 16448 bytes
 .../hive-webapps/static/hive_logo.jpeg          | Bin 0 -> 5616 bytes
 spark-client/pom.xml                            |   6 +
 27 files changed, 1529 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/eb1b80d9/common/pom.xml
----------------------------------------------------------------------
diff --git a/common/pom.xml b/common/pom.xml
index ee74282..72bb550 100644
--- a/common/pom.xml
+++ b/common/pom.xml
@@ -56,6 +56,11 @@
       <version>${commons-lang.version}</version>
     </dependency>
     <dependency>
+      <groupId>org.eclipse.jetty.aggregate</groupId>
+      <artifactId>jetty-all</artifactId>
+      <version>${jetty.version}</version>
+    </dependency>
+    <dependency>
       <groupId>joda-time</groupId>
       <artifactId>joda-time</artifactId>
       <version>${joda.version}</version>

http://git-wip-us.apache.org/repos/asf/hive/blob/eb1b80d9/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index db942b0..9e805bd 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1847,6 +1847,11 @@ public class HiveConf extends Configuration {
     HIVE_SERVER2_PARALLEL_COMPILATION("hive.driver.parallel.compilation", false, "Whether to\n" +
         "enable parallel compilation between sessions on HiveServer2. The default is false."),
 
+    // HiveServer2 WebUI
+    HIVE_SERVER2_WEBUI_BIND_HOST("hive.server2.webui.host", "0.0.0.0", "The host address the HiveServer2 WebUI will listen on"),
+    HIVE_SERVER2_WEBUI_PORT("hive.server2.webui.port", 10002, "The port the HiveServer2 WebUI will listen on"),
+    HIVE_SERVER2_WEBUI_MAX_THREADS("hive.server2.webui.max.threads", 50, "The max HiveServer2 WebUI threads"),
+
     // Tez session settings
     HIVE_SERVER2_TEZ_DEFAULT_QUEUES("hive.server2.tez.default.queues", "",
         "A list of comma separated values corresponding to YARN queues of the same name.\n" +

http://git-wip-us.apache.org/repos/asf/hive/blob/eb1b80d9/common/src/java/org/apache/hive/http/AdminAuthorizedServlet.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hive/http/AdminAuthorizedServlet.java b/common/src/java/org/apache/hive/http/AdminAuthorizedServlet.java
new file mode 100644
index 0000000..5d957c2
--- /dev/null
+++ b/common/src/java/org/apache/hive/http/AdminAuthorizedServlet.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hive.http;
+
+import java.io.IOException;
+
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.eclipse.jetty.servlet.DefaultServlet;
+
+/**
+ * General servlet which is admin-authorized.
+ */
+public class AdminAuthorizedServlet extends DefaultServlet {
+
+  private static final long serialVersionUID = 1L;
+
+  @Override
+  protected void doGet(HttpServletRequest request, HttpServletResponse response)
+    throws ServletException, IOException {
+    // Do the authorization
+    if (HttpServer.hasAdministratorAccess(getServletContext(), request,
+        response)) {
+      // Authorization is done. Just call super.
+      super.doGet(request, response);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/eb1b80d9/common/src/java/org/apache/hive/http/ConfServlet.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hive/http/ConfServlet.java b/common/src/java/org/apache/hive/http/ConfServlet.java
new file mode 100644
index 0000000..253df4f
--- /dev/null
+++ b/common/src/java/org/apache/hive/http/ConfServlet.java
@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hive.http;
+
+import java.io.IOException;
+import java.io.Writer;
+
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * A servlet to print out the running configuration data.
+ */
+public class ConfServlet extends HttpServlet {
+  private static final long serialVersionUID = 1L;
+
+  private static final String FORMAT_JSON = "json";
+  private static final String FORMAT_XML = "xml";
+  private static final String FORMAT_PARAM = "format";
+
+  /**
+   * Return the Configuration of the daemon hosting this servlet.
+   * This is populated when the HttpServer starts.
+   */
+  private Configuration getConfFromContext() {
+    Configuration conf = (Configuration)getServletContext().getAttribute(
+        HttpServer.CONF_CONTEXT_ATTRIBUTE);
+    assert conf != null;
+    return conf;
+  }
+
+  @Override
+  public void doGet(HttpServletRequest request, HttpServletResponse response)
+      throws ServletException, IOException {
+
+    if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
+                                                   request, response)) {
+      return;
+    }
+
+    String format = request.getParameter(FORMAT_PARAM);
+    if (null == format) {
+      format = FORMAT_XML;
+    }
+
+    if (FORMAT_XML.equals(format)) {
+      response.setContentType("text/xml; charset=utf-8");
+    } else if (FORMAT_JSON.equals(format)) {
+      response.setContentType("application/json; charset=utf-8");
+    }
+
+    Writer out = response.getWriter();
+    try {
+      writeResponse(getConfFromContext(), out, format);
+    } catch (BadFormatException bfe) {
+      response.sendError(HttpServletResponse.SC_BAD_REQUEST, bfe.getMessage());
+    }
+    out.close();
+  }
+
+  /**
+   * Guts of the servlet - extracted for easy testing.
+   */
+  static void writeResponse(Configuration conf, Writer out, String format)
+    throws IOException, BadFormatException {
+    if (FORMAT_JSON.equals(format)) {
+      Configuration.dumpConfiguration(conf, out);
+    } else if (FORMAT_XML.equals(format)) {
+      conf.writeXml(out);
+    } else {
+      throw new BadFormatException("Bad format: " + format);
+    }
+  }
+
+  public static class BadFormatException extends Exception {
+    private static final long serialVersionUID = 1L;
+
+    public BadFormatException(String msg) {
+      super(msg);
+    }
+  }
+}
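
As the comment above notes, writeResponse is extracted for easy testing. A minimal sketch of driving it directly (hypothetical test-style code, placed in the org.apache.hive.http package because the method is package-private):

  package org.apache.hive.http;

  import java.io.StringWriter;

  import org.apache.hadoop.conf.Configuration;

  public class ConfServletSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration(false);
      conf.set("hive.server2.webui.port", "10002");

      // "xml" is the default format when no ?format= parameter is given.
      StringWriter xml = new StringWriter();
      ConfServlet.writeResponse(conf, xml, "xml");
      System.out.println(xml);

      // "json" is the other accepted value; anything else throws
      // BadFormatException, which doGet maps to HTTP 400.
      StringWriter json = new StringWriter();
      ConfServlet.writeResponse(conf, json, "json");
      System.out.println(json);
    }
  }

Once HiveServer2 is up, the same data should be reachable over HTTP at the /conf path registered in HttpServer below, e.g. http://<host>:10002/conf?format=json with the default WebUI port.
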

http://git-wip-us.apache.org/repos/asf/hive/blob/eb1b80d9/common/src/java/org/apache/hive/http/HttpServer.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hive/http/HttpServer.java b/common/src/java/org/apache/hive/http/HttpServer.java
new file mode 100644
index 0000000..1ff8d7c
--- /dev/null
+++ b/common/src/java/org/apache/hive/http/HttpServer.java
@@ -0,0 +1,316 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.http;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URL;
+
+import javax.servlet.ServletContext;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.util.Shell;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.core.Appender;
+import org.apache.logging.log4j.core.Logger;
+import org.apache.logging.log4j.core.LoggerContext;
+import org.apache.logging.log4j.core.appender.AbstractOutputStreamAppender;
+import org.apache.logging.log4j.core.appender.FileManager;
+import org.apache.logging.log4j.core.appender.OutputStreamManager;
+import org.eclipse.jetty.server.Connector;
+import org.eclipse.jetty.server.Server;
+import org.eclipse.jetty.server.handler.ContextHandler.Context;
+import org.eclipse.jetty.server.handler.ContextHandlerCollection;
+import org.eclipse.jetty.server.nio.SelectChannelConnector;
+import org.eclipse.jetty.servlet.DefaultServlet;
+import org.eclipse.jetty.servlet.ServletContextHandler;
+import org.eclipse.jetty.servlet.ServletHolder;
+import org.eclipse.jetty.util.thread.QueuedThreadPool;
+import org.eclipse.jetty.webapp.WebAppContext;
+
+/**
+ * A simple embedded Jetty server to serve as HS2/HMS web UI.
+ */
+public class HttpServer {
+  public static final String CONF_CONTEXT_ATTRIBUTE = "hive.conf";
+  public static final String ADMINS_ACL = "admins.acl";
+
+  private final AccessControlList adminsAcl;
+  private final String appDir;
+  private final String name;
+  private final String host;
+  private final int port;
+  private final int maxThreads;
+  private final Configuration conf;
+  private final WebAppContext webAppContext;
+  private final Server webServer;
+
+  /**
+   * Create a status server on the given port.
+   */
+  public HttpServer(String name, String host, int port, int maxThreads,
+      Configuration conf, AccessControlList adminsAcl) throws IOException {
+    this.name = name;
+    this.host = host;
+    this.port = port;
+    this.maxThreads = maxThreads;
+    this.conf = conf;
+    this.adminsAcl = adminsAcl;
+
+    webServer = new Server();
+    appDir = getWebAppsPath(name);
+    webAppContext = createWebAppContext();
+    initializeWebServer();
+  }
+
+  public void start() throws Exception {
+    webServer.start();
+  }
+
+  public void stop() throws Exception {
+    webServer.stop();
+  }
+
+  public int getPort() {
+    return port;
+  }
+
+  /**
+   * Set servlet context attribute that can be used in jsp.
+   */
+  public void setContextAttribute(String name, Object value) {
+    webAppContext.getServletContext().setAttribute(name, value);
+  }
+
+  /**
+   * Checks that the user has privileges to access the instrumentation servlets.
+   * <p/>
+   * If <code>hadoop.security.instrumentation.requires.admin</code> is set to FALSE
+   * (default value) it always returns TRUE.
+   * <p/>
+   * If <code>hadoop.security.instrumentation.requires.admin</code> is set to TRUE
+   * it will check if the current user is in the admin ACLS. If the user is
+   * in the admin ACLs it returns TRUE, otherwise it returns FALSE.
+   *
+   * @param servletContext the servlet context.
+   * @param request the servlet request.
+   * @param response the servlet response.
+   * @return TRUE/FALSE based on the logic described above.
+   */
+  static boolean isInstrumentationAccessAllowed(
+    ServletContext servletContext, HttpServletRequest request,
+    HttpServletResponse response) throws IOException {
+    Configuration conf =
+      (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE);
+
+    boolean access = true;
+    boolean adminAccess = conf.getBoolean(
+      CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN,
+      false);
+    if (adminAccess) {
+      access = hasAdministratorAccess(servletContext, request, response);
+    }
+    return access;
+  }
+
+  /**
+   * Is the user sending the HttpServletRequest in the administrator ACLs? If
+   * not, the response will be modified to send an error to the user.
+   *
+   * @param servletContext
+   * @param request
+   * @param response used to send the error response if user does not have admin access.
+   * @return true if admin-authorized, false otherwise
+   * @throws IOException
+   */
+  static boolean hasAdministratorAccess(
+      ServletContext servletContext, HttpServletRequest request,
+      HttpServletResponse response) throws IOException {
+    Configuration conf =
+        (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE);
+    // If there is no authorization, anybody has administrator access.
+    if (!conf.getBoolean(
+        CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
+      return true;
+    }
+
+    String remoteUser = request.getRemoteUser();
+    if (remoteUser == null) {
+      response.sendError(HttpServletResponse.SC_UNAUTHORIZED,
+                         "Unauthenticated users are not " +
+                         "authorized to access this page.");
+      return false;
+    }
+
+    if (servletContext.getAttribute(ADMINS_ACL) != null &&
+        !userHasAdministratorAccess(servletContext, remoteUser)) {
+      response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "User "
+          + remoteUser + " is unauthorized to access this page.");
+      return false;
+    }
+
+    return true;
+  }
+
+  /**
+   * Get the admin ACLs from the given ServletContext and check if the given
+   * user is in the ACL.
+   *
+   * @param servletContext the context containing the admin ACL.
+   * @param remoteUser the remote user to check for.
+   * @return true if the user is present in the ACL, false if no ACL is set or
+   *         the user is not present
+   */
+  static boolean userHasAdministratorAccess(ServletContext servletContext,
+      String remoteUser) {
+    AccessControlList adminsAcl = (AccessControlList) servletContext
+        .getAttribute(ADMINS_ACL);
+    UserGroupInformation remoteUserUGI =
+        UserGroupInformation.createRemoteUser(remoteUser);
+    return adminsAcl != null && adminsAcl.isUserAllowed(remoteUserUGI);
+  }
+
+  /**
+   * Create the web context for the application of specified name
+   */
+  WebAppContext createWebAppContext() {
+    WebAppContext ctx = new WebAppContext();
+    setContextAttributes(ctx.getServletContext());
+    ctx.setDisplayName(name);
+    ctx.setContextPath("/");
+    ctx.setWar(appDir + "/" + name);
+    return ctx;
+  }
+
+  /**
+   * Create a default regular channel connector for "http" requests
+   */
+  Connector createDefaultChannelConnector() {
+    SelectChannelConnector connector = new SelectChannelConnector();
+    connector.setLowResourcesMaxIdleTime(10000);
+    connector.setAcceptQueueSize(maxThreads);
+    connector.setResolveNames(false);
+    connector.setUseDirectBuffers(false);
+    connector.setReuseAddress(!Shell.WINDOWS);
+    return connector;
+  }
+
+  void setContextAttributes(Context ctx) {
+    ctx.setAttribute(CONF_CONTEXT_ATTRIBUTE, conf);
+    ctx.setAttribute(ADMINS_ACL, adminsAcl);
+  }
+
+  void initializeWebServer() {
+    // Create the thread pool for the web server to handle HTTP requests
+    QueuedThreadPool threadPool = maxThreads <= 0 ? new QueuedThreadPool()
+      : new QueuedThreadPool(maxThreads);
+    threadPool.setDaemon(true);
+    threadPool.setName(name + "-web");
+    webServer.setThreadPool(threadPool);
+
+    // Create the channel connector for the web server
+    Connector connector = createDefaultChannelConnector();
+    connector.setHost(host);
+    connector.setPort(port);
+    webServer.addConnector(connector);
+
+    // Configure web application contexts for the web server
+    ContextHandlerCollection contexts = new ContextHandlerCollection();
+    contexts.addHandler(webAppContext);
+    webServer.setHandler(contexts);
+
+    addServlet("jmx", "/jmx", JMXJsonServlet.class);
+    addServlet("conf", "/conf", ConfServlet.class);
+
+    ServletContextHandler staticCtx =
+      new ServletContextHandler(contexts, "/static");
+    staticCtx.setResourceBase(appDir + "/static");
+    staticCtx.addServlet(DefaultServlet.class, "/*");
+    staticCtx.setDisplayName("static");
+
+    String logDir = getLogDir();
+    if (logDir != null) {
+      ServletContextHandler logCtx =
+        new ServletContextHandler(contexts, "/logs");
+      setContextAttributes(logCtx.getServletContext());
+      logCtx.addServlet(AdminAuthorizedServlet.class, "/*");
+      logCtx.setResourceBase(logDir);
+      logCtx.setDisplayName("logs");
+    }
+  }
+
+  String getLogDir() {
+    String logDir = conf.get("hive.log.dir");
+    if (logDir == null) {
+      logDir = System.getProperty("hive.log.dir");
+    }
+    if (logDir != null) {
+      return logDir;
+    }
+
+    LoggerContext context = (LoggerContext)LogManager.getContext(false);
+    for (Logger logger: context.getLoggers()) {
+      for (Appender appender: logger.getAppenders().values()) {
+        if (appender instanceof AbstractOutputStreamAppender) {
+          OutputStreamManager manager =
+            ((AbstractOutputStreamAppender<?>)appender).getManager();
+          if (manager instanceof FileManager) {
+            String fileName = ((FileManager)manager).getFileName();
+            if (fileName != null) {
+              return fileName.substring(0, fileName.lastIndexOf('/'));
+            }
+          }
+        }
+      }
+    }
+    return null;
+  }
+
+  String getWebAppsPath(String appName) throws FileNotFoundException {
+    String relativePath = "hive-webapps/" + appName;
+    URL url = getClass().getClassLoader().getResource(relativePath);
+    if (url == null) {
+      throw new FileNotFoundException(relativePath
+          + " not found in CLASSPATH");
+    }
+    String urlString = url.toString();
+    return urlString.substring(0, urlString.lastIndexOf('/'));
+  }
+
+  /**
+   * Add a servlet in the server.
+   * @param name The name of the servlet (can be passed as null)
+   * @param pathSpec The path spec for the servlet
+   * @param clazz The servlet class
+   */
+  void addServlet(String name, String pathSpec,
+      Class<? extends HttpServlet> clazz) {
+    ServletHolder holder = new ServletHolder(clazz);
+    if (name != null) {
+      holder.setName(name);
+    }
+    webAppContext.addServlet(holder, pathSpec);
+  }
+}
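
The HiveServer2 side of the wiring is not shown in this excerpt, but based on the constructor and start() defined above, a minimal sketch of bringing the WebUI up might look like the following (illustrative only; it assumes the hive-webapps/hiveserver2 resources added by this commit are on the classpath, and passes no admin ACL):

  import org.apache.hadoop.hive.conf.HiveConf;
  import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
  import org.apache.hive.http.HttpServer;

  public class WebUiStartupSketch {
    public static void main(String[] args) throws Exception {
      HiveConf conf = new HiveConf();
      // "hiveserver2" selects the hive-webapps/hiveserver2 webapp directory.
      HttpServer webServer = new HttpServer("hiveserver2",
          conf.getVar(ConfVars.HIVE_SERVER2_WEBUI_BIND_HOST),
          conf.getIntVar(ConfVars.HIVE_SERVER2_WEBUI_PORT),
          conf.getIntVar(ConfVars.HIVE_SERVER2_WEBUI_MAX_THREADS),
          conf, null);
      webServer.start();
      // The server now answers on /, /jmx, /conf, /static and, when a log
      // directory can be determined, /logs.
      webServer.stop();
    }
  }
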

http://git-wip-us.apache.org/repos/asf/hive/blob/eb1b80d9/common/src/java/org/apache/hive/http/JMXJsonServlet.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hive/http/JMXJsonServlet.java b/common/src/java/org/apache/hive/http/JMXJsonServlet.java
new file mode 100644
index 0000000..7535b26
--- /dev/null
+++ b/common/src/java/org/apache/hive/http/JMXJsonServlet.java
@@ -0,0 +1,412 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.http;
+
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.lang.management.ManagementFactory;
+import java.lang.reflect.Array;
+import java.util.Iterator;
+import java.util.Set;
+
+import javax.management.AttributeNotFoundException;
+import javax.management.InstanceNotFoundException;
+import javax.management.IntrospectionException;
+import javax.management.MBeanAttributeInfo;
+import javax.management.MBeanException;
+import javax.management.MBeanInfo;
+import javax.management.MBeanServer;
+import javax.management.MalformedObjectNameException;
+import javax.management.ObjectName;
+import javax.management.ReflectionException;
+import javax.management.RuntimeErrorException;
+import javax.management.RuntimeMBeanException;
+import javax.management.openmbean.CompositeData;
+import javax.management.openmbean.CompositeType;
+import javax.management.openmbean.TabularData;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.codehaus.jackson.JsonFactory;
+import org.codehaus.jackson.JsonGenerator;
+
+/*
+ * This servlet is based on the JMXProxyServlet from Tomcat 7.0.14. It has
+ * been rewritten to be read only and to output in a JSON format so it is not
+ * really that close to the original.
+ */
+/**
+ * Provides read-only web access to JMX.
+ * <p>
+ * This servlet generally will be placed under the /jmx URL for each
+ * HttpServer.  It provides read-only
+ * access to JMX metrics.  The optional <code>qry</code> parameter
+ * may be used to query only a subset of the JMX Beans.  This query
+ * functionality is provided through the
+ * {@link MBeanServer#queryNames(ObjectName, javax.management.QueryExp)}
+ * method.
+ * <p>
+ * For example <code>http://.../jmx?qry=Hadoop:*</code> will return
+ * all Hadoop metrics exposed through JMX.
+ * <p>
+ * The optional <code>get</code> parameter is used to query a specific
+ * attribute of a JMX bean.  The format of the URL is
+ * <code>http://.../jmx?get=MXBeanName::AttributeName</code>
+ * <p>
+ * For example 
+ * <code>
+ * http://../jmx?get=Hadoop:service=NameNode,name=NameNodeInfo::ClusterId
+ * </code> will return the cluster id of the namenode mxbean.
+ * <p>
+ * If the <code>qry</code> or the <code>get</code> parameter is not formatted
+ * correctly then a 400 BAD REQUEST http response code will be returned.
+ * <p>
+ * If a resource such as an mbean or attribute cannot be found,
+ * a 404 SC_NOT_FOUND http response code will be returned.
+ * <p>
+ * The return format is JSON and in the form
+ * <p>
+ *  <code><pre>
+ *  {
+ *    "beans" : [
+ *      {
+ *        "name":"bean-name"
+ *        ...
+ *      }
+ *    ]
+ *  }
+ *  </pre></code>
+ *  <p>
+ *  The servlet attempts to convert the JMXBeans into JSON. Each
+ *  bean's attributes will be converted to a JSON object member.
+ *  
+ *  If the attribute is a boolean, a number, a string, or an array
+ *  it will be converted to the JSON equivalent. 
+ *  
+ *  If the value is a {@link CompositeData} then it will be converted
+ *  to a JSON object with the keys as the name of the JSON member and
+ *  the value is converted following these same rules.
+ *  
+ *  If the value is a {@link TabularData} then it will be converted
+ *  to an array of the {@link CompositeData} elements that it contains.
+ *  
+ *  All other objects will be converted to a string and output as such.
+ *  
+ *  The bean's name and modelerType will be returned for all beans.
+ *
+ *  The optional parameter "callback" may be used to deliver a JSONP response.
+ *  
+ */
+public class JMXJsonServlet extends HttpServlet {
+  private static final Log LOG = LogFactory.getLog(JMXJsonServlet.class);
+  static final String ACCESS_CONTROL_ALLOW_METHODS =
+      "Access-Control-Allow-Methods";
+  static final String ACCESS_CONTROL_ALLOW_ORIGIN =
+      "Access-Control-Allow-Origin";
+
+  private static final long serialVersionUID = 1L;
+
+  /**
+   * MBean server.
+   */
+  protected transient MBeanServer mBeanServer;
+
+  protected transient JsonFactory jsonFactory;
+
+  /**
+   * Initialize this servlet.
+   */
+  @Override
+  public void init() throws ServletException {
+    // Retrieve the MBean server
+    mBeanServer = ManagementFactory.getPlatformMBeanServer();
+    jsonFactory = new JsonFactory();
+  }
+
+  /**
+   * Process a GET request for the specified resource.
+   * 
+   * @param request
+   *          The servlet request we are processing
+   * @param response
+   *          The servlet response we are creating
+   */
+  @Override
+  public void doGet(HttpServletRequest request, HttpServletResponse response) {
+    try {
+      if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), request, response)) {
+        return;
+      }
+      JsonGenerator jg = null;
+      PrintWriter writer = null;
+      try {
+        writer = response.getWriter();
+
+        response.setContentType("application/json; charset=utf8");
+        response.setHeader(ACCESS_CONTROL_ALLOW_METHODS, "GET");
+        response.setHeader(ACCESS_CONTROL_ALLOW_ORIGIN, "*");
+
+        jg = jsonFactory.createJsonGenerator(writer);
+        jg.disable(JsonGenerator.Feature.AUTO_CLOSE_TARGET);
+        jg.useDefaultPrettyPrinter();
+        jg.writeStartObject();
+
+        // query per mbean attribute
+        String getmethod = request.getParameter("get");
+        if (getmethod != null) {
+          String[] splitStrings = getmethod.split("\\:\\:");
+          if (splitStrings.length != 2) {
+            jg.writeStringField("result", "ERROR");
+            jg.writeStringField("message", "query format is not as expected.");
+            jg.flush();
+            response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
+            return;
+          }
+          listBeans(jg, new ObjectName(splitStrings[0]), splitStrings[1],
+              response);
+          return;
+        }
+
+        // query per mbean
+        String qry = request.getParameter("qry");
+        if (qry == null) {
+          qry = "*:*";
+        }
+        listBeans(jg, new ObjectName(qry), null, response);
+      } finally {
+        if (jg != null) {
+          jg.close();
+        }
+        if (writer != null) {
+          writer.close();
+        }
+      }
+    } catch (IOException e) {
+      LOG.error("Caught an exception while processing JMX request", e);
+      response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+    } catch (MalformedObjectNameException e) {
+      LOG.error("Caught an exception while processing JMX request", e);
+      response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
+    }
+  }
+
+  // --------------------------------------------------------- Private Methods
+  private void listBeans(JsonGenerator jg, ObjectName qry, String attribute, 
+      HttpServletResponse response) 
+  throws IOException {
+    LOG.debug("Listing beans for "+qry);
+    Set<ObjectName> names = null;
+    names = mBeanServer.queryNames(qry, null);
+
+    jg.writeArrayFieldStart("beans");
+    Iterator<ObjectName> it = names.iterator();
+    while (it.hasNext()) {
+      ObjectName oname = it.next();
+      MBeanInfo minfo;
+      String code = "";
+      Object attributeinfo = null;
+      try {
+        minfo = mBeanServer.getMBeanInfo(oname);
+        code = minfo.getClassName();
+        String prs = "";
+        try {
+          if ("org.apache.commons.modeler.BaseModelMBean".equals(code)) {
+            prs = "modelerType";
+            code = (String) mBeanServer.getAttribute(oname, prs);
+          }
+          if (attribute!=null) {
+            prs = attribute;
+            attributeinfo = mBeanServer.getAttribute(oname, prs);
+          }
+        } catch (AttributeNotFoundException e) {
+          // If the modelerType attribute was not found, the class name is used
+          // instead.
+          LOG.error("getting attribute " + prs + " of " + oname
+              + " threw an exception", e);
+        } catch (MBeanException e) {
+          // The code inside the attribute getter threw an exception so log it,
+          // and fall back on the class name
+          LOG.error("getting attribute " + prs + " of " + oname
+              + " threw an exception", e);
+        } catch (RuntimeException e) {
+          // Even though an MBeanException is available to wrap them, runtime
+          // exceptions can still leak through, so treat them the same as
+          // MBeanException.
+          LOG.error("getting attribute " + prs + " of " + oname
+              + " threw an exception", e);
+        } catch (ReflectionException e) {
+          // This happens when reflectively invoking the attribute getter inside
+          // the JMX bean threw an exception, so log it and fall back on the
+          // class name.
+          LOG.error("getting attribute " + prs + " of " + oname
+              + " threw an exception", e);
+        }
+      } catch (InstanceNotFoundException e) {
+        // The bean was not found (it may have been unregistered), so don't output it.
+        continue;
+      } catch (IntrospectionException e) {
+        // This is an internal error; something odd happened with reflection, so
+        // log it and don't output the bean.
+        LOG.error("Problem while trying to process JMX query: " + qry
+            + " with MBean " + oname, e);
+        continue;
+      } catch (ReflectionException e) {
+        // This happens when the code inside the JMX bean threw an exception, so
+        // log it and don't output the bean.
+        LOG.error("Problem while trying to process JMX query: " + qry
+            + " with MBean " + oname, e);
+        continue;
+      }
+
+      jg.writeStartObject();
+      jg.writeStringField("name", oname.toString());
+
+      jg.writeStringField("modelerType", code);
+      if ((attribute != null) && (attributeinfo == null)) {
+        jg.writeStringField("result", "ERROR");
+        jg.writeStringField("message", "No attribute with name " + attribute
+            + " was found.");
+        jg.writeEndObject();
+        jg.writeEndArray();
+        jg.close();
+        response.setStatus(HttpServletResponse.SC_NOT_FOUND);
+        return;
+      }
+
+      if (attribute != null) {
+        writeAttribute(jg, attribute, attributeinfo);
+      } else {
+        MBeanAttributeInfo attrs[] = minfo.getAttributes();
+        for (int i = 0; i < attrs.length; i++) {
+          writeAttribute(jg, oname, attrs[i]);
+        }
+      }
+      jg.writeEndObject();
+    }
+    jg.writeEndArray();
+  }
+
+  private void writeAttribute(JsonGenerator jg, ObjectName oname, MBeanAttributeInfo attr) throws IOException {
+    if (!attr.isReadable()) {
+      return;
+    }
+    String attName = attr.getName();
+    if ("modelerType".equals(attName)) {
+      return;
+    }
+    if (attName.indexOf("=") >= 0 || attName.indexOf(":") >= 0
+        || attName.indexOf(" ") >= 0) {
+      return;
+    }
+    Object value = null;
+    try {
+      value = mBeanServer.getAttribute(oname, attName);
+    } catch (RuntimeMBeanException e) {
+      // UnsupportedOperationExceptions happen in the normal course of business,
+      // so no need to log them as errors all the time.
+      if (e.getCause() instanceof UnsupportedOperationException) {
+        LOG.debug("getting attribute "+attName+" of "+oname+" threw an exception", e);
+      } else {
+        LOG.error("getting attribute "+attName+" of "+oname+" threw an exception", e);
+      }
+      return;
+    } catch (RuntimeErrorException e) {
+      // RuntimeErrorException happens when an unexpected failure occurs in getAttribute
+      // for example https://issues.apache.org/jira/browse/DAEMON-120
+      LOG.debug("getting attribute "+attName+" of "+oname+" threw an exception", e);
+      return;
+    } catch (AttributeNotFoundException e) {
+      // The attribute was not found. This should never happen, because the bean
+      // just told us it has this attribute; if it does happen, simply skip
+      // the attribute.
+      return;
+    } catch (MBeanException e) {
+      //The code inside the attribute getter threw an exception so log it, and
+      // skip outputting the attribute
+      LOG.error("getting attribute "+attName+" of "+oname+" threw an exception", e);
+      return;
+    } catch (RuntimeException e) {
+      // Even though an MBeanException is available to wrap them, runtime exceptions
+      // can still leak through, so treat them the same as MBeanException.
+      LOG.error("getting attribute "+attName+" of "+oname+" threw an exception", e);
+      return;
+    } catch (ReflectionException e) {
+      // This happens when reflectively invoking the attribute getter inside the
+      // JMX bean threw an exception, so log it and skip outputting the attribute.
+      LOG.error("getting attribute "+attName+" of "+oname+" threw an exception", e);
+      return;
+    } catch (InstanceNotFoundException e) {
+      // The MBean itself was not found, which should never happen because we just
+      // accessed it (perhaps it was unregistered in between); if it does happen,
+      // simply skip the attribute.
+      return;
+    }
+
+    writeAttribute(jg, attName, value);
+  }
+
+  private void writeAttribute(JsonGenerator jg, String attName, Object value) throws IOException {
+    jg.writeFieldName(attName);
+    writeObject(jg, value);
+  }
+
+  private void writeObject(JsonGenerator jg, Object value) throws IOException {
+    if(value == null) {
+      jg.writeNull();
+    } else {
+      Class<?> c = value.getClass();
+      if (c.isArray()) {
+        jg.writeStartArray();
+        int len = Array.getLength(value);
+        for (int j = 0; j < len; j++) {
+          Object item = Array.get(value, j);
+          writeObject(jg, item);
+        }
+        jg.writeEndArray();
+      } else if(value instanceof Number) {
+        Number n = (Number)value;
+        jg.writeNumber(n.toString());
+      } else if(value instanceof Boolean) {
+        Boolean b = (Boolean)value;
+        jg.writeBoolean(b);
+      } else if(value instanceof CompositeData) {
+        CompositeData cds = (CompositeData)value;
+        CompositeType comp = cds.getCompositeType();
+        Set<String> keys = comp.keySet();
+        jg.writeStartObject();
+        for(String key: keys) {
+          writeAttribute(jg, key, cds.get(key));
+        }
+        jg.writeEndObject();
+      } else if(value instanceof TabularData) {
+        TabularData tds = (TabularData)value;
+        jg.writeStartArray();
+        for(Object entry : tds.values()) {
+          writeObject(jg, entry);
+        }
+        jg.writeEndArray();
+      } else {
+        jg.writeString(value.toString());
+      }
+    }
+  }
+}
\ No newline at end of file
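
A quick way to see what the servlet above returns: it answers GET requests with
either "?qry=<ObjectName pattern>" (list all matching beans) or
"?get=<ObjectName>::<attribute>" (return a single attribute). A minimal client
sketch, assuming the servlet is mounted at /jmx on the web UI port (the /jmx
path shows up in the navbar of hiveserver2.jsp below; the host and port used
here are assumptions, not part of this patch):

  import java.io.BufferedReader;
  import java.io.InputStreamReader;
  import java.net.HttpURLConnection;
  import java.net.URL;

  public class JmxQueryExample {
    public static void main(String[] args) throws Exception {
      // List the JVM memory bean; replace the query with any ObjectName pattern.
      URL url = new URL("http://localhost:10002/jmx?qry=java.lang:type=Memory");
      HttpURLConnection conn = (HttpURLConnection) url.openConnection();
      conn.setRequestMethod("GET");
      try (BufferedReader in = new BufferedReader(
          new InputStreamReader(conn.getInputStream(), "UTF-8"))) {
        String line;
        while ((line = in.readLine()) != null) {
          System.out.println(line);   // pretty-printed JSON: {"beans":[{...}]}
        }
      } finally {
        conn.disconnect();
      }
    }
  }

A malformed "get" query (anything without exactly one "::") comes back as
{"result":"ERROR"} with HTTP 400, and an unknown attribute name yields HTTP
404, as handled in doGet() and listBeans() above.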

http://git-wip-us.apache.org/repos/asf/hive/blob/eb1b80d9/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 34bdbf6..22b4ca8 100644
--- a/pom.xml
+++ b/pom.xml
@@ -134,6 +134,7 @@
     <jackson.version>1.9.2</jackson.version>
     <!-- jackson 1 and 2 lines can coexist without issue, as they have different artifactIds -->
     <jackson.new.version>2.4.2</jackson.new.version>
+    <jasper.version>5.5.23</jasper.version>
     <javaewah.version>0.3.2</javaewah.version>
     <javolution.version>5.5.1</javolution.version>
     <jdo-api.version>3.0.1</jdo-api.version>

http://git-wip-us.apache.org/repos/asf/hive/blob/eb1b80d9/ql/pom.xml
----------------------------------------------------------------------
diff --git a/ql/pom.xml b/ql/pom.xml
index d893099..145ba9b 100644
--- a/ql/pom.xml
+++ b/ql/pom.xml
@@ -44,6 +44,12 @@
       <groupId>org.apache.hive</groupId>
       <artifactId>hive-common</artifactId>
       <version>${project.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>org.eclipse.jetty.aggregate</groupId>
+          <artifactId>jetty-all</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>

http://git-wip-us.apache.org/repos/asf/hive/blob/eb1b80d9/service/pom.xml
----------------------------------------------------------------------
diff --git a/service/pom.xml b/service/pom.xml
index afa52cf..ebc5966 100644
--- a/service/pom.xml
+++ b/service/pom.xml
@@ -82,6 +82,16 @@
         <version>${jetty.version}</version>
     </dependency>
     <dependency>
+      <groupId>tomcat</groupId>
+      <artifactId>jasper-compiler</artifactId>
+      <version>${jasper.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>tomcat</groupId>
+      <artifactId>jasper-runtime</artifactId>
+      <version>${jasper.version}</version>
+    </dependency>
+    <dependency>
       <groupId>org.apache.thrift</groupId>
       <artifactId>libfb303</artifactId>
       <version>${libfb303.version}</version>
@@ -140,6 +150,14 @@
   <build>
     <sourceDirectory>${basedir}/src/java</sourceDirectory>
     <testSourceDirectory>${basedir}/src/test</testSourceDirectory>
+    <resources>
+      <resource>
+        <directory>${project.build.directory}</directory>
+        <includes>
+          <include>hive-webapps/**</include>
+        </includes>
+      </resource>
+    </resources>
     <plugins>
       <plugin>
         <groupId>org.codehaus.mojo</groupId>
@@ -155,12 +173,50 @@
               <sources>
                 <source>src/model</source>
                 <source>src/gen/thrift/gen-javabean</source>
+                <source>${project.build.directory}/generated-sources/java</source>
               </sources>
             </configuration>
           </execution>
         </executions>
       </plugin>
       <plugin>
+        <artifactId>maven-antrun-plugin</artifactId>
+        <executions>
+          <!-- Generate web app sources -->
+          <execution>
+            <id>generate</id>
+            <phase>generate-sources</phase>
+            <configuration>
+              <target>
+                <property name="build.webapps"
+                  location="${project.build.directory}/hive-webapps"/>
+                <property name="src.webapps"
+                  location="${basedir}/src/resources/hive-webapps"/>
+                <property name="generated.sources"
+                  location="${project.build.directory}/generated-sources"/>
+                <mkdir dir="${build.webapps}"/>
+                <copy todir="${build.webapps}">
+                  <fileset dir="${src.webapps}">
+                    <exclude name="**/*.jsp"/>
+                    <exclude name="**/.*"/>
+                  </fileset>
+                </copy>
+                <taskdef classname="org.apache.jasper.JspC"
+                  name="jspcompiler" classpathref="maven.compile.classpath"/>
+                <mkdir dir="${build.webapps}/hiveserver2/WEB-INF"/>
+                <jspcompiler uriroot="${src.webapps}/hiveserver2"
+                  outputdir="${generated.sources}/java"
+                  package="org.apache.hive.generated.hiveserver2"
+                  webxml="${build.webapps}/hiveserver2/WEB-INF/web.xml"/>
+              </target>
+            </configuration>
+            <goals>
+              <goal>run</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-jar-plugin</artifactId>
         <executions>
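
One build detail worth calling out for anyone editing the web UI:
hiveserver2.jsp is not shipped as a .jsp. The antrun execution above copies
everything except the JSPs into ${project.build.directory}/hive-webapps (which
the new <resources> block packages into the jar), and org.apache.jasper.JspC
compiles the JSP into Java servlet source under target/generated-sources/java
in the org.apache.hive.generated.hiveserver2 package; the extra source root
registered in the org.codehaus.mojo plugin execution above pulls that generated
code into the normal compile. The tomcat jasper-compiler and jasper-runtime
dependencies added above supply JspC itself and the runtime classes the
generated servlet needs.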

http://git-wip-us.apache.org/repos/asf/hive/blob/eb1b80d9/service/src/java/org/apache/hive/service/cli/operation/Operation.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/operation/Operation.java b/service/src/java/org/apache/hive/service/cli/operation/Operation.java
index 25cefc2..d2b3f9c 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/Operation.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/Operation.java
@@ -73,7 +73,7 @@ public abstract class Operation {
   protected Map<String, String> confOverlay = new HashMap<String, String>();
 
   private long operationTimeout;
-  private long lastAccessTime;
+  private volatile long lastAccessTime;
 
   protected static final EnumSet<FetchOrientation> DEFAULT_FETCH_ORIENTATION_SET =
       EnumSet.of(FetchOrientation.FETCH_NEXT,FetchOrientation.FETCH_FIRST);
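
The volatile qualifier matters because lastAccessTime is written by the handler
thread serving the client but read from a different thread when expired
operations are swept (see removeTimedOutOperation in OperationManager below,
which no longer runs under a lock). A minimal sketch of the access pattern
(names are illustrative, not the exact Hive code):

  class TimedOutSketch {
    private volatile long lastAccessTime = System.currentTimeMillis();
    private final long operationTimeout = 60_000L;

    // Handler thread: refresh on every client interaction.
    void touch() {
      lastAccessTime = System.currentTimeMillis();
    }

    // Sweep thread: without volatile this read could keep seeing a stale value.
    boolean isTimedOut(long current) {
      return operationTimeout > 0 && lastAccessTime + operationTimeout <= current;
    }
  }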

http://git-wip-us.apache.org/repos/asf/hive/blob/eb1b80d9/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java b/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java
index b0bd351..92135cd 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java
@@ -20,12 +20,12 @@ package org.apache.hive.service.cli.operation;
 
 import java.sql.SQLException;
 import java.util.ArrayList;
-import java.util.HashMap;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.common.metrics.common.Metrics;
 import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
 import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
@@ -49,6 +49,7 @@ import org.apache.logging.log4j.core.Appender;
 import org.apache.logging.log4j.core.LoggerContext;
 import org.apache.logging.log4j.core.config.Configuration;
 import org.apache.logging.log4j.core.config.LoggerConfig;
+import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
@@ -57,8 +58,8 @@ import org.slf4j.LoggerFactory;
  */
 public class OperationManager extends AbstractService {
   private final Logger LOG = LoggerFactory.getLogger(OperationManager.class.getName());
-  private final Map<OperationHandle, Operation> handleToOperation =
-      new HashMap<OperationHandle, Operation>();
+  private final ConcurrentHashMap<OperationHandle, Operation> handleToOperation =
+      new ConcurrentHashMap<OperationHandle, Operation>();
 
   public OperationManager() {
     super(OperationManager.class.getSimpleName());
@@ -165,24 +166,24 @@ public class OperationManager extends AbstractService {
     return operation;
   }
 
-  private synchronized Operation getOperationInternal(OperationHandle operationHandle) {
+  private Operation getOperationInternal(OperationHandle operationHandle) {
     return handleToOperation.get(operationHandle);
   }
 
-  private synchronized Operation removeTimedOutOperation(OperationHandle operationHandle) {
+  private Operation removeTimedOutOperation(OperationHandle operationHandle) {
     Operation operation = handleToOperation.get(operationHandle);
     if (operation != null && operation.isTimedOut(System.currentTimeMillis())) {
-      handleToOperation.remove(operationHandle);
+      handleToOperation.remove(operationHandle, operation);
       return operation;
     }
     return null;
   }
 
-  private synchronized void addOperation(Operation operation) {
+  private void addOperation(Operation operation) {
     handleToOperation.put(operation.getHandle(), operation);
   }
 
-  private synchronized Operation removeOperation(OperationHandle opHandle) {
+  private Operation removeOperation(OperationHandle opHandle) {
     return handleToOperation.remove(opHandle);
   }
 
@@ -292,6 +293,11 @@ public class OperationManager extends AbstractService {
     return schema;
   }
 
+  public Collection<Operation> getOperations() {
+    return Collections.unmodifiableCollection(handleToOperation.values());
+  }
+
+
   public OperationLog getOperationLogByThread() {
     return OperationLog.getCurrentOperationLog();
   }
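
With handleToOperation switched to a ConcurrentHashMap, the synchronized
keyword can be dropped from the small accessor methods; the map itself provides
the needed atomicity. The one subtle change is removeTimedOutOperation, which
now uses the two-argument remove(handle, operation): it unregisters the entry
only if it still maps to the exact Operation instance whose timeout was just
checked, so an operation concurrently re-registered under the same handle is
not removed by mistake. A small illustrative sketch (not the exact Hive code):

  import java.util.concurrent.ConcurrentHashMap;

  public class RemoveIfSameSketch {
    public static void main(String[] args) {
      ConcurrentHashMap<String, String> map = new ConcurrentHashMap<>();
      map.put("handle", "op-1");

      String seen = map.get("handle");   // sweep thread observes "op-1"
      // ... meanwhile another thread could do: map.put("handle", "op-2");
      map.remove("handle", seen);        // removes the entry only if it is still "op-1"
    }
  }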

http://git-wip-us.apache.org/repos/asf/hive/blob/eb1b80d9/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java b/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
index 1331a99..d90dd0d 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
@@ -162,12 +162,16 @@ public class SQLOperation extends ExecuteStatementOperation {
     } catch (HiveSQLException e) {
       setState(OperationState.ERROR);
       throw e;
-    } catch (Exception e) {
+    } catch (Throwable e) {
       setState(OperationState.ERROR);
       throw new HiveSQLException("Error running query: " + e.toString(), e);
     }
   }
 
+  public String getQueryStr() {
+    return driver == null || driver.getPlan() == null ? "Unknown" : driver.getPlan().getQueryStr();
+  }
+
   private void runQuery(HiveConf sqlOperationConf) throws HiveSQLException {
     try {
       // In Hive server mode, we are not able to retry in the FetchTask
@@ -190,7 +194,7 @@ public class SQLOperation extends ExecuteStatementOperation {
         setState(OperationState.ERROR);
         throw e;
       }
-    } catch (Exception e) {
+    } catch (Throwable e) {
       setState(OperationState.ERROR);
       throw new HiveSQLException("Error running query: " + e.toString(), e);
     }
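
Two small but useful changes here. Catching Throwable instead of Exception
means that an Error raised during compilation or execution (an OutOfMemoryError
from a background task, for instance) still moves the operation into
OperationState.ERROR and surfaces to the client as a HiveSQLException, rather
than escaping without the state transition. And getQueryStr() is deliberately
null-safe, returning "Unknown" when the Driver or its plan is not available
yet, since the new web UI (hiveserver2.jsp below) can ask for the query text at
any point in the operation's lifetime.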

http://git-wip-us.apache.org/repos/asf/hive/blob/eb1b80d9/service/src/java/org/apache/hive/service/cli/session/HiveSessionBase.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/session/HiveSessionBase.java b/service/src/java/org/apache/hive/service/cli/session/HiveSessionBase.java
index 1ab5652..5d12a85 100644
--- a/service/src/java/org/apache/hive/service/cli/session/HiveSessionBase.java
+++ b/service/src/java/org/apache/hive/service/cli/session/HiveSessionBase.java
@@ -85,4 +85,8 @@ public interface HiveSessionBase {
   void setIpAddress(String ipAddress);
 
   long getLastAccessTime();
+
+  long getCreationTime();
+
+  int getOpenOperationCount();
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/eb1b80d9/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
index a14908b..50c912e 100644
--- a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
+++ b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
@@ -85,6 +85,7 @@ public class HiveSessionImpl implements HiveSession {
   private String username;
   private final String password;
   private final HiveConf hiveConf;
+  private final long creationTime;
   // TODO: some SessionState internals are not thread safe. The compile-time internals are synced
   //       via session-scope or global compile lock. The run-time internals work by magic!
   //       They probably work because races are relatively unlikely and few tools run parallel
@@ -114,6 +115,7 @@ public class HiveSessionImpl implements HiveSession {
       HiveConf serverhiveConf, String ipAddress) {
     this.username = username;
     this.password = password;
+    creationTime = System.currentTimeMillis();
     this.sessionHandle = new SessionHandle(protocol);
     this.hiveConf = new HiveConf(serverhiveConf);
     this.ipAddress = ipAddress;
@@ -697,6 +699,11 @@ public class HiveSessionImpl implements HiveSession {
   }
 
   @Override
+  public long getCreationTime() {
+    return creationTime;
+  }
+
+  @Override
   public void closeExpiredOperations() {
     OperationHandle[] handles;
     synchronized (opHandleSet) {
@@ -785,6 +792,11 @@ public class HiveSessionImpl implements HiveSession {
   }
 
   @Override
+  public int getOpenOperationCount() {
+    return opHandleSet.size();
+  }
+
+  @Override
   public String getIpAddress() {
     return ipAddress;
   }
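
creationTime is captured once in the constructor and exposed through
getCreationTime(); together with the existing getLastAccessTime() and the new
getOpenOperationCount(), that is exactly what hiveserver2.jsp (further down)
needs to render the Active Time, Idle Time and Operation Count columns without
reaching into session internals.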

http://git-wip-us.apache.org/repos/asf/hive/blob/eb1b80d9/service/src/java/org/apache/hive/service/cli/session/SessionManager.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/session/SessionManager.java b/service/src/java/org/apache/hive/service/cli/session/SessionManager.java
index d11cf3d..637cdca 100644
--- a/service/src/java/org/apache/hive/service/cli/session/SessionManager.java
+++ b/service/src/java/org/apache/hive/service/cli/session/SessionManager.java
@@ -22,6 +22,8 @@ import java.io.File;
 import java.io.IOException;
 import java.lang.reflect.Constructor;
 import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.Date;
 import java.util.List;
 import java.util.Map;
@@ -29,7 +31,6 @@ import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.Future;
 import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
@@ -38,18 +39,19 @@ import org.apache.hadoop.hive.common.metrics.common.Metrics;
 import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
 import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
 import org.apache.hadoop.hive.common.metrics.common.MetricsVariable;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.ql.hooks.HookUtils;
 import org.apache.hive.service.CompositeService;
 import org.apache.hive.service.cli.HiveSQLException;
 import org.apache.hive.service.cli.SessionHandle;
+import org.apache.hive.service.cli.operation.Operation;
 import org.apache.hive.service.cli.operation.OperationManager;
 import org.apache.hive.service.cli.thrift.TProtocolVersion;
 import org.apache.hive.service.server.HiveServer2;
 import org.apache.hive.service.server.ThreadFactoryWithGarbageCleanup;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * SessionManager.
@@ -452,6 +454,14 @@ public class SessionManager extends CompositeService {
     return backgroundOperationPool.submit(r);
   }
 
+  public Collection<Operation> getOperations() {
+    return operationManager.getOperations();
+  }
+
+  public Collection<HiveSession> getSessions() {
+    return Collections.unmodifiableCollection(handleToSession.values());
+  }
+
   public int getOpenSessionCount() {
     return handleToSession.size();
   }
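
Both new accessors hand out unmodifiable views over live maps
(handleToOperation in OperationManager is now a ConcurrentHashMap, and
handleToSession is used the same way), so the web UI can iterate sessions and
operations without taking any lock: iteration is weakly consistent, never
throws ConcurrentModificationException, and may simply miss entries that are
added or removed while the page is being rendered. Any attempt to modify the
returned collection throws UnsupportedOperationException.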

http://git-wip-us.apache.org/repos/asf/hive/blob/eb1b80d9/service/src/java/org/apache/hive/service/server/HiveServer2.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/server/HiveServer2.java b/service/src/java/org/apache/hive/service/server/HiveServer2.java
index b30b6a2..204eb5a 100644
--- a/service/src/java/org/apache/hive/service/server/HiveServer2.java
+++ b/service/src/java/org/apache/hive/service/server/HiveServer2.java
@@ -56,9 +56,12 @@ import org.apache.hadoop.hive.ql.exec.tez.TezSessionPoolManager;
 import org.apache.hadoop.hive.ql.util.ZooKeeperHiveHelper;
 import org.apache.hadoop.hive.shims.Utils;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hive.common.util.HiveStringUtils;
 import org.apache.hive.common.util.HiveVersionInfo;
+import org.apache.hive.http.HttpServer;
 import org.apache.hive.service.CompositeService;
+import org.apache.hive.service.ServiceException;
 import org.apache.hive.service.cli.CLIService;
 import org.apache.hive.service.cli.thrift.ThriftBinaryCLIService;
 import org.apache.hive.service.cli.thrift.ThriftCLIService;
@@ -86,6 +89,7 @@ public class HiveServer2 extends CompositeService {
   private String znodePath;
   private CuratorFramework zooKeeperClient;
   private boolean registeredWithZooKeeper = false;
+  private HttpServer webServer; // Web UI
 
   public HiveServer2() {
     super(HiveServer2.class.getSimpleName());
@@ -116,6 +120,32 @@ public class HiveServer2 extends CompositeService {
     } catch (Throwable t) {
       throw new Error("Unable to intitialize HiveServer2", t);
     }
+    // Setup web UI
+    try {
+      if (hiveConf.getBoolVar(ConfVars.HIVE_IN_TEST)) {
+        LOG.info("Web UI is disabled since in test mode");
+      } else {
+        int webUIPort =
+          hiveConf.getIntVar(ConfVars.HIVE_SERVER2_WEBUI_PORT);
+        if (webUIPort <= 0) {
+          LOG.info("Web UI is disabled since port is set to " + webUIPort);
+        } else {
+          AccessControlList adminsAcl =
+            new AccessControlList(hiveConf.getVar(ConfVars.USERS_IN_ADMIN_ROLE));
+          hiveConf.set("startcode", String.valueOf(System.currentTimeMillis()));
+          webServer = new HttpServer("hiveserver2",
+            hiveConf.getVar(ConfVars.HIVE_SERVER2_WEBUI_BIND_HOST),
+            webUIPort,
+            hiveConf.getIntVar(ConfVars.HIVE_SERVER2_WEBUI_MAX_THREADS),
+            hiveConf, adminsAcl);
+          // Expose the SessionManager so the web UI can list sessions and operations
+          webServer.setContextAttribute("hive.sm",
+            cliService.getSessionManager());
+        }
+      }
+    } catch (IOException ie) {
+      throw new ServiceException(ie);
+    }
     // Add a shutdown hook for catching SIGTERM & SIGINT
     Runtime.getRuntime().addShutdownHook(new Thread() {
       @Override
@@ -371,6 +401,15 @@ public class HiveServer2 extends CompositeService {
   @Override
   public synchronized void start() {
     super.start();
+    if (webServer != null) {
+      try {
+        webServer.start();
+        LOG.info("Web UI has started on port " + webServer.getPort());
+      } catch (Exception e) {
+        LOG.error("Error starting Web UI: ", e);
+        throw new ServiceException(e);
+      }
+    }
   }
 
   @Override
@@ -378,6 +417,14 @@ public class HiveServer2 extends CompositeService {
     LOG.info("Shutting down HiveServer2");
     HiveConf hiveConf = this.getHiveConf();
     super.stop();
+    if (webServer != null) {
+      try {
+        webServer.stop();
+        LOG.info("Web UI has stopped");
+      } catch (Exception e) {
+        LOG.error("Error stopping Web UI: ", e);
+      }
+    }
     // Shutdown Metrics
     if (MetricsFactory.getInstance() != null) {
       try {
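
For completeness, a minimal sketch of turning the UI on, assuming the ConfVars
used above correspond to the properties hive.server2.webui.bind.host,
hive.server2.webui.port and hive.server2.webui.max.threads (the property names
are an assumption here; only the ConfVars constants appear in this patch). A
port of 0 or less, or running with the HIVE_IN_TEST flag set, leaves the UI
off:

  import org.apache.hadoop.hive.conf.HiveConf;

  public class WebUiConfSketch {
    public static void main(String[] args) {
      HiveConf conf = new HiveConf();
      conf.set("hive.server2.webui.bind.host", "0.0.0.0");
      conf.set("hive.server2.webui.port", "10002");     // <= 0 disables the UI
      conf.set("hive.server2.webui.max.threads", "50");
      // HiveServer2's init path builds the HttpServer from these values and hands
      // the SessionManager to it; start()/stop() bring the embedded server up and down.
    }
  }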

http://git-wip-us.apache.org/repos/asf/hive/blob/eb1b80d9/service/src/resources/hive-webapps/hiveserver2/hiveserver2.jsp
----------------------------------------------------------------------
diff --git a/service/src/resources/hive-webapps/hiveserver2/hiveserver2.jsp b/service/src/resources/hive-webapps/hiveserver2/hiveserver2.jsp
new file mode 100644
index 0000000..4fad63c
--- /dev/null
+++ b/service/src/resources/hive-webapps/hiveserver2/hiveserver2.jsp
@@ -0,0 +1,186 @@
+<%--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+--%>
+<%@ page contentType="text/html;charset=UTF-8"
+  import="org.apache.hadoop.conf.Configuration"
+  import="org.apache.hive.common.util.HiveVersionInfo"
+  import="org.apache.hive.service.cli.operation.Operation"
+  import="org.apache.hive.service.cli.operation.SQLOperation"
+  import="org.apache.hive.service.cli.session.SessionManager"
+  import="org.apache.hive.service.cli.session.HiveSession"
+  import="javax.servlet.ServletContext"
+  import="java.util.Collection"
+  import="java.util.Date"
+%>
+
+<%
+ServletContext ctx = getServletContext();
+Configuration conf = (Configuration)ctx.getAttribute("hive.conf");
+long startcode = conf.getLong("startcode", System.currentTimeMillis());
+SessionManager sessionManager =
+  (SessionManager)ctx.getAttribute("hive.sm");
+%>
+
+<!--[if IE]>
+<!DOCTYPE html>
+<![endif]-->
+<?xml version="1.0" encoding="UTF-8" ?>
+<html lang="en">
+  <head>
+    <meta charset="utf-8">
+    <title>HiveServer2</title>
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <meta name="description" content="">
+
+    <link href="/static/css/bootstrap.min.css" rel="stylesheet">
+    <link href="/static/css/bootstrap-theme.min.css" rel="stylesheet">
+    <link href="/static/css/hive.css" rel="stylesheet">
+  </head>
+
+  <body>
+  <div class="navbar  navbar-fixed-top navbar-default">
+      <div class="container">
+          <div class="navbar-header">
+              <button type="button" class="navbar-toggle" data-toggle="collapse" data-target=".navbar-collapse">
+                  <span class="icon-bar"></span>
+                  <span class="icon-bar"></span>
+                  <span class="icon-bar"></span>
+              </button>
+              <a class="navbar-brand" href="/hiveserver2.jsp"><img src="/static/hive_logo.jpeg" alt="Hive Logo"/></a>
+          </div>
+          <div class="collapse navbar-collapse">
+              <ul class="nav navbar-nav">
+                <li class="active"><a href="/">Home</a></li>
+                <li><a href="/logs/">Local logs</a></li>
+                <li><a href="/jmx">Metrics Dump</a></li>
+                <li><a href="/conf">Hive Configuration</a></li>
+            </ul>
+          </div><!--/.nav-collapse -->
+        </div>
+      </div>
+    </div>
+
+<div class="container">
+    <div class="row inner_header">
+        <div class="page-header">
+            <h1>HiveServer2</h1>
+        </div>
+    </div>
+    <div class="row">
+
+<%
+if (sessionManager != null) { 
+  long currentTime = System.currentTimeMillis();
+%> 
+
+<section>
+<h2>Active Sessions</h2>
+<table id="attributes_table" class="table table-striped">
+    <tr>
+        <th>User Name</th>
+        <th>IP Address</th>
+        <th>Operation Count</th>
+        <th>Active Time (s)</th>
+        <th>Idle Time (s)</th>
+    </tr>
+<%
+Collection<HiveSession> hiveSessions = sessionManager.getSessions();
+for (HiveSession hiveSession: hiveSessions) {
+%>
+    <tr>
+        <td><%= hiveSession.getUserName() %></td>
+        <td><%= hiveSession.getIpAddress() %></td>
+        <td><%= hiveSession.getOpenOperationCount() %></td>
+        <td><%= (currentTime - hiveSession.getCreationTime())/1000 %></td>
+        <td><%= (currentTime - hiveSession.getLastAccessTime())/1000 %></td>
+    </tr>
+<%
+}
+%>
+<tr>
+  <td colspan="5">Total number of sessions: <%= hiveSessions.size() %></td>
+</tr>
+</table>
+</section>
+
+<section>
+<h2>Queries</h2>
+<table id="attributes_table" class="table table-striped">
+    <tr>
+        <th>User Name</th>
+        <th>Query</th>
+        <th>State</th>
+        <th>Elapsed Time (s)</th>
+    </tr>
+<%
+int queries = 0;
+Collection<Operation> operations = sessionManager.getOperations();
+for (Operation operation: operations) {
+  if (operation instanceof SQLOperation) {
+    SQLOperation query = (SQLOperation) operation;
+    queries++;
+%>
+    <tr>
+        <td><%= query.getParentSession().getUserName() %></td>
+        <td><%= query.getQueryStr() %></td>
+        <td><%= query.getStatus().getState() %></td>
+        <td><%= (currentTime - query.getLastAccessTime())/1000 %></td>
+    </tr>
+<%
+  }
+}
+%>
+<tr>
+  <td colspan="4">Total number of queries: <%= queries %></td>
+</tr>
+</table>
+</section>
+<% 
+ }
+%>
+
+    <section>
+    <h2>Software Attributes</h2>
+    <table id="attributes_table" class="table table-striped">
+        <tr>
+            <th>Attribute Name</th>
+            <th>Value</th>
+            <th>Description</th>
+        </tr>
+        <tr>
+            <td>Hive Version</td>
+            <td><%= HiveVersionInfo.getVersion() %>, r<%= HiveVersionInfo.getRevision() %></td>
+            <td>Hive version and revision</td>
+        </tr>
+        <tr>
+            <td>Hive Compiled</td>
+            <td><%= HiveVersionInfo.getDate() %>, <%= HiveVersionInfo.getUser() %></td>
+            <td>When Hive was compiled and by whom</td>
+        </tr>
+        <tr>
+            <td>HiveServer2 Start Time</td>
+            <td><%= new Date(startcode) %></td>
+            <td>Date stamp of when this HiveServer2 was started</td>
+        </tr>
+    </table>
+    </section>
+    </div>
+</div>
+</body>
+</html>

http://git-wip-us.apache.org/repos/asf/hive/blob/eb1b80d9/service/src/resources/hive-webapps/hiveserver2/index.html
----------------------------------------------------------------------
diff --git a/service/src/resources/hive-webapps/hiveserver2/index.html b/service/src/resources/hive-webapps/hiveserver2/index.html
new file mode 100644
index 0000000..f18ba53
--- /dev/null
+++ b/service/src/resources/hive-webapps/hiveserver2/index.html
@@ -0,0 +1,20 @@
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<meta HTTP-EQUIV="REFRESH" content="0;url=/hiveserver2.jsp"/>

http://git-wip-us.apache.org/repos/asf/hive/blob/eb1b80d9/service/src/resources/hive-webapps/static/css/bootstrap-theme.min.css
----------------------------------------------------------------------
diff --git a/service/src/resources/hive-webapps/static/css/bootstrap-theme.min.css b/service/src/resources/hive-webapps/static/css/bootstrap-theme.min.css
new file mode 100755
index 0000000..c31428b
--- /dev/null
+++ b/service/src/resources/hive-webapps/static/css/bootstrap-theme.min.css
@@ -0,0 +1,10 @@
+/*!
+ * Bootstrap v3.0.0
+ *
+ * Copyright 2013 Twitter, Inc
+ * Licensed under the Apache License v2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Designed and built with all the love in the world by @mdo and @fat.
+ */
+.btn-default,.btn-primary,.btn-success,.btn-info,.btn-warning,.btn-danger{text-shadow:0 -1px 0 rgba(0,0,0,0.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,0.15),0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 0 rgba(255,255,255,0.15),0 1px 1px rgba(0,0,0,0.075)}.btn-default:active,.btn-primary:active,.btn-success:active,.btn-info:active,.btn-warning:active,.btn-danger:active,.btn-default.active,.btn-primary.active,.btn-success.active,.btn-info.active,.btn-warning.active,.btn-danger.active{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,0.125);box-shadow:inset 0 3px 5px rgba(0,0,0,0.125)}.btn:active,.btn.active{background-image:none}.btn-default{text-shadow:0 1px 0 #fff;background-image:-webkit-gradient(linear,left 0,left 100%,from(#fff),to(#e6e6e6));background-image:-webkit-linear-gradient(top,#fff,0%,#e6e6e6,100%);background-image:-moz-linear-gradient(top,#fff 0,#e6e6e6 100%);background-image:linear-gradient(to bottom,#fff 0,#e6e6e6 100%);background-repeat:repeat-x;border-co
 lor:#e0e0e0;border-color:#ccc;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff',endColorstr='#ffe6e6e6',GradientType=0)}.btn-default:active,.btn-default.active{background-color:#e6e6e6;border-color:#e0e0e0}.btn-primary{background-image:-webkit-gradient(linear,left 0,left 100%,from(#428bca),to(#3071a9));background-image:-webkit-linear-gradient(top,#428bca,0%,#3071a9,100%);background-image:-moz-linear-gradient(top,#428bca 0,#3071a9 100%);background-image:linear-gradient(to bottom,#428bca 0,#3071a9 100%);background-repeat:repeat-x;border-color:#2d6ca2;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca',endColorstr='#ff3071a9',GradientType=0)}.btn-primary:active,.btn-primary.active{background-color:#3071a9;border-color:#2d6ca2}.btn-success{background-image:-webkit-gradient(linear,left 0,left 100%,from(#5cb85c),to(#449d44));background-image:-webkit-linear-gradient(top,#5cb85c,0%,#449d44,100%);background-image:-moz-linear-gradient(top,#5cb
 85c 0,#449d44 100%);background-image:linear-gradient(to bottom,#5cb85c 0,#449d44 100%);background-repeat:repeat-x;border-color:#419641;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c',endColorstr='#ff449d44',GradientType=0)}.btn-success:active,.btn-success.active{background-color:#449d44;border-color:#419641}.btn-warning{background-image:-webkit-gradient(linear,left 0,left 100%,from(#f0ad4e),to(#ec971f));background-image:-webkit-linear-gradient(top,#f0ad4e,0%,#ec971f,100%);background-image:-moz-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:linear-gradient(to bottom,#f0ad4e 0,#ec971f 100%);background-repeat:repeat-x;border-color:#eb9316;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e',endColorstr='#ffec971f',GradientType=0)}.btn-warning:active,.btn-warning.active{background-color:#ec971f;border-color:#eb9316}.btn-danger{background-image:-webkit-gradient(linear,left 0,left 100%,from(#d9534f),to(#c9302c));background-i
 mage:-webkit-linear-gradient(top,#d9534f,0%,#c9302c,100%);background-image:-moz-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:linear-gradient(to bottom,#d9534f 0,#c9302c 100%);background-repeat:repeat-x;border-color:#c12e2a;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f',endColorstr='#ffc9302c',GradientType=0)}.btn-danger:active,.btn-danger.active{background-color:#c9302c;border-color:#c12e2a}.btn-info{background-image:-webkit-gradient(linear,left 0,left 100%,from(#5bc0de),to(#31b0d5));background-image:-webkit-linear-gradient(top,#5bc0de,0%,#31b0d5,100%);background-image:-moz-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:linear-gradient(to bottom,#5bc0de 0,#31b0d5 100%);background-repeat:repeat-x;border-color:#2aabd2;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de',endColorstr='#ff31b0d5',GradientType=0)}.btn-info:active,.btn-info.active{background-color:#31b0d5;border-color:#2aabd2}.thumbnail,.img-
 thumbnail{-webkit-box-shadow:0 1px 2px rgba(0,0,0,0.075);box-shadow:0 1px 2px rgba(0,0,0,0.075)}.dropdown-menu>li>a:hover,.dropdown-menu>li>a:focus,.dropdown-menu>.active>a,.dropdown-menu>.active>a:hover,.dropdown-menu>.active>a:focus{background-color:#357ebd;background-image:-webkit-gradient(linear,left 0,left 100%,from(#428bca),to(#357ebd));background-image:-webkit-linear-gradient(top,#428bca,0%,#357ebd,100%);background-image:-moz-linear-gradient(top,#428bca 0,#357ebd 100%);background-image:linear-gradient(to bottom,#428bca 0,#357ebd 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca',endColorstr='#ff357ebd',GradientType=0)}.navbar{background-image:-webkit-gradient(linear,left 0,left 100%,from(#fff),to(#f8f8f8));background-image:-webkit-linear-gradient(top,#fff,0%,#f8f8f8,100%);background-image:-moz-linear-gradient(top,#fff 0,#f8f8f8 100%);background-image:linear-gradient(to bottom,#fff 0,#f8f8f8 100%);background-repeat:repe
 at-x;border-radius:4px;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff',endColorstr='#fff8f8f8',GradientType=0);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,0.15),0 1px 5px rgba(0,0,0,0.075);box-shadow:inset 0 1px 0 rgba(255,255,255,0.15),0 1px 5px rgba(0,0,0,0.075)}.navbar .navbar-nav>.active>a{background-color:#f8f8f8}.navbar-brand,.navbar-nav>li>a{text-shadow:0 1px 0 rgba(255,255,255,0.25)}.navbar-inverse{background-image:-webkit-gradient(linear,left 0,left 100%,from(#3c3c3c),to(#222));background-image:-webkit-linear-gradient(top,#3c3c3c,0%,#222,100%);background-image:-moz-linear-gradient(top,#3c3c3c 0,#222 100%);background-image:linear-gradient(to bottom,#3c3c3c 0,#222 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c',endColorstr='#ff222222',GradientType=0)}.navbar-inverse .navbar-nav>.active>a{background-color:#222}.navbar-inverse .navbar-brand,.navbar-inverse .navbar-nav>li>a{text-shadow
 :0 -1px 0 rgba(0,0,0,0.25)}.navbar-static-top,.navbar-fixed-top,.navbar-fixed-bottom{border-radius:0}.alert{text-shadow:0 1px 0 rgba(255,255,255,0.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,0.25),0 1px 2px rgba(0,0,0,0.05);box-shadow:inset 0 1px 0 rgba(255,255,255,0.25),0 1px 2px rgba(0,0,0,0.05)}.alert-success{background-image:-webkit-gradient(linear,left 0,left 100%,from(#dff0d8),to(#c8e5bc));background-image:-webkit-linear-gradient(top,#dff0d8,0%,#c8e5bc,100%);background-image:-moz-linear-gradient(top,#dff0d8 0,#c8e5bc 100%);background-image:linear-gradient(to bottom,#dff0d8 0,#c8e5bc 100%);background-repeat:repeat-x;border-color:#b2dba1;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8',endColorstr='#ffc8e5bc',GradientType=0)}.alert-info{background-image:-webkit-gradient(linear,left 0,left 100%,from(#d9edf7),to(#b9def0));background-image:-webkit-linear-gradient(top,#d9edf7,0%,#b9def0,100%);background-image:-moz-linear-gradient(top,#d9edf7 0,#b9
 def0 100%);background-image:linear-gradient(to bottom,#d9edf7 0,#b9def0 100%);background-repeat:repeat-x;border-color:#9acfea;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7',endColorstr='#ffb9def0',GradientType=0)}.alert-warning{background-image:-webkit-gradient(linear,left 0,left 100%,from(#fcf8e3),to(#f8efc0));background-image:-webkit-linear-gradient(top,#fcf8e3,0%,#f8efc0,100%);background-image:-moz-linear-gradient(top,#fcf8e3 0,#f8efc0 100%);background-image:linear-gradient(to bottom,#fcf8e3 0,#f8efc0 100%);background-repeat:repeat-x;border-color:#f5e79e;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3',endColorstr='#fff8efc0',GradientType=0)}.alert-danger{background-image:-webkit-gradient(linear,left 0,left 100%,from(#f2dede),to(#e7c3c3));background-image:-webkit-linear-gradient(top,#f2dede,0%,#e7c3c3,100%);background-image:-moz-linear-gradient(top,#f2dede 0,#e7c3c3 100%);background-image:linear-gradient(to bottom,#f2dede 0,
 #e7c3c3 100%);background-repeat:repeat-x;border-color:#dca7a7;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede',endColorstr='#ffe7c3c3',GradientType=0)}.progress{background-image:-webkit-gradient(linear,left 0,left 100%,from(#ebebeb),to(#f5f5f5));background-image:-webkit-linear-gradient(top,#ebebeb,0%,#f5f5f5,100%);background-image:-moz-linear-gradient(top,#ebebeb 0,#f5f5f5 100%);background-image:linear-gradient(to bottom,#ebebeb 0,#f5f5f5 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb',endColorstr='#fff5f5f5',GradientType=0)}.progress-bar{background-image:-webkit-gradient(linear,left 0,left 100%,from(#428bca),to(#3071a9));background-image:-webkit-linear-gradient(top,#428bca,0%,#3071a9,100%);background-image:-moz-linear-gradient(top,#428bca 0,#3071a9 100%);background-image:linear-gradient(to bottom,#428bca 0,#3071a9 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient
 (startColorstr='#ff428bca',endColorstr='#ff3071a9',GradientType=0)}.progress-bar-success{background-image:-webkit-gradient(linear,left 0,left 100%,from(#5cb85c),to(#449d44));background-image:-webkit-linear-gradient(top,#5cb85c,0%,#449d44,100%);background-image:-moz-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:linear-gradient(to bottom,#5cb85c 0,#449d44 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c',endColorstr='#ff449d44',GradientType=0)}.progress-bar-info{background-image:-webkit-gradient(linear,left 0,left 100%,from(#5bc0de),to(#31b0d5));background-image:-webkit-linear-gradient(top,#5bc0de,0%,#31b0d5,100%);background-image:-moz-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:linear-gradient(to bottom,#5bc0de 0,#31b0d5 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de',endColorstr='#ff31b0d5',GradientType=0)}.progress-bar-warning{backg
 round-image:-webkit-gradient(linear,left 0,left 100%,from(#f0ad4e),to(#ec971f));background-image:-webkit-linear-gradient(top,#f0ad4e,0%,#ec971f,100%);background-image:-moz-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:linear-gradient(to bottom,#f0ad4e 0,#ec971f 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e',endColorstr='#ffec971f',GradientType=0)}.progress-bar-danger{background-image:-webkit-gradient(linear,left 0,left 100%,from(#d9534f),to(#c9302c));background-image:-webkit-linear-gradient(top,#d9534f,0%,#c9302c,100%);background-image:-moz-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:linear-gradient(to bottom,#d9534f 0,#c9302c 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f',endColorstr='#ffc9302c',GradientType=0)}.list-group{border-radius:4px;-webkit-box-shadow:0 1px 2px rgba(0,0,0,0.075);box-shadow:0 1px 2px rgba(0,0,0,0.075)}.li
 st-group-item.active,.list-group-item.active:hover,.list-group-item.active:focus{text-shadow:0 -1px 0 #3071a9;background-image:-webkit-gradient(linear,left 0,left 100%,from(#428bca),to(#3278b3));background-image:-webkit-linear-gradient(top,#428bca,0%,#3278b3,100%);background-image:-moz-linear-gradient(top,#428bca 0,#3278b3 100%);background-image:linear-gradient(to bottom,#428bca 0,#3278b3 100%);background-repeat:repeat-x;border-color:#3278b3;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca',endColorstr='#ff3278b3',GradientType=0)}.panel{-webkit-box-shadow:0 1px 2px rgba(0,0,0,0.05);box-shadow:0 1px 2px rgba(0,0,0,0.05)}.panel-default>.panel-heading{background-image:-webkit-gradient(linear,left 0,left 100%,from(#f5f5f5),to(#e8e8e8));background-image:-webkit-linear-gradient(top,#f5f5f5,0%,#e8e8e8,100%);background-image:-moz-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);background-repeat:repeat-x
 ;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5',endColorstr='#ffe8e8e8',GradientType=0)}.panel-primary>.panel-heading{background-image:-webkit-gradient(linear,left 0,left 100%,from(#428bca),to(#357ebd));background-image:-webkit-linear-gradient(top,#428bca,0%,#357ebd,100%);background-image:-moz-linear-gradient(top,#428bca 0,#357ebd 100%);background-image:linear-gradient(to bottom,#428bca 0,#357ebd 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca',endColorstr='#ff357ebd',GradientType=0)}.panel-success>.panel-heading{background-image:-webkit-gradient(linear,left 0,left 100%,from(#dff0d8),to(#d0e9c6));background-image:-webkit-linear-gradient(top,#dff0d8,0%,#d0e9c6,100%);background-image:-moz-linear-gradient(top,#dff0d8 0,#d0e9c6 100%);background-image:linear-gradient(to bottom,#dff0d8 0,#d0e9c6 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8
 ',endColorstr='#ffd0e9c6',GradientType=0)}.panel-info>.panel-heading{background-image:-webkit-gradient(linear,left 0,left 100%,from(#d9edf7),to(#c4e3f3));background-image:-webkit-linear-gradient(top,#d9edf7,0%,#c4e3f3,100%);background-image:-moz-linear-gradient(top,#d9edf7 0,#c4e3f3 100%);background-image:linear-gradient(to bottom,#d9edf7 0,#c4e3f3 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7',endColorstr='#ffc4e3f3',GradientType=0)}.panel-warning>.panel-heading{background-image:-webkit-gradient(linear,left 0,left 100%,from(#fcf8e3),to(#faf2cc));background-image:-webkit-linear-gradient(top,#fcf8e3,0%,#faf2cc,100%);background-image:-moz-linear-gradient(top,#fcf8e3 0,#faf2cc 100%);background-image:linear-gradient(to bottom,#fcf8e3 0,#faf2cc 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3',endColorstr='#fffaf2cc',GradientType=0)}.panel-danger>.panel-heading{backgro
 und-image:-webkit-gradient(linear,left 0,left 100%,from(#f2dede),to(#ebcccc));background-image:-webkit-linear-gradient(top,#f2dede,0%,#ebcccc,100%);background-image:-moz-linear-gradient(top,#f2dede 0,#ebcccc 100%);background-image:linear-gradient(to bottom,#f2dede 0,#ebcccc 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede',endColorstr='#ffebcccc',GradientType=0)}.well{background-image:-webkit-gradient(linear,left 0,left 100%,from(#e8e8e8),to(#f5f5f5));background-image:-webkit-linear-gradient(top,#e8e8e8,0%,#f5f5f5,100%);background-image:-moz-linear-gradient(top,#e8e8e8 0,#f5f5f5 100%);background-image:linear-gradient(to bottom,#e8e8e8 0,#f5f5f5 100%);background-repeat:repeat-x;border-color:#dcdcdc;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8',endColorstr='#fff5f5f5',GradientType=0);-webkit-box-shadow:inset 0 1px 3px rgba(0,0,0,0.05),0 1px 0 rgba(255,255,255,0.1);box-shadow:inset 0 1px 3px rgba(0
 ,0,0,0.05),0 1px 0 rgba(255,255,255,0.1)}
\ No newline at end of file


[19/27] hive git commit: HIVE-12496 : Open ServerTransport After MetaStore Initialization (Nemon Lou via Ashutosh Chauhan)

Posted by om...@apache.org.
HIVE-12496 : Open ServerTransport After MetaStore Initialization  (Nemon Lou via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6b725741
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6b725741
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6b725741

Branch: refs/heads/master-fixed
Commit: 6b7257410211299eba9aa89ca51ed0b21489d99e
Parents: a8e61c2
Author: Nemon Lou <lo...@huawei.com>
Authored: Thu Nov 26 11:44:34 2015 -0800
Committer: Owen O'Malley <om...@apache.org>
Committed: Mon Nov 30 11:14:37 2015 -0800

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hive/metastore/HiveMetaStore.java    | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/6b725741/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index a835f6a..00602e1 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -5978,8 +5978,6 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       boolean useCompactProtocol = conf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_COMPACT_PROTOCOL);
       useSasl = conf.getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL);
 
-      TServerTransport serverTransport = tcpKeepAlive ?
-          new TServerSocketKeepAlive(port) : new TServerSocket(port);
 
       TProcessor processor;
       TTransportFactory transFactory;
@@ -6027,6 +6025,9 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           LOG.info("Starting DB backed MetaStore Server");
         }
       }
+
+      TServerTransport serverTransport = tcpKeepAlive ?
+          new TServerSocketKeepAlive(port) : new TServerSocket(port);
 
       TThreadPoolServer.Args args = new TThreadPoolServer.Args(serverTransport)
           .processor(processor)
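
The motivation for the move: the TServerSocket was previously constructed, and
the listening port bound, before the processor, transport factory and metastore
handler were set up, so clients could already connect while the metastore was
still initializing. Creating the transport only after that setup keeps the port
closed until the server can actually answer requests. In sketch form (the
helper name is illustrative, not the real code):

  // before: bind first, initialize afterwards
  TServerTransport transport = new TServerSocket(port);  // clients may connect here
  initProcessorAndHandler();                              // ...but cannot be served yet

  // after: initialize first, then bind
  initProcessorAndHandler();
  TServerTransport transport = tcpKeepAlive
      ? new TServerSocketKeepAlive(port) : new TServerSocket(port);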


[22/27] hive git commit: HIVE-12008: Hive queries failing when using count(*) on column in view (Yongzhi Chen, reviewed by Szehon ho)

Posted by om...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f2e46a2a/ql/src/test/results/clientpositive/union9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union9.q.out b/ql/src/test/results/clientpositive/union9.q.out
index ec7ab35..5f54210 100644
--- a/ql/src/test/results/clientpositive/union9.q.out
+++ b/ql/src/test/results/clientpositive/union9.q.out
@@ -24,73 +24,67 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: s1
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: value (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Union
-                Statistics: Num rows: 1500 Data size: 15936 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 Select Operator
-                  Statistics: Num rows: 1500 Data size: 15936 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                   Group By Operator
                     aggregations: count(1)
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: bigint)
           TableScan
             alias: s1
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: value (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Union
-                Statistics: Num rows: 1500 Data size: 15936 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 Select Operator
-                  Statistics: Num rows: 1500 Data size: 15936 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                   Group By Operator
                     aggregations: count(1)
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: bigint)
           TableScan
             alias: s1
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: value (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Union
-                Statistics: Num rows: 1500 Data size: 15936 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 Select Operator
-                  Statistics: Num rows: 1500 Data size: 15936 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                   Group By Operator
                     aggregations: count(1)
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
           mode: mergepartial
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
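
The union9.q.out plan above is a count over a three-way UNION ALL of the same 500-row source; a minimal sketch of that query shape, assuming the standard src test table (the exact test query may differ):

  EXPLAIN
  SELECT count(1) FROM (
    SELECT value FROM src
    UNION ALL
    SELECT value FROM src
    UNION ALL
    SELECT value FROM src
  ) unionsrc;

Because only count(1) is consumed downstream, the branch Select Operators no longer need to project value, which matches the removed expressions/outputColumnNames lines above.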

http://git-wip-us.apache.org/repos/asf/hive/blob/f2e46a2a/ql/src/test/results/clientpositive/union_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_view.q.out b/ql/src/test/results/clientpositive/union_view.q.out
index c55dee6..66ca51b 100644
--- a/ql/src/test/results/clientpositive/union_view.q.out
+++ b/ql/src/test/results/clientpositive/union_view.q.out
@@ -872,8 +872,6 @@ STAGE PLANS:
             filterExpr: (ds = '1') (type: boolean)
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: '1' (type: string)
-              outputColumnNames: _col0
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Union
                 Statistics: Num rows: 502 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -896,8 +894,6 @@ STAGE PLANS:
               predicate: (ds = '1') (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
-                expressions: '1' (type: string)
-                outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Union
                   Statistics: Num rows: 502 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -920,8 +916,6 @@ STAGE PLANS:
               predicate: (ds = '1') (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
-                expressions: '1' (type: string)
-                outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Union
                   Statistics: Num rows: 502 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -996,8 +990,6 @@ STAGE PLANS:
               predicate: (ds = '2') (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
-                expressions: '2' (type: string)
-                outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Union
                   Statistics: Num rows: 1002 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
@@ -1017,8 +1009,6 @@ STAGE PLANS:
             filterExpr: (ds = '2') (type: boolean)
             Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: '2' (type: string)
-              outputColumnNames: _col0
               Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
               Union
                 Statistics: Num rows: 1002 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
@@ -1041,8 +1031,6 @@ STAGE PLANS:
               predicate: (ds = '2') (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
-                expressions: '2' (type: string)
-                outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Union
                   Statistics: Num rows: 1002 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
@@ -1117,8 +1105,6 @@ STAGE PLANS:
               predicate: (ds = '3') (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
-                expressions: '3' (type: string)
-                outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Union
                   Statistics: Num rows: 1002 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
@@ -1141,8 +1127,6 @@ STAGE PLANS:
               predicate: (ds = '3') (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
-                expressions: '3' (type: string)
-                outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Union
                   Statistics: Num rows: 1002 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
@@ -1162,8 +1146,6 @@ STAGE PLANS:
             filterExpr: (ds = '3') (type: boolean)
             Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: '3' (type: string)
-              outputColumnNames: _col0
               Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
               Union
                 Statistics: Num rows: 1002 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
@@ -1355,8 +1337,6 @@ STAGE PLANS:
               predicate: (ds = '4') (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
-                expressions: '4' (type: string)
-                outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Union
                   Statistics: Num rows: 502 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -1379,8 +1359,6 @@ STAGE PLANS:
               predicate: (ds = '4') (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
-                expressions: '4' (type: string)
-                outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Union
                   Statistics: Num rows: 502 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -1400,8 +1378,6 @@ STAGE PLANS:
             filterExpr: (ds = '4') (type: boolean)
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: '4' (type: string)
-              outputColumnNames: _col0
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Union
                 Statistics: Num rows: 502 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
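
Each union_view.q.out hunk above drops a constant projection of the partition value ('1' through '4') from a Select Operator feeding the Union. A minimal sketch of the kind of partitioned union view these plans come from, with illustrative table and view names (the actual test objects may be named differently):

  CREATE VIEW src_union_view AS
  SELECT key, value, ds FROM src_union_1
  UNION ALL
  SELECT key, value, ds FROM src_union_2
  UNION ALL
  SELECT key, value, ds FROM src_union_3;

  EXPLAIN
  SELECT count(1) FROM src_union_view WHERE ds = '1';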

http://git-wip-us.apache.org/repos/asf/hive/blob/f2e46a2a/ql/src/test/results/clientpositive/unionall_unbalancedppd.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/unionall_unbalancedppd.q.out b/ql/src/test/results/clientpositive/unionall_unbalancedppd.q.out
index 1562087..952eef9 100644
--- a/ql/src/test/results/clientpositive/unionall_unbalancedppd.q.out
+++ b/ql/src/test/results/clientpositive/unionall_unbalancedppd.q.out
@@ -386,8 +386,6 @@ STAGE PLANS:
               predicate: (f1 = 1) (type: boolean)
               Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: 1 (type: int)
-                outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                 Union
                   Statistics: Num rows: 2 Data size: 4 Basic stats: COMPLETE Column stats: NONE
@@ -409,8 +407,6 @@ STAGE PLANS:
               predicate: (f1 = 1) (type: boolean)
               Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: 1 (type: int)
-                outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
                 Union
                   Statistics: Num rows: 2 Data size: 4 Basic stats: COMPLETE Column stats: NONE
@@ -474,146 +470,127 @@ POSTHOOK: Input: default@union_all_bug_test_2
 #### A masked pattern was here ####
 1
 1
-PREHOOK: query: explain
-
-SELECT f1
-FROM (
-
-SELECT
-f1
-, if('helloworld' like '%hello%' ,f1,f2) as filter
-FROM union_all_bug_test_1
-
-union all
-
-select
-f1
-, 0 as filter
-from union_all_bug_test_2
-) A
-WHERE (filter = 1 and f1 = 1)
-PREHOOK: type: QUERY
-POSTHOOK: query: explain
-
-SELECT f1
-FROM (
-
-SELECT
-f1
-, if('helloworld' like '%hello%' ,f1,f2) as filter
-FROM union_all_bug_test_1
-
-union all
-
-select
-f1
-, 0 as filter
-from union_all_bug_test_2
-) A
-WHERE (filter = 1 and f1 = 1)
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: union_all_bug_test_1
-            Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: ((if(true, f1, f2) = 1) and (f1 = 1)) (type: boolean)
-              Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: 1 (type: int)
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
-                Union
-                  Statistics: Num rows: 2 Data size: 3 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    expressions: 1 (type: int)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                      table:
-                          input format: org.apache.hadoop.mapred.TextInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          TableScan
-            alias: union_all_bug_test_2
-            Statistics: Num rows: 2 Data size: 2 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: false (type: boolean)
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-              Select Operator
-                expressions: 1 (type: int)
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-                Union
-                  Statistics: Num rows: 2 Data size: 3 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    expressions: 1 (type: int)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                      table:
-                          input format: org.apache.hadoop.mapred.TextInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT f1
-FROM (
-
-SELECT
-f1
-, if('helloworld' like '%hello%' ,f1,f2) as filter
-FROM union_all_bug_test_1
-
-union all
-
-select
-f1
-, 0 as filter
-from union_all_bug_test_2
-) A
-WHERE (filter = 1 and f1 = 1)
+PREHOOK: query: drop table if exists map_json
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists map_json
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table if exists map_json1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists map_json1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table if exists map_json2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists map_json2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table map_json1(
+  id int,
+  val array<string>)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@map_json1
+POSTHOOK: query: create table map_json1(
+  id int,
+  val array<string>)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@map_json1
+PREHOOK: query: create table map_json2(
+  id int,
+  val array<string>)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@map_json2
+POSTHOOK: query: create table map_json2(
+  id int,
+  val array<string>)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@map_json2
+PREHOOK: query: create table map_json(
+  id int,
+  val array<string>)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@map_json
+POSTHOOK: query: create table map_json(
+  id int,
+  val array<string>)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@map_json
+PREHOOK: query: create view explode as
+select id, l from map_json1 LATERAL VIEW explode(val) tup as l
+UNION ALL
+select id, get_json_object(l, '$.daysLeft') as l
+from map_json2 LATERAL VIEW explode(val) tup as l
+UNION ALL
+select id, l from map_json LATERAL VIEW explode(val) elems as l
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@map_json
+PREHOOK: Input: default@map_json1
+PREHOOK: Input: default@map_json2
+PREHOOK: Output: database:default
+PREHOOK: Output: default@explode
+POSTHOOK: query: create view explode as
+select id, l from map_json1 LATERAL VIEW explode(val) tup as l
+UNION ALL
+select id, get_json_object(l, '$.daysLeft') as l
+from map_json2 LATERAL VIEW explode(val) tup as l
+UNION ALL
+select id, l from map_json LATERAL VIEW explode(val) elems as l
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@map_json
+POSTHOOK: Input: default@map_json1
+POSTHOOK: Input: default@map_json2
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@explode
+PREHOOK: query: select count(*) from explode where get_json_object(l, '$') is NOT NULL
 PREHOOK: type: QUERY
-PREHOOK: Input: default@union_all_bug_test_1
-PREHOOK: Input: default@union_all_bug_test_2
+PREHOOK: Input: default@explode
+PREHOOK: Input: default@map_json
+PREHOOK: Input: default@map_json1
+PREHOOK: Input: default@map_json2
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT f1
-FROM (
-
-SELECT
-f1
-, if('helloworld' like '%hello%' ,f1,f2) as filter
-FROM union_all_bug_test_1
-
-union all
-
-select
-f1
-, 0 as filter
-from union_all_bug_test_2
-) A
-WHERE (filter = 1 and f1 = 1)
+POSTHOOK: query: select count(*) from explode where get_json_object(l, '$') is NOT NULL
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@union_all_bug_test_1
-POSTHOOK: Input: default@union_all_bug_test_2
+POSTHOOK: Input: default@explode
+POSTHOOK: Input: default@map_json
+POSTHOOK: Input: default@map_json1
+POSTHOOK: Input: default@map_json2
 #### A masked pattern was here ####
-1
+0
+PREHOOK: query: drop view explode
+PREHOOK: type: DROPVIEW
+PREHOOK: Input: default@explode
+PREHOOK: Output: default@explode
+POSTHOOK: query: drop view explode
+POSTHOOK: type: DROPVIEW
+POSTHOOK: Input: default@explode
+POSTHOOK: Output: default@explode
+PREHOOK: query: drop table map_json
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@map_json
+PREHOOK: Output: default@map_json
+POSTHOOK: query: drop table map_json
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@map_json
+POSTHOOK: Output: default@map_json
+PREHOOK: query: drop table map_json1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@map_json1
+PREHOOK: Output: default@map_json1
+POSTHOOK: query: drop table map_json1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@map_json1
+POSTHOOK: Output: default@map_json1
+PREHOOK: query: drop table map_json2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@map_json2
+PREHOOK: Output: default@map_json2
+POSTHOOK: query: drop table map_json2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@map_json2
+POSTHOOK: Output: default@map_json2
 PREHOOK: query: SELECT f1
 FROM (
 
@@ -629,7 +606,7 @@ f1
 , 0 as filter
 from union_all_bug_test_2
 ) A
-WHERE (f1 = 1 and filter = 1)
+WHERE (filter = 1 and f1 = 1)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@union_all_bug_test_1
 PREHOOK: Input: default@union_all_bug_test_2
@@ -649,7 +626,7 @@ f1
 , 0 as filter
 from union_all_bug_test_2
 ) A
-WHERE (f1 = 1 and filter = 1)
+WHERE (filter = 1 and f1 = 1)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@union_all_bug_test_1
 POSTHOOK: Input: default@union_all_bug_test_2
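
In the removed explain above, the outer predicate (filter = 1 and f1 = 1) has been pushed into each union branch: on the union_all_bug_test_1 side 'helloworld' like '%hello%' folds to true, leaving (if(true, f1, f2) = 1) and (f1 = 1), while on the union_all_bug_test_2 side the constant 0 filter folds the whole predicate to false. A rough per-branch reading of those filter operators, written as standalone queries (illustrative only, not how Hive literally rewrites the text):

  -- union_all_bug_test_1 branch: the LIKE has been constant-folded to true
  SELECT f1 FROM union_all_bug_test_1 WHERE if(true, f1, f2) = 1 AND f1 = 1;
  -- union_all_bug_test_2 branch: 0 = 1 folds to false, so no rows qualify
  SELECT f1 FROM union_all_bug_test_2 WHERE false;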

http://git-wip-us.apache.org/repos/asf/hive/blob/f2e46a2a/ql/src/test/results/clientpositive/vector_null_projection.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_null_projection.q.out b/ql/src/test/results/clientpositive/vector_null_projection.q.out
index b3b44ad..c787fd4 100644
--- a/ql/src/test/results/clientpositive/vector_null_projection.q.out
+++ b/ql/src/test/results/clientpositive/vector_null_projection.q.out
@@ -100,8 +100,6 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: null (type: void)
-              outputColumnNames: _col0
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Union
                 Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
@@ -121,8 +119,6 @@ STAGE PLANS:
             alias: b
             Statistics: Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: null (type: void)
-              outputColumnNames: _col0
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Union
                 Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
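
The vector_null_projection.q.out hunks drop the explicit null (type: void) projection from both union branches; a minimal sketch of the query shape behind these plans, assuming the branch tables are the a and b shown as aliases (the real test tables may be named differently):

  EXPLAIN
  SELECT NULL FROM a
  UNION ALL
  SELECT NULL FROM b;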


[26/27] hive git commit: HIVE-12338: Add webui to HiveServer2 (Jimmy, reviewed by Mohit, Szehon, Lefty)

Posted by om...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/eb1b80d9/service/src/resources/hive-webapps/static/css/bootstrap.min.css
----------------------------------------------------------------------
diff --git a/service/src/resources/hive-webapps/static/css/bootstrap.min.css b/service/src/resources/hive-webapps/static/css/bootstrap.min.css
new file mode 100755
index 0000000..0f6fbcd
--- /dev/null
+++ b/service/src/resources/hive-webapps/static/css/bootstrap.min.css
@@ -0,0 +1,9 @@
+/*!
+ * Bootstrap v3.0.0
+ *
+ * Copyright 2013 Twitter, Inc
+ * Licensed under the Apache License v2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Designed and built with all the love in the world by @mdo and @fat.
+ *//*! normalize.css v2.1.0 | MIT License | git.io/normalize */article,aside,details,figcaption,figure,footer,header,hgroup,main,nav,section,summary{display:block}audio,canvas,video{display:inline-block}audio:not([controls]){display:none;height:0}[hidden]{display:none}html{font-family:sans-serif;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}a:focus{outline:thin dotted}a:active,a:hover{outline:0}h1{margin:.67em 0;font-size:2em}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:bold}dfn{font-style:italic}hr{height:0;-moz-box-sizing:content-box;box-sizing:content-box}mark{color:#000;background:#ff0}code,kbd,pre,samp{font-family:monospace,serif;font-size:1em}pre{white-space:pre-wrap}q{quotes:"\201C" "\201D" "\2018" "\2019"}small{font-size:80%}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sup{top:-0.5em}sub{bottom:-0.25em}img{border:0}svg:not(:root){overflow:hidden}figure{margin:0}fieldset{padding:.35em .625em .75em;margin:0 
 2px;border:1px solid #c0c0c0}legend{padding:0;border:0}button,input,select,textarea{margin:0;font-family:inherit;font-size:100%}button,input{line-height:normal}button,select{text-transform:none}button,html input[type="button"],input[type="reset"],input[type="submit"]{cursor:pointer;-webkit-appearance:button}button[disabled],html input[disabled]{cursor:default}input[type="checkbox"],input[type="radio"]{padding:0;box-sizing:border-box}input[type="search"]{-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;-webkit-appearance:textfield}input[type="search"]::-webkit-search-cancel-button,input[type="search"]::-webkit-search-decoration{-webkit-appearance:none}button::-moz-focus-inner,input::-moz-focus-inner{padding:0;border:0}textarea{overflow:auto;vertical-align:top}table{border-collapse:collapse;border-spacing:0}@media print{*{color:#000!important;text-shadow:none!important;background:transparent!important;box-shadow:none!important}a,a:visited{text-decorati
 on:underline}a[href]:after{content:" (" attr(href) ")"}abbr[title]:after{content:" (" attr(title) ")"}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100%!important}@page{margin:2cm .5cm}p,h2,h3{orphans:3;widows:3}h2,h3{page-break-after:avoid}.navbar{display:none}.table td,.table th{background-color:#fff!important}.btn>.caret,.dropup>.btn>.caret{border-top-color:#000!important}.label{border:1px solid #000}.table{border-collapse:collapse!important}.table-bordered th,.table-bordered td{border:1px solid #ddd!important}}*,*:before,*:after{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:62.5%;-webkit-tap-highlight-color:rgba(0,0,0,0)}body{font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;line-height:1.428571429;color:#333;background-color:#fff}input,button,select
 ,textarea{font-family:inherit;font-size:inherit;line-height:inherit}button,input,select[multiple],textarea{background-image:none}a{color:#428bca;text-decoration:none}a:hover,a:focus{color:#2a6496;text-decoration:underline}a:focus{outline:thin dotted #333;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}img{vertical-align:middle}.img-responsive{display:block;height:auto;max-width:100%}.img-rounded{border-radius:6px}.img-thumbnail{display:inline-block;height:auto;max-width:100%;padding:4px;line-height:1.428571429;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.img-circle{border-radius:50%}hr{margin-top:20px;margin-bottom:20px;border:0;border-top:1px solid #eee}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0 0 0 0);border:0}p{margin:0 0 10px}.lead{margin-bottom:20px;font-size:16.099999999999998px;font-weight:200;line-height:1.4}@media(min-w
 idth:768px){.lead{font-size:21px}}small{font-size:85%}cite{font-style:normal}.text-muted{color:#999}.text-primary{color:#428bca}.text-warning{color:#c09853}.text-danger{color:#b94a48}.text-success{color:#468847}.text-info{color:#3a87ad}.text-left{text-align:left}.text-right{text-align:right}.text-center{text-align:center}h1,h2,h3,h4,h5,h6,.h1,.h2,.h3,.h4,.h5,.h6{font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-weight:500;line-height:1.1}h1 small,h2 small,h3 small,h4 small,h5 small,h6 small,.h1 small,.h2 small,.h3 small,.h4 small,.h5 small,.h6 small{font-weight:normal;line-height:1;color:#999}h1,h2,h3{margin-top:20px;margin-bottom:10px}h4,h5,h6{margin-top:10px;margin-bottom:10px}h1,.h1{font-size:36px}h2,.h2{font-size:30px}h3,.h3{font-size:24px}h4,.h4{font-size:18px}h5,.h5{font-size:14px}h6,.h6{font-size:12px}h1 small,.h1 small{font-size:24px}h2 small,.h2 small{font-size:18px}h3 small,.h3 small,h4 small,.h4 small{font-size:14px}.page-header{padding-bottom:9px;margin:40px 0
  20px;border-bottom:1px solid #eee}ul,ol{margin-top:0;margin-bottom:10px}ul ul,ol ul,ul ol,ol ol{margin-bottom:0}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;list-style:none}.list-inline>li{display:inline-block;padding-right:5px;padding-left:5px}dl{margin-bottom:20px}dt,dd{line-height:1.428571429}dt{font-weight:bold}dd{margin-left:0}@media(min-width:768px){.dl-horizontal dt{float:left;width:160px;overflow:hidden;clear:left;text-align:right;text-overflow:ellipsis;white-space:nowrap}.dl-horizontal dd{margin-left:180px}.dl-horizontal dd:before,.dl-horizontal dd:after{display:table;content:" "}.dl-horizontal dd:after{clear:both}.dl-horizontal dd:before,.dl-horizontal dd:after{display:table;content:" "}.dl-horizontal dd:after{clear:both}}abbr[title],abbr[data-original-title]{cursor:help;border-bottom:1px dotted #999}abbr.initialism{font-size:90%;text-transform:uppercase}blockquote{padding:10px 20px;margin:0 0 20px;border-left:5px solid #eee}blockquote p{font-
 size:17.5px;font-weight:300;line-height:1.25}blockquote p:last-child{margin-bottom:0}blockquote small{display:block;line-height:1.428571429;color:#999}blockquote small:before{content:'\2014 \00A0'}blockquote.pull-right{padding-right:15px;padding-left:0;border-right:5px solid #eee;border-left:0}blockquote.pull-right p,blockquote.pull-right small{text-align:right}blockquote.pull-right small:before{content:''}blockquote.pull-right small:after{content:'\00A0 \2014'}q:before,q:after,blockquote:before,blockquote:after{content:""}address{display:block;margin-bottom:20px;font-style:normal;line-height:1.428571429}code,pre{font-family:Monaco,Menlo,Consolas,"Courier New",monospace}code{padding:2px 4px;font-size:90%;color:#c7254e;white-space:nowrap;background-color:#f9f2f4;border-radius:4px}pre{display:block;padding:9.5px;margin:0 0 10px;font-size:13px;line-height:1.428571429;color:#333;word-break:break-all;word-wrap:break-word;background-color:#f5f5f5;border:1px solid #ccc;border-radius:4px}pr
 e.prettyprint{margin-bottom:20px}pre code{padding:0;font-size:inherit;color:inherit;white-space:pre-wrap;background-color:transparent;border:0}.pre-scrollable{max-height:340px;overflow-y:scroll}.container{padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}.container:before,.container:after{display:table;content:" "}.container:after{clear:both}.container:before,.container:after{display:table;content:" "}.container:after{clear:both}.row{margin-right:-15px;margin-left:-15px}.row:before,.row:after{display:table;content:" "}.row:after{clear:both}.row:before,.row:after{display:table;content:" "}.row:after{clear:both}.col-xs-1,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9,.col-xs-10,.col-xs-11,.col-xs-12,.col-sm-1,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-10,.col-sm-11,.col-sm-12,.col-md-1,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-10,.col-md-11,
 .col-md-12,.col-lg-1,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-10,.col-lg-11,.col-lg-12{position:relative;min-height:1px;padding-right:15px;padding-left:15px}.col-xs-1,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9,.col-xs-10,.col-xs-11{float:left}.col-xs-1{width:8.333333333333332%}.col-xs-2{width:16.666666666666664%}.col-xs-3{width:25%}.col-xs-4{width:33.33333333333333%}.col-xs-5{width:41.66666666666667%}.col-xs-6{width:50%}.col-xs-7{width:58.333333333333336%}.col-xs-8{width:66.66666666666666%}.col-xs-9{width:75%}.col-xs-10{width:83.33333333333334%}.col-xs-11{width:91.66666666666666%}.col-xs-12{width:100%}@media(min-width:768px){.container{max-width:750px}.col-sm-1,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-10,.col-sm-11{float:left}.col-sm-1{width:8.333333333333332%}.col-sm-2{width:16.666666666666664%}.col-sm-3{width:25%}.col-sm-4{width:33.33333333333333%}.col
 -sm-5{width:41.66666666666667%}.col-sm-6{width:50%}.col-sm-7{width:58.333333333333336%}.col-sm-8{width:66.66666666666666%}.col-sm-9{width:75%}.col-sm-10{width:83.33333333333334%}.col-sm-11{width:91.66666666666666%}.col-sm-12{width:100%}.col-sm-push-1{left:8.333333333333332%}.col-sm-push-2{left:16.666666666666664%}.col-sm-push-3{left:25%}.col-sm-push-4{left:33.33333333333333%}.col-sm-push-5{left:41.66666666666667%}.col-sm-push-6{left:50%}.col-sm-push-7{left:58.333333333333336%}.col-sm-push-8{left:66.66666666666666%}.col-sm-push-9{left:75%}.col-sm-push-10{left:83.33333333333334%}.col-sm-push-11{left:91.66666666666666%}.col-sm-pull-1{right:8.333333333333332%}.col-sm-pull-2{right:16.666666666666664%}.col-sm-pull-3{right:25%}.col-sm-pull-4{right:33.33333333333333%}.col-sm-pull-5{right:41.66666666666667%}.col-sm-pull-6{right:50%}.col-sm-pull-7{right:58.333333333333336%}.col-sm-pull-8{right:66.66666666666666%}.col-sm-pull-9{right:75%}.col-sm-pull-10{right:83.33333333333334%}.col-sm-pull-11
 {right:91.66666666666666%}.col-sm-offset-1{margin-left:8.333333333333332%}.col-sm-offset-2{margin-left:16.666666666666664%}.col-sm-offset-3{margin-left:25%}.col-sm-offset-4{margin-left:33.33333333333333%}.col-sm-offset-5{margin-left:41.66666666666667%}.col-sm-offset-6{margin-left:50%}.col-sm-offset-7{margin-left:58.333333333333336%}.col-sm-offset-8{margin-left:66.66666666666666%}.col-sm-offset-9{margin-left:75%}.col-sm-offset-10{margin-left:83.33333333333334%}.col-sm-offset-11{margin-left:91.66666666666666%}}@media(min-width:992px){.container{max-width:970px}.col-md-1,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-10,.col-md-11{float:left}.col-md-1{width:8.333333333333332%}.col-md-2{width:16.666666666666664%}.col-md-3{width:25%}.col-md-4{width:33.33333333333333%}.col-md-5{width:41.66666666666667%}.col-md-6{width:50%}.col-md-7{width:58.333333333333336%}.col-md-8{width:66.66666666666666%}.col-md-9{width:75%}.col-md-10{width:83.33333333333334%}.
 col-md-11{width:91.66666666666666%}.col-md-12{width:100%}.col-md-push-0{left:auto}.col-md-push-1{left:8.333333333333332%}.col-md-push-2{left:16.666666666666664%}.col-md-push-3{left:25%}.col-md-push-4{left:33.33333333333333%}.col-md-push-5{left:41.66666666666667%}.col-md-push-6{left:50%}.col-md-push-7{left:58.333333333333336%}.col-md-push-8{left:66.66666666666666%}.col-md-push-9{left:75%}.col-md-push-10{left:83.33333333333334%}.col-md-push-11{left:91.66666666666666%}.col-md-pull-0{right:auto}.col-md-pull-1{right:8.333333333333332%}.col-md-pull-2{right:16.666666666666664%}.col-md-pull-3{right:25%}.col-md-pull-4{right:33.33333333333333%}.col-md-pull-5{right:41.66666666666667%}.col-md-pull-6{right:50%}.col-md-pull-7{right:58.333333333333336%}.col-md-pull-8{right:66.66666666666666%}.col-md-pull-9{right:75%}.col-md-pull-10{right:83.33333333333334%}.col-md-pull-11{right:91.66666666666666%}.col-md-offset-0{margin-left:0}.col-md-offset-1{margin-left:8.333333333333332%}.col-md-offset-2{margin
 -left:16.666666666666664%}.col-md-offset-3{margin-left:25%}.col-md-offset-4{margin-left:33.33333333333333%}.col-md-offset-5{margin-left:41.66666666666667%}.col-md-offset-6{margin-left:50%}.col-md-offset-7{margin-left:58.333333333333336%}.col-md-offset-8{margin-left:66.66666666666666%}.col-md-offset-9{margin-left:75%}.col-md-offset-10{margin-left:83.33333333333334%}.col-md-offset-11{margin-left:91.66666666666666%}}@media(min-width:1200px){.container{max-width:1170px}.col-lg-1,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-10,.col-lg-11{float:left}.col-lg-1{width:8.333333333333332%}.col-lg-2{width:16.666666666666664%}.col-lg-3{width:25%}.col-lg-4{width:33.33333333333333%}.col-lg-5{width:41.66666666666667%}.col-lg-6{width:50%}.col-lg-7{width:58.333333333333336%}.col-lg-8{width:66.66666666666666%}.col-lg-9{width:75%}.col-lg-10{width:83.33333333333334%}.col-lg-11{width:91.66666666666666%}.col-lg-12{width:100%}.col-lg-push-0{left:auto}.col-lg-push-
 1{left:8.333333333333332%}.col-lg-push-2{left:16.666666666666664%}.col-lg-push-3{left:25%}.col-lg-push-4{left:33.33333333333333%}.col-lg-push-5{left:41.66666666666667%}.col-lg-push-6{left:50%}.col-lg-push-7{left:58.333333333333336%}.col-lg-push-8{left:66.66666666666666%}.col-lg-push-9{left:75%}.col-lg-push-10{left:83.33333333333334%}.col-lg-push-11{left:91.66666666666666%}.col-lg-pull-0{right:auto}.col-lg-pull-1{right:8.333333333333332%}.col-lg-pull-2{right:16.666666666666664%}.col-lg-pull-3{right:25%}.col-lg-pull-4{right:33.33333333333333%}.col-lg-pull-5{right:41.66666666666667%}.col-lg-pull-6{right:50%}.col-lg-pull-7{right:58.333333333333336%}.col-lg-pull-8{right:66.66666666666666%}.col-lg-pull-9{right:75%}.col-lg-pull-10{right:83.33333333333334%}.col-lg-pull-11{right:91.66666666666666%}.col-lg-offset-0{margin-left:0}.col-lg-offset-1{margin-left:8.333333333333332%}.col-lg-offset-2{margin-left:16.666666666666664%}.col-lg-offset-3{margin-left:25%}.col-lg-offset-4{margin-left:33.3333
 3333333333%}.col-lg-offset-5{margin-left:41.66666666666667%}.col-lg-offset-6{margin-left:50%}.col-lg-offset-7{margin-left:58.333333333333336%}.col-lg-offset-8{margin-left:66.66666666666666%}.col-lg-offset-9{margin-left:75%}.col-lg-offset-10{margin-left:83.33333333333334%}.col-lg-offset-11{margin-left:91.66666666666666%}}table{max-width:100%;background-color:transparent}th{text-align:left}.table{width:100%;margin-bottom:20px}.table thead>tr>th,.table tbody>tr>th,.table tfoot>tr>th,.table thead>tr>td,.table tbody>tr>td,.table tfoot>tr>td{padding:8px;line-height:1.428571429;vertical-align:top;border-top:1px solid #ddd}.table thead>tr>th{vertical-align:bottom;border-bottom:2px solid #ddd}.table caption+thead tr:first-child th,.table colgroup+thead tr:first-child th,.table thead:first-child tr:first-child th,.table caption+thead tr:first-child td,.table colgroup+thead tr:first-child td,.table thead:first-child tr:first-child td{border-top:0}.table tbody+tbody{border-top:2px solid #ddd}.t
 able .table{background-color:#fff}.table-condensed thead>tr>th,.table-condensed tbody>tr>th,.table-condensed tfoot>tr>th,.table-condensed thead>tr>td,.table-condensed tbody>tr>td,.table-condensed tfoot>tr>td{padding:5px}.table-bordered{border:1px solid #ddd}.table-bordered>thead>tr>th,.table-bordered>tbody>tr>th,.table-bordered>tfoot>tr>th,.table-bordered>thead>tr>td,.table-bordered>tbody>tr>td,.table-bordered>tfoot>tr>td{border:1px solid #ddd}.table-bordered>thead>tr>th,.table-bordered>thead>tr>td{border-bottom-width:2px}.table-striped>tbody>tr:nth-child(odd)>td,.table-striped>tbody>tr:nth-child(odd)>th{background-color:#f9f9f9}.table-hover>tbody>tr:hover>td,.table-hover>tbody>tr:hover>th{background-color:#f5f5f5}table col[class*="col-"]{display:table-column;float:none}table td[class*="col-"],table th[class*="col-"]{display:table-cell;float:none}.table>thead>tr>td.active,.table>tbody>tr>td.active,.table>tfoot>tr>td.active,.table>thead>tr>th.active,.table>tbody>tr>th.active,.table>t
 foot>tr>th.active,.table>thead>tr.active>td,.table>tbody>tr.active>td,.table>tfoot>tr.active>td,.table>thead>tr.active>th,.table>tbody>tr.active>th,.table>tfoot>tr.active>th{background-color:#f5f5f5}.table>thead>tr>td.success,.table>tbody>tr>td.success,.table>tfoot>tr>td.success,.table>thead>tr>th.success,.table>tbody>tr>th.success,.table>tfoot>tr>th.success,.table>thead>tr.success>td,.table>tbody>tr.success>td,.table>tfoot>tr.success>td,.table>thead>tr.success>th,.table>tbody>tr.success>th,.table>tfoot>tr.success>th{background-color:#dff0d8;border-color:#d6e9c6}.table-hover>tbody>tr>td.success:hover,.table-hover>tbody>tr>th.success:hover,.table-hover>tbody>tr.success:hover>td{background-color:#d0e9c6;border-color:#c9e2b3}.table>thead>tr>td.danger,.table>tbody>tr>td.danger,.table>tfoot>tr>td.danger,.table>thead>tr>th.danger,.table>tbody>tr>th.danger,.table>tfoot>tr>th.danger,.table>thead>tr.danger>td,.table>tbody>tr.danger>td,.table>tfoot>tr.danger>td,.table>thead>tr.danger>th,.tabl
 e>tbody>tr.danger>th,.table>tfoot>tr.danger>th{background-color:#f2dede;border-color:#eed3d7}.table-hover>tbody>tr>td.danger:hover,.table-hover>tbody>tr>th.danger:hover,.table-hover>tbody>tr.danger:hover>td{background-color:#ebcccc;border-color:#e6c1c7}.table>thead>tr>td.warning,.table>tbody>tr>td.warning,.table>tfoot>tr>td.warning,.table>thead>tr>th.warning,.table>tbody>tr>th.warning,.table>tfoot>tr>th.warning,.table>thead>tr.warning>td,.table>tbody>tr.warning>td,.table>tfoot>tr.warning>td,.table>thead>tr.warning>th,.table>tbody>tr.warning>th,.table>tfoot>tr.warning>th{background-color:#fcf8e3;border-color:#fbeed5}.table-hover>tbody>tr>td.warning:hover,.table-hover>tbody>tr>th.warning:hover,.table-hover>tbody>tr.warning:hover>td{background-color:#faf2cc;border-color:#f8e5be}@media(max-width:768px){.table-responsive{width:100%;margin-bottom:15px;overflow-x:scroll;overflow-y:hidden;border:1px solid #ddd}.table-responsive>.table{margin-bottom:0;background-color:#fff}.table-responsive>
 .table>thead>tr>th,.table-responsive>.table>tbody>tr>th,.table-responsive>.table>tfoot>tr>th,.table-responsive>.table>thead>tr>td,.table-responsive>.table>tbody>tr>td,.table-responsive>.table>tfoot>tr>td{white-space:nowrap}.table-responsive>.table-bordered{border:0}.table-responsive>.table-bordered>thead>tr>th:first-child,.table-responsive>.table-bordered>tbody>tr>th:first-child,.table-responsive>.table-bordered>tfoot>tr>th:first-child,.table-responsive>.table-bordered>thead>tr>td:first-child,.table-responsive>.table-bordered>tbody>tr>td:first-child,.table-responsive>.table-bordered>tfoot>tr>td:first-child{border-left:0}.table-responsive>.table-bordered>thead>tr>th:last-child,.table-responsive>.table-bordered>tbody>tr>th:last-child,.table-responsive>.table-bordered>tfoot>tr>th:last-child,.table-responsive>.table-bordered>thead>tr>td:last-child,.table-responsive>.table-bordered>tbody>tr>td:last-child,.table-responsive>.table-bordered>tfoot>tr>td:last-child{border-right:0}.table-respo
 nsive>.table-bordered>thead>tr:last-child>th,.table-responsive>.table-bordered>tbody>tr:last-child>th,.table-responsive>.table-bordered>tfoot>tr:last-child>th,.table-responsive>.table-bordered>thead>tr:last-child>td,.table-responsive>.table-bordered>tbody>tr:last-child>td,.table-responsive>.table-bordered>tfoot>tr:last-child>td{border-bottom:0}}fieldset{padding:0;margin:0;border:0}legend{display:block;width:100%;padding:0;margin-bottom:20px;font-size:21px;line-height:inherit;color:#333;border:0;border-bottom:1px solid #e5e5e5}label{display:inline-block;margin-bottom:5px;font-weight:bold}input[type="search"]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type="radio"],input[type="checkbox"]{margin:4px 0 0;margin-top:1px \9;line-height:normal}input[type="file"]{display:block}select[multiple],select[size]{height:auto}select optgroup{font-family:inherit;font-size:inherit;font-style:inherit}input[type="file"]:focus,input[type="radio"]:focus,input[typ
 e="checkbox"]:focus{outline:thin dotted #333;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}input[type="number"]::-webkit-outer-spin-button,input[type="number"]::-webkit-inner-spin-button{height:auto}.form-control:-moz-placeholder{color:#999}.form-control::-moz-placeholder{color:#999}.form-control:-ms-input-placeholder{color:#999}.form-control::-webkit-input-placeholder{color:#999}.form-control{display:block;width:100%;height:34px;padding:6px 12px;font-size:14px;line-height:1.428571429;color:#555;vertical-align:middle;background-color:#fff;border:1px solid #ccc;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-webkit-transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s;transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s}.form-control:focus{border-color:#66afe9;outline:0;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 8px rgba(102,175,233,0.6);box-shadow:inset 0
  1px 1px rgba(0,0,0,0.075),0 0 8px rgba(102,175,233,0.6)}.form-control[disabled],.form-control[readonly],fieldset[disabled] .form-control{cursor:not-allowed;background-color:#eee}textarea.form-control{height:auto}.form-group{margin-bottom:15px}.radio,.checkbox{display:block;min-height:20px;padding-left:20px;margin-top:10px;margin-bottom:10px;vertical-align:middle}.radio label,.checkbox label{display:inline;margin-bottom:0;font-weight:normal;cursor:pointer}.radio input[type="radio"],.radio-inline input[type="radio"],.checkbox input[type="checkbox"],.checkbox-inline input[type="checkbox"]{float:left;margin-left:-20px}.radio+.radio,.checkbox+.checkbox{margin-top:-5px}.radio-inline,.checkbox-inline{display:inline-block;padding-left:20px;margin-bottom:0;font-weight:normal;vertical-align:middle;cursor:pointer}.radio-inline+.radio-inline,.checkbox-inline+.checkbox-inline{margin-top:0;margin-left:10px}input[type="radio"][disabled],input[type="checkbox"][disabled],.radio[disabled],.radio-inl
 ine[disabled],.checkbox[disabled],.checkbox-inline[disabled],fieldset[disabled] input[type="radio"],fieldset[disabled] input[type="checkbox"],fieldset[disabled] .radio,fieldset[disabled] .radio-inline,fieldset[disabled] .checkbox,fieldset[disabled] .checkbox-inline{cursor:not-allowed}.input-sm{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-sm{height:30px;line-height:30px}textarea.input-sm{height:auto}.input-lg{height:45px;padding:10px 16px;font-size:18px;line-height:1.33;border-radius:6px}select.input-lg{height:45px;line-height:45px}textarea.input-lg{height:auto}.has-warning .help-block,.has-warning .control-label{color:#c09853}.has-warning .form-control{border-color:#c09853;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075)}.has-warning .form-control:focus{border-color:#a47e3c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #dbc59e;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0
  6px #dbc59e}.has-warning .input-group-addon{color:#c09853;background-color:#fcf8e3;border-color:#c09853}.has-error .help-block,.has-error .control-label{color:#b94a48}.has-error .form-control{border-color:#b94a48;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075)}.has-error .form-control:focus{border-color:#953b39;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #d59392;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #d59392}.has-error .input-group-addon{color:#b94a48;background-color:#f2dede;border-color:#b94a48}.has-success .help-block,.has-success .control-label{color:#468847}.has-success .form-control{border-color:#468847;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075)}.has-success .form-control:focus{border-color:#356635;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #7aba7b;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #7aba7b}.has-success .in
 put-group-addon{color:#468847;background-color:#dff0d8;border-color:#468847}.form-control-static{padding-top:7px;margin-bottom:0}.help-block{display:block;margin-top:5px;margin-bottom:10px;color:#737373}@media(min-width:768px){.form-inline .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.form-inline .form-control{display:inline-block}.form-inline .radio,.form-inline .checkbox{display:inline-block;padding-left:0;margin-top:0;margin-bottom:0}.form-inline .radio input[type="radio"],.form-inline .checkbox input[type="checkbox"]{float:none;margin-left:0}}.form-horizontal .control-label,.form-horizontal .radio,.form-horizontal .checkbox,.form-horizontal .radio-inline,.form-horizontal .checkbox-inline{padding-top:7px;margin-top:0;margin-bottom:0}.form-horizontal .form-group{margin-right:-15px;margin-left:-15px}.form-horizontal .form-group:before,.form-horizontal .form-group:after{display:table;content:" "}.form-horizontal .form-group:after{clear:both}.form-horizontal
  .form-group:before,.form-horizontal .form-group:after{display:table;content:" "}.form-horizontal .form-group:after{clear:both}@media(min-width:768px){.form-horizontal .control-label{text-align:right}}.btn{display:inline-block;padding:6px 12px;margin-bottom:0;font-size:14px;font-weight:normal;line-height:1.428571429;text-align:center;white-space:nowrap;vertical-align:middle;cursor:pointer;border:1px solid transparent;border-radius:4px;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;-o-user-select:none;user-select:none}.btn:focus{outline:thin dotted #333;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}.btn:hover,.btn:focus{color:#333;text-decoration:none}.btn:active,.btn.active{background-image:none;outline:0;-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,0.125);box-shadow:inset 0 3px 5px rgba(0,0,0,0.125)}.btn.disabled,.btn[disabled],fieldset[disabled] .btn{pointer-events:none;cursor:not-allowed;opacity:.65;filter:alpha(opacity=65);-webkit-box-shadow
 :none;box-shadow:none}.btn-default{color:#333;background-color:#fff;border-color:#ccc}.btn-default:hover,.btn-default:focus,.btn-default:active,.btn-default.active,.open .dropdown-toggle.btn-default{color:#333;background-color:#ebebeb;border-color:#adadad}.btn-default:active,.btn-default.active,.open .dropdown-toggle.btn-default{background-image:none}.btn-default.disabled,.btn-default[disabled],fieldset[disabled] .btn-default,.btn-default.disabled:hover,.btn-default[disabled]:hover,fieldset[disabled] .btn-default:hover,.btn-default.disabled:focus,.btn-default[disabled]:focus,fieldset[disabled] .btn-default:focus,.btn-default.disabled:active,.btn-default[disabled]:active,fieldset[disabled] .btn-default:active,.btn-default.disabled.active,.btn-default[disabled].active,fieldset[disabled] .btn-default.active{background-color:#fff;border-color:#ccc}.btn-primary{color:#fff;background-color:#428bca;border-color:#357ebd}.btn-primary:hover,.btn-primary:focus,.btn-primary:active,.btn-primary.
 active,.open .dropdown-toggle.btn-primary{color:#fff;background-color:#3276b1;border-color:#285e8e}.btn-primary:active,.btn-primary.active,.open .dropdown-toggle.btn-primary{background-image:none}.btn-primary.disabled,.btn-primary[disabled],fieldset[disabled] .btn-primary,.btn-primary.disabled:hover,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary:hover,.btn-primary.disabled:focus,.btn-primary[disabled]:focus,fieldset[disabled] .btn-primary:focus,.btn-primary.disabled:active,.btn-primary[disabled]:active,fieldset[disabled] .btn-primary:active,.btn-primary.disabled.active,.btn-primary[disabled].active,fieldset[disabled] .btn-primary.active{background-color:#428bca;border-color:#357ebd}.btn-warning{color:#fff;background-color:#f0ad4e;border-color:#eea236}.btn-warning:hover,.btn-warning:focus,.btn-warning:active,.btn-warning.active,.open .dropdown-toggle.btn-warning{color:#fff;background-color:#ed9c28;border-color:#d58512}.btn-warning:active,.btn-warning.active,.open .dropd
 own-toggle.btn-warning{background-image:none}.btn-warning.disabled,.btn-warning[disabled],fieldset[disabled] .btn-warning,.btn-warning.disabled:hover,.btn-warning[disabled]:hover,fieldset[disabled] .btn-warning:hover,.btn-warning.disabled:focus,.btn-warning[disabled]:focus,fieldset[disabled] .btn-warning:focus,.btn-warning.disabled:active,.btn-warning[disabled]:active,fieldset[disabled] .btn-warning:active,.btn-warning.disabled.active,.btn-warning[disabled].active,fieldset[disabled] .btn-warning.active{background-color:#f0ad4e;border-color:#eea236}.btn-danger{color:#fff;background-color:#d9534f;border-color:#d43f3a}.btn-danger:hover,.btn-danger:focus,.btn-danger:active,.btn-danger.active,.open .dropdown-toggle.btn-danger{color:#fff;background-color:#d2322d;border-color:#ac2925}.btn-danger:active,.btn-danger.active,.open .dropdown-toggle.btn-danger{background-image:none}.btn-danger.disabled,.btn-danger[disabled],fieldset[disabled] .btn-danger,.btn-danger.disabled:hover,.btn-danger[di
 sabled]:hover,fieldset[disabled] .btn-danger:hover,.btn-danger.disabled:focus,.btn-danger[disabled]:focus,fieldset[disabled] .btn-danger:focus,.btn-danger.disabled:active,.btn-danger[disabled]:active,fieldset[disabled] .btn-danger:active,.btn-danger.disabled.active,.btn-danger[disabled].active,fieldset[disabled] .btn-danger.active{background-color:#d9534f;border-color:#d43f3a}.btn-success{color:#fff;background-color:#5cb85c;border-color:#4cae4c}.btn-success:hover,.btn-success:focus,.btn-success:active,.btn-success.active,.open .dropdown-toggle.btn-success{color:#fff;background-color:#47a447;border-color:#398439}.btn-success:active,.btn-success.active,.open .dropdown-toggle.btn-success{background-image:none}.btn-success.disabled,.btn-success[disabled],fieldset[disabled] .btn-success,.btn-success.disabled:hover,.btn-success[disabled]:hover,fieldset[disabled] .btn-success:hover,.btn-success.disabled:focus,.btn-success[disabled]:focus,fieldset[disabled] .btn-success:focus,.btn-success.d
 isabled:active,.btn-success[disabled]:active,fieldset[disabled] .btn-success:active,.btn-success.disabled.active,.btn-success[disabled].active,fieldset[disabled] .btn-success.active{background-color:#5cb85c;border-color:#4cae4c}.btn-info{color:#fff;background-color:#5bc0de;border-color:#46b8da}.btn-info:hover,.btn-info:focus,.btn-info:active,.btn-info.active,.open .dropdown-toggle.btn-info{color:#fff;background-color:#39b3d7;border-color:#269abc}.btn-info:active,.btn-info.active,.open .dropdown-toggle.btn-info{background-image:none}.btn-info.disabled,.btn-info[disabled],fieldset[disabled] .btn-info,.btn-info.disabled:hover,.btn-info[disabled]:hover,fieldset[disabled] .btn-info:hover,.btn-info.disabled:focus,.btn-info[disabled]:focus,fieldset[disabled] .btn-info:focus,.btn-info.disabled:active,.btn-info[disabled]:active,fieldset[disabled] .btn-info:active,.btn-info.disabled.active,.btn-info[disabled].active,fieldset[disabled] .btn-info.active{background-color:#5bc0de;border-color:#46
<TRUNCATED>

[10/27] hive git commit: HIVE-12463: VectorMapJoinFastKeyStore has Array OOB errors (Gopal V, reviewed by Sergey Shelukhin)

Posted by om...@apache.org.
HIVE-12463: VectorMapJoinFastKeyStore has Array OOB errors (Gopal V, reviewed by Sergey Shelukhin)

Signed-off-by: Gopal V <go...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/11320551
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/11320551
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/11320551

Branch: refs/heads/master-fixed
Commit: 11320551c32996ef9ced5eec072c1f03b5cfae71
Parents: 2da3436
Author: Gopal V <go...@apache.org>
Authored: Wed Nov 25 01:01:15 2015 -0800
Committer: Owen O'Malley <om...@apache.org>
Committed: Mon Nov 30 11:14:35 2015 -0800

----------------------------------------------------------------------
 .../mapjoin/fast/VectorMapJoinFastKeyStore.java | 17 ++---
 .../apache/hadoop/hive/serde2/WriteBuffers.java | 69 ++++++++++----------
 2 files changed, 38 insertions(+), 48 deletions(-)
----------------------------------------------------------------------
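
The fix drops the ByteSegmentRef field and the hand-rolled byte walk in equalKey(), and instead compares through WriteBuffers.isEqual(), whose new private helper follows a key across buffer-segment boundaries rather than assuming it sits in one contiguous array. A minimal, self-contained sketch of that segmented-comparison idea (a hypothetical SegmentedByteStore class, not the Hive code) looks roughly like this:

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

// Illustrative sketch only: a hypothetical SegmentedByteStore, not the Hive
// WriteBuffers class. It shows the same shape of problem the patch addresses:
// comparing an external byte[] against bytes stored across fixed-size segments,
// where a stored key may spill from one segment into the next.
public class SegmentedByteStore {
  private final int segmentSize;
  private final List<byte[]> segments = new ArrayList<>();
  private int writeOffset; // write position inside the last segment

  public SegmentedByteStore(int segmentSize) {
    this.segmentSize = segmentSize;
    segments.add(new byte[segmentSize]);
  }

  // Appends bytes, spilling into a new segment when the current one fills up,
  // and returns the logical offset where the data starts.
  public long write(byte[] src, int srcOffset, int length) {
    long start = (long) (segments.size() - 1) * segmentSize + writeOffset;
    for (int i = 0; i < length; i++) {
      if (writeOffset == segmentSize) {
        segments.add(new byte[segmentSize]);
        writeOffset = 0;
      }
      segments.get(segments.size() - 1)[writeOffset++] = src[srcOffset + i];
    }
    return start;
  }

  // True iff the stored bytes at storeOffset equal key[keyStart, keyStart + length).
  // The comparison walks segment by segment instead of assuming one contiguous array.
  public boolean isEqual(byte[] key, int keyStart, long storeOffset, int length) {
    if (length == 0) {
      return true;
    }
    int segIndex = (int) (storeOffset / segmentSize);
    int segFrom = (int) (storeOffset % segmentSize);
    byte[] seg = segments.get(segIndex);
    for (int i = 0; i < length; i++) {
      if (segFrom == segmentSize) { // stored key spills into the next segment
        seg = segments.get(++segIndex);
        segFrom = 0;
      }
      if (key[keyStart + i] != seg[segFrom++]) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Tiny segments force the stored key to span segment boundaries.
    SegmentedByteStore store = new SegmentedByteStore(4);
    byte[] key = "spanning-key".getBytes(StandardCharsets.UTF_8);
    long offset = store.write(key, 0, key.length);
    System.out.println(store.isEqual(key, 0, offset, key.length));   // true
    byte[] other = "spanning-kez".getBytes(StandardCharsets.UTF_8);
    System.out.println(store.isEqual(other, 0, offset, key.length)); // false
  }
}

With the tiny segment size the demo forces the key to straddle segment boundaries, which is exactly the spill case the in-buffer comparison below has to handle.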


http://git-wip-us.apache.org/repos/asf/hive/blob/11320551/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastKeyStore.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastKeyStore.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastKeyStore.java
index 58af4eb..efdcd43 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastKeyStore.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastKeyStore.java
@@ -30,7 +30,6 @@ public class VectorMapJoinFastKeyStore {
 
   private WriteBuffers writeBuffers;
 
-  private WriteBuffers.ByteSegmentRef byteSegmentRef;
   private WriteBuffers.Position readPos;
 
   /**
@@ -141,17 +140,11 @@ public class VectorMapJoinFastKeyStore {
     }
 
     // Our reading is positioned to the key.
-    writeBuffers.getByteSegmentRefToCurrent(byteSegmentRef, keyLength, readPos);
-
-    byte[] currentBytes = byteSegmentRef.getBytes();
-    int currentStart = (int) byteSegmentRef.getOffset();
-
-    for (int i = 0; i < keyLength; i++) {
-      if (currentBytes[currentStart + i] != keyBytes[keyStart + i]) {
-        // LOG.debug("VectorMapJoinFastKeyStore equalKey no match on bytes");
-        return false;
-      }
+    if (!writeBuffers.isEqual(keyBytes, keyStart, readPos, keyLength)) {
+      // LOG.debug("VectorMapJoinFastKeyStore equalKey no match on bytes");
+      return false;
     }
+
     // LOG.debug("VectorMapJoinFastKeyStore equalKey match on bytes");
     return true;
   }
@@ -159,7 +152,6 @@ public class VectorMapJoinFastKeyStore {
   public VectorMapJoinFastKeyStore(int writeBuffersSize) {
     writeBuffers = new WriteBuffers(writeBuffersSize, AbsoluteKeyOffset.maxSize);
 
-    byteSegmentRef = new WriteBuffers.ByteSegmentRef();
     readPos = new WriteBuffers.Position();
   }
 
@@ -167,7 +159,6 @@ public class VectorMapJoinFastKeyStore {
     // TODO: Check if maximum size compatible with AbsoluteKeyOffset.maxSize.
     this.writeBuffers = writeBuffers;
 
-    byteSegmentRef = new WriteBuffers.ByteSegmentRef();
     readPos = new WriteBuffers.Position();
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/11320551/serde/src/java/org/apache/hadoop/hive/serde2/WriteBuffers.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/WriteBuffers.java b/serde/src/java/org/apache/hadoop/hive/serde2/WriteBuffers.java
index b47456e..5900428 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/WriteBuffers.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/WriteBuffers.java
@@ -282,32 +282,33 @@ public final class WriteBuffers implements RandomAccessOutput {
     return true;
   }
 
-  /**
-   * Compares part of the buffer with a part of an external byte array.
-   * Does not modify readPoint.
-   */
-  public boolean isEqual(byte[] left, int leftLength, long rightOffset, int rightLength) {
-    if (rightLength != leftLength) {
-      return false;
+  private final boolean isEqual(byte[] left, int leftOffset, int rightIndex, int rightFrom, int length) {
+    if (length == 0) {
+      return true;
     }
-    int rightIndex = getBufferIndex(rightOffset), rightFrom = getOffset(rightOffset);
+    // invariant: rightLength = leftLength
+    // rightOffset is within the buffers
     byte[] rightBuffer = writeBuffers.get(rightIndex);
-    if (rightFrom + rightLength <= wbSize) {
+    if (rightFrom + length <= wbSize) {
       // TODO: allow using unsafe optionally.
-      for (int i = 0; i < leftLength; ++i) {
-        if (left[i] != rightBuffer[rightFrom + i]) {
+      // bounds check first, to trigger bugs whether the first byte matches or not
+      if (left[leftOffset + length - 1] != rightBuffer[rightFrom + length - 1]) {
+        return false;
+      }
+      for (int i = 0; i < length; ++i) {
+        if (left[leftOffset + i] != rightBuffer[rightFrom + i]) {
           return false;
         }
       }
       return true;
     }
-    for (int i = 0; i < rightLength; ++i) {
+    for (int i = 0; i < length; ++i) {
       if (rightFrom == wbSize) {
         ++rightIndex;
         rightBuffer = writeBuffers.get(rightIndex);
         rightFrom = 0;
       }
-      if (left[i] != rightBuffer[rightFrom++]) {
+      if (left[leftOffset + i] != rightBuffer[rightFrom++]) {
         return false;
       }
     }
@@ -318,32 +319,30 @@ public final class WriteBuffers implements RandomAccessOutput {
    * Compares part of the buffer with a part of an external byte array.
    * Does not modify readPoint.
    */
-  public boolean isEqual(byte[] left, int leftOffset, int leftLength, long rightOffset, int rightLength) {
+  public boolean isEqual(byte[] left, int leftLength, long rightOffset, int rightLength) {
     if (rightLength != leftLength) {
       return false;
     }
-    int rightIndex = getBufferIndex(rightOffset), rightFrom = getOffset(rightOffset);
-    byte[] rightBuffer = writeBuffers.get(rightIndex);
-    if (rightFrom + rightLength <= wbSize) {
-      // TODO: allow using unsafe optionally.
-      for (int i = 0; i < leftLength; ++i) {
-        if (left[leftOffset + i] != rightBuffer[rightFrom + i]) {
-          return false;
-        }
-      }
-      return true;
-    }
-    for (int i = 0; i < rightLength; ++i) {
-      if (rightFrom == wbSize) {
-        ++rightIndex;
-        rightBuffer = writeBuffers.get(rightIndex);
-        rightFrom = 0;
-      }
-      if (left[leftOffset + i] != rightBuffer[rightFrom++]) {
-        return false;
-      }
+    return isEqual(left, 0, getBufferIndex(rightOffset), getOffset(rightOffset), leftLength);
+  }
+
+  /**
+   * Compares part of the buffer with a part of an external byte array.
+   * Does not modify readPoint.
+   */
+  public boolean isEqual(byte[] left, int leftOffset, int leftLength, long rightOffset, int rightLength) {
+    if (rightLength != leftLength) {
+      return false;
     }
-    return true;
+    return isEqual(left, leftOffset, getBufferIndex(rightOffset), getOffset(rightOffset), leftLength);
+  }
+
+  /**
+   * Compares the current readPosition of the buffer with the external byte array.
+   * Does not modify readPoint.
+   */
+  public boolean isEqual(byte[] left, int leftOffset, Position readPos, int length) {
+    return isEqual(left, leftOffset, readPos.bufferIndex, readPos.offset, length);
   }
 
   public void clear() {
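
For readers skimming the patch above: what the new private isEqual helper in WriteBuffers centralises is a comparison of an external byte range against bytes stored in a chain of fixed-size buffer segments, hopping to the next segment when the compared range crosses a boundary instead of indexing past the end of the current array (the source of the reported OOB). The following is a minimal, self-contained sketch of that technique only; it is not the Hive WriteBuffers class, and all names in it are illustrative.

import java.util.Arrays;
import java.util.List;

public class SegmentedCompareSketch {
  // Compare 'length' bytes of 'left' (starting at leftOffset) against the bytes stored
  // in 'buffers' starting at (rightIndex, rightFrom), where every buffer except
  // possibly the last is exactly segmentSize bytes long.
  static boolean isEqual(byte[] left, int leftOffset,
                         List<byte[]> buffers, int rightIndex, int rightFrom,
                         int length, int segmentSize) {
    if (length == 0) {
      return true;
    }
    byte[] rightBuffer = buffers.get(rightIndex);
    if (rightFrom + length <= segmentSize) {
      // Fast path: the compared range sits entirely inside one segment.
      for (int i = 0; i < length; ++i) {
        if (left[leftOffset + i] != rightBuffer[rightFrom + i]) {
          return false;
        }
      }
      return true;
    }
    // Slow path: the range spans a segment boundary, so advance to the next buffer
    // instead of reading past the end of the current one.
    for (int i = 0; i < length; ++i) {
      if (rightFrom == segmentSize) {
        rightBuffer = buffers.get(++rightIndex);
        rightFrom = 0;
      }
      if (left[leftOffset + i] != rightBuffer[rightFrom++]) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    int segmentSize = 4;
    List<byte[]> buffers = Arrays.asList(
        new byte[] {1, 2, 3, 4},
        new byte[] {5, 6, 0, 0});
    byte[] key = {3, 4, 5, 6};
    // The key starts at offset 2 of segment 0 and continues into segment 1.
    System.out.println(isEqual(key, 0, buffers, 0, 2, key.length, segmentSize)); // true
  }
}

The patched public isEqual overloads (and the new Position-based one used by VectorMapJoinFastKeyStore.equalKey) all delegate to one helper of this shape, rather than each carrying its own copy of the two loops.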


[09/27] hive git commit: HIVE-12520 : Fix schema_evol* tests on master (Ashutosh Chauhan via Prasanth J)

Posted by om...@apache.org.
HIVE-12520 : Fix schema_evol* tests on master (Ashutosh Chauhan via Prasanth J)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/22b6203d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/22b6203d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/22b6203d

Branch: refs/heads/master-fixed
Commit: 22b6203d4bf11de9b5d1b9e53da91b64c147cda4
Parents: 68e1c0b
Author: Ashutosh Chauhan <ha...@apache.org>
Authored: Wed Nov 25 09:40:38 2015 -0800
Committer: Owen O'Malley <om...@apache.org>
Committed: Mon Nov 30 11:14:35 2015 -0800

----------------------------------------------------------------------
 hbase-handler/src/test/results/positive/hbase_queries.q.out   | 1 +
 ql/src/test/queries/clientpositive/insert_values_nonascii.q   | 2 +-
 .../clientpositive/schema_evol_orc_acid_mapwork_part.q        | 2 +-
 .../clientpositive/schema_evol_orc_acid_mapwork_table.q       | 2 +-
 .../clientpositive/schema_evol_orc_acidvec_mapwork_part.q     | 2 +-
 .../clientpositive/schema_evol_orc_acidvec_mapwork_table.q    | 2 +-
 .../clientpositive/schema_evol_orc_nonvec_fetchwork_table.q   | 3 +--
 .../clientpositive/schema_evol_orc_nonvec_mapwork_table.q     | 2 +-
 .../clientpositive/schema_evol_orc_vec_mapwork_table.q        | 2 +-
 .../queries/clientpositive/schema_evol_text_fetchwork_table.q | 2 +-
 .../queries/clientpositive/schema_evol_text_mapwork_table.q   | 2 +-
 ql/src/test/results/clientpositive/limit_join_transpose.q.out | 7 +++++++
 .../results/clientpositive/tez/tez_dynpart_hashjoin_3.q.out   | 2 ++
 13 files changed, 20 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/22b6203d/hbase-handler/src/test/results/positive/hbase_queries.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_queries.q.out b/hbase-handler/src/test/results/positive/hbase_queries.q.out
index fa348e3..1ab9877 100644
--- a/hbase-handler/src/test/results/positive/hbase_queries.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_queries.q.out
@@ -176,6 +176,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string), _col1 (type: string)
               sort order: ++
               Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/22b6203d/ql/src/test/queries/clientpositive/insert_values_nonascii.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_values_nonascii.q b/ql/src/test/queries/clientpositive/insert_values_nonascii.q
index 2e4ef41..52b34e9 100644
--- a/ql/src/test/queries/clientpositive/insert_values_nonascii.q
+++ b/ql/src/test/queries/clientpositive/insert_values_nonascii.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table insert_values_nonascii(t1 char(32), t2 string);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/22b6203d/ql/src/test/queries/clientpositive/schema_evol_orc_acid_mapwork_part.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_mapwork_part.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_mapwork_part.q
index 681a4ac..d6919c1 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_mapwork_part.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_mapwork_part.q
@@ -1,7 +1,7 @@
 set hive.cli.print.header=true;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 SET hive.vectorized.execution.enabled=false;
 set hive.fetch.task.conversion=none;
 set hive.exec.dynamic.partition.mode=nonstrict;

http://git-wip-us.apache.org/repos/asf/hive/blob/22b6203d/ql/src/test/queries/clientpositive/schema_evol_orc_acid_mapwork_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_mapwork_table.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_mapwork_table.q
index bde5d50..8c933e1 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_mapwork_table.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_mapwork_table.q
@@ -1,7 +1,7 @@
 set hive.cli.print.header=true;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 SET hive.vectorized.execution.enabled=false;
 set hive.fetch.task.conversion=none;
 set hive.exec.dynamic.partition.mode=nonstrict;

http://git-wip-us.apache.org/repos/asf/hive/blob/22b6203d/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_mapwork_part.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_mapwork_part.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_mapwork_part.q
index 6b75505..a8770fc 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_mapwork_part.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_mapwork_part.q
@@ -1,7 +1,7 @@
 set hive.cli.print.header=true;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 SET hive.vectorized.execution.enabled=true;
 set hive.fetch.task.conversion=none;
 set hive.exec.dynamic.partition.mode=nonstrict;

http://git-wip-us.apache.org/repos/asf/hive/blob/22b6203d/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_mapwork_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_mapwork_table.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_mapwork_table.q
index 0edca16..d6e82f5 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_mapwork_table.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_mapwork_table.q
@@ -1,7 +1,7 @@
 set hive.cli.print.header=true;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 SET hive.vectorized.execution.enabled=true;
 set hive.fetch.task.conversion=none;
 set hive.exec.dynamic.partition.mode=nonstrict;

http://git-wip-us.apache.org/repos/asf/hive/blob/22b6203d/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_fetchwork_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_fetchwork_table.q b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_fetchwork_table.q
index 04189cd..8e7e373 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_fetchwork_table.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_fetchwork_table.q
@@ -1,7 +1,6 @@
 set hive.cli.print.header=true;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
 SET hive.vectorized.execution.enabled=false;
 set hive.fetch.task.conversion=more;
 set hive.exec.dynamic.partition.mode=nonstrict;
@@ -53,4 +52,4 @@ select a,b from table2;
 
 
 DROP TABLE table1;
-DROP TABLE table2;
\ No newline at end of file
+DROP TABLE table2;

http://git-wip-us.apache.org/repos/asf/hive/blob/22b6203d/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_mapwork_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_mapwork_table.q b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_mapwork_table.q
index db57965..6c256ea 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_mapwork_table.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_mapwork_table.q
@@ -1,7 +1,7 @@
 set hive.cli.print.header=true;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 SET hive.vectorized.execution.enabled=false;
 set hive.fetch.task.conversion=none;
 set hive.exec.dynamic.partition.mode=nonstrict;

http://git-wip-us.apache.org/repos/asf/hive/blob/22b6203d/ql/src/test/queries/clientpositive/schema_evol_orc_vec_mapwork_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_mapwork_table.q b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_mapwork_table.q
index d80d7be..6df2095 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_mapwork_table.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_mapwork_table.q
@@ -1,7 +1,7 @@
 set hive.cli.print.header=true;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 SET hive.vectorized.execution.enabled=true;
 set hive.fetch.task.conversion=none;
 set hive.exec.dynamic.partition.mode=nonstrict;

http://git-wip-us.apache.org/repos/asf/hive/blob/22b6203d/ql/src/test/queries/clientpositive/schema_evol_text_fetchwork_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_fetchwork_table.q b/ql/src/test/queries/clientpositive/schema_evol_text_fetchwork_table.q
index 0c85044..7de5367 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_text_fetchwork_table.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_fetchwork_table.q
@@ -1,7 +1,7 @@
 set hive.cli.print.header=true;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 SET hive.vectorized.execution.enabled=false;
 set hive.fetch.task.conversion=none;
 set hive.exec.dynamic.partition.mode=nonstrict;

http://git-wip-us.apache.org/repos/asf/hive/blob/22b6203d/ql/src/test/queries/clientpositive/schema_evol_text_mapwork_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_mapwork_table.q b/ql/src/test/queries/clientpositive/schema_evol_text_mapwork_table.q
index 0c85044..7de5367 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_text_mapwork_table.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_mapwork_table.q
@@ -1,7 +1,7 @@
 set hive.cli.print.header=true;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 SET hive.vectorized.execution.enabled=false;
 set hive.fetch.task.conversion=none;
 set hive.exec.dynamic.partition.mode=nonstrict;

http://git-wip-us.apache.org/repos/asf/hive/blob/22b6203d/ql/src/test/results/clientpositive/limit_join_transpose.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/limit_join_transpose.q.out b/ql/src/test/results/clientpositive/limit_join_transpose.q.out
index 8717154..3181cdf 100644
--- a/ql/src/test/results/clientpositive/limit_join_transpose.q.out
+++ b/ql/src/test/results/clientpositive/limit_join_transpose.q.out
@@ -206,6 +206,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: string), _col1 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -327,6 +328,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: string), _col1 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -498,6 +500,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: string), _col1 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -563,6 +566,7 @@ STAGE PLANS:
             Reduce Output Operator
               sort order: 
               Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -692,6 +696,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: string)
                 sort order: +
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
                 value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -758,6 +763,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string)
               sort order: +
               Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -821,6 +827,7 @@ STAGE PLANS:
               key expressions: _col2 (type: string)
               sort order: +
               Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/22b6203d/ql/src/test/results/clientpositive/tez/tez_dynpart_hashjoin_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/tez_dynpart_hashjoin_3.q.out b/ql/src/test/results/clientpositive/tez/tez_dynpart_hashjoin_3.q.out
index 29ffb47..efce91e 100644
--- a/ql/src/test/results/clientpositive/tez/tez_dynpart_hashjoin_3.q.out
+++ b/ql/src/test/results/clientpositive/tez/tez_dynpart_hashjoin_3.q.out
@@ -37,6 +37,7 @@ STAGE PLANS:
                       Reduce Output Operator
                         sort order: 
                         Statistics: Num rows: 1 Data size: 215 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
                         value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: string), _col7 (type: string), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: boolean), _col11 (type: boolean)
         Map 4 
             Map Operator Tree:
@@ -135,6 +136,7 @@ STAGE PLANS:
                       Reduce Output Operator
                         sort order: 
                         Statistics: Num rows: 1 Data size: 215 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
                         value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: string), _col7 (type: string), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: boolean), _col11 (type: boolean)
         Map 3 
             Map Operator Tree:


[23/27] hive git commit: HIVE-12008: Hive queries failing when using count(*) on column in view (Yongzhi Chen, reviewed by Szehon Ho)

Posted by om...@apache.org.
HIVE-12008: Hive queries failing when using count(*) on column in view (Yongzhi Chen, reviewed by Szehon Ho)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f2e46a2a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f2e46a2a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f2e46a2a

Branch: refs/heads/master-fixed
Commit: f2e46a2a85feace15549e07e1b4e835fbbbd6798
Parents: b94a217
Author: Yongzhi Chen <yc...@apache.org>
Authored: Sun Nov 29 01:06:22 2015 -0500
Committer: Owen O'Malley <om...@apache.org>
Committed: Mon Nov 30 11:14:37 2015 -0800

----------------------------------------------------------------------
 .../hadoop/hive/ql/exec/UnionOperator.java      |   8 +-
 .../hive/ql/optimizer/ColumnPrunerProcCtx.java  |   2 +-
 .../clientpositive/unionall_unbalancedppd.q     |  57 ++-
 .../results/clientpositive/spark/union16.q.out  |  16 +-
 .../results/clientpositive/spark/union2.q.out   |  16 +-
 .../results/clientpositive/spark/union9.q.out   |  16 +-
 .../clientpositive/spark/union_view.q.out       |  24 --
 .../results/clientpositive/tez/union2.q.out     |  28 +-
 .../results/clientpositive/tez/union9.q.out     |  40 +--
 .../tez/vector_null_projection.q.out            |   4 -
 .../test/results/clientpositive/union16.q.out   | 354 ++++++++-----------
 ql/src/test/results/clientpositive/union2.q.out |  32 +-
 ql/src/test/results/clientpositive/union9.q.out |  46 ++-
 .../results/clientpositive/union_view.q.out     |  24 --
 .../clientpositive/unionall_unbalancedppd.q.out | 261 +++++++-------
 .../clientpositive/vector_null_projection.q.out |   4 -
 16 files changed, 388 insertions(+), 544 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/f2e46a2a/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java
index a49097c..ddb23ee 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java
@@ -62,13 +62,16 @@ public class UnionOperator extends Operator<UnionDesc> implements Serializable {
     int parents = parentOperators.size();
     parentObjInspectors = new StructObjectInspector[parents];
     parentFields = new List[parents];
+    int columns = 0;
     for (int p = 0; p < parents; p++) {
       parentObjInspectors[p] = (StructObjectInspector) inputObjInspectors[p];
       parentFields[p] = parentObjInspectors[p].getAllStructFieldRefs();
+      if (p == 0 || parentFields[p].size() < columns) {
+        columns = parentFields[p].size();
+      }
     }
 
     // Get columnNames from the first parent
-    int columns = parentFields[0].size();
     ArrayList<String> columnNames = new ArrayList<String>(columns);
     for (int c = 0; c < columns; c++) {
       columnNames.add(parentFields[0].get(c).getFieldName());
@@ -81,7 +84,8 @@ public class UnionOperator extends Operator<UnionDesc> implements Serializable {
     }
 
     for (int p = 0; p < parents; p++) {
-      assert (parentFields[p].size() == columns);
+      //When columns is 0, the union operator is empty.
+      assert (columns == 0 || parentFields[p].size() == columns);
       for (int c = 0; c < columns; c++) {
         if (!columnTypeResolvers[c].updateForUnionAll(parentFields[p].get(c)
             .getFieldObjectInspector())) {
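
The UnionOperator hunk above boils down to one change: the number of columns the union exposes is now the minimum field count across all parents rather than the first parent's count, so a branch whose select list was pruned away entirely (as happens for count(*) over a view) can no longer push the type-resolution loop past the end of a shorter parent's field list. A minimal sketch of just that computation follows; the names are illustrative and not Hive's own.

import java.util.Arrays;
import java.util.List;

public class UnionWidthSketch {
  // Width of the union output: the smallest field count among the parent branches.
  static int unionColumnCount(List<List<String>> parentFields) {
    int columns = 0;
    for (int p = 0; p < parentFields.size(); p++) {
      if (p == 0 || parentFields.get(p).size() < columns) {
        columns = parentFields.get(p).size();
      }
    }
    return columns;
  }

  public static void main(String[] args) {
    // One branch still projects a column, the other was pruned down to zero columns.
    List<List<String>> parents = Arrays.asList(
        Arrays.asList("_col0"),
        Arrays.<String>asList());
    // Prints 0; using the first parent's size (1) would index past the second parent's fields.
    System.out.println(unionColumnCount(parents));
  }
}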

http://git-wip-us.apache.org/repos/asf/hive/blob/f2e46a2a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcCtx.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcCtx.java
index b18a034..7befd3b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcCtx.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcCtx.java
@@ -108,7 +108,7 @@ public class ColumnPrunerProcCtx implements NodeProcessorCtx {
         prunList = joinPrunedColLists.get(child).get((byte) tag);
       } else if (child instanceof UnionOperator) {
         List<Integer> positions = unionPrunedColLists.get(child);
-        if (positions != null && positions.size() > 0) {
+        if (positions != null) {
           prunList = new ArrayList<>();
           RowSchema oldRS = curOp.getSchema();
           for (Integer pos : positions) {
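
The ColumnPrunerProcCtx change above matters for the same query shape: when a UnionOperator child reports an empty list of needed positions (count(*) needs no columns), the context now builds an empty prune list instead of falling through to null, which downstream code treats as "keep every column". A minimal sketch of that distinction, again with illustrative names rather than Hive's classes:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class UnionPruneSketch {
  // Build the list of column names to keep from the positions a union child asked for.
  // null means "no pruning information, keep everything"; an empty list means "keep nothing".
  static List<String> pruneList(List<Integer> positions, List<String> schemaColumns) {
    List<String> prunList = null;
    if (positions != null) {   // before the fix: positions != null && positions.size() > 0
      prunList = new ArrayList<>();
      for (Integer pos : positions) {
        prunList.add(schemaColumns.get(pos));
      }
    }
    return prunList;
  }

  public static void main(String[] args) {
    List<String> schema = Arrays.asList("id", "l");
    // An empty request now yields [] ("prune all columns") instead of null ("keep all").
    System.out.println(pruneList(Collections.<Integer>emptyList(), schema));
  }
}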

http://git-wip-us.apache.org/repos/asf/hive/blob/f2e46a2a/ql/src/test/queries/clientpositive/unionall_unbalancedppd.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/unionall_unbalancedppd.q b/ql/src/test/queries/clientpositive/unionall_unbalancedppd.q
index 360ad11..a704860 100644
--- a/ql/src/test/queries/clientpositive/unionall_unbalancedppd.q
+++ b/ql/src/test/queries/clientpositive/unionall_unbalancedppd.q
@@ -4,6 +4,7 @@ set hive.optimize.ppd=true;
 
 drop table if exists union_all_bug_test_1;
 drop table if exists union_all_bug_test_2;
+
 create table if not exists union_all_bug_test_1
 (
 f1 int,
@@ -141,42 +142,36 @@ from union_all_bug_test_2
 ) A
 WHERE (f1 = 1);
 
-explain
-
-SELECT f1
-FROM (
-
-SELECT
-f1
-, if('helloworld' like '%hello%' ,f1,f2) as filter
-FROM union_all_bug_test_1
-
-union all
+drop table if exists map_json;
+drop table if exists map_json1;
+drop table if exists map_json2;
 
-select
-f1
-, 0 as filter
-from union_all_bug_test_2
-) A
-WHERE (filter = 1 and f1 = 1);
+create table map_json1(
+  id int,
+  val array<string>);
 
-SELECT f1
-FROM (
+create table map_json2(
+  id int,
+  val array<string>);
 
-SELECT
-f1
-, if('helloworld' like '%hello%' ,f1,f2) as filter
-FROM union_all_bug_test_1
+create table map_json(
+  id int,
+  val array<string>);
 
-union all
+create view explode as
+select id, l from map_json1 LATERAL VIEW explode(val) tup as l
+UNION ALL
+select id, get_json_object(l, '$.daysLeft') as l
+from map_json2 LATERAL VIEW explode(val) tup as l
+UNION ALL
+select id, l from map_json LATERAL VIEW explode(val) elems as l;
 
-select
-f1
-, 0 as filter
-from union_all_bug_test_2
-) A
-WHERE (filter = 1 and f1 = 1);
+select count(*) from explode where get_json_object(l, '$') is NOT NULL;
 
+drop view explode;
+drop table map_json;
+drop table map_json1;
+drop table map_json2;
 SELECT f1
 FROM (
 
@@ -192,4 +187,4 @@ f1
 , 0 as filter
 from union_all_bug_test_2
 ) A
-WHERE (f1 = 1 and filter = 1);
+WHERE (filter = 1 and f1 = 1);

http://git-wip-us.apache.org/repos/asf/hive/blob/f2e46a2a/ql/src/test/results/clientpositive/spark/union16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union16.q.out b/ql/src/test/results/clientpositive/spark/union16.q.out
index 5170bbd..39ba7b9 100644
--- a/ql/src/test/results/clientpositive/spark/union16.q.out
+++ b/ql/src/test/results/clientpositive/spark/union16.q.out
@@ -79,21 +79,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
-                    expressions: value (type: string)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                     Select Operator
-                      Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                       Group By Operator
                         aggregations: count(1)
                         mode: hash
                         outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                         Reduce Output Operator
                           sort order: 
-                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                           value expressions: _col0 (type: bigint)
         Reducer 2 
             Reduce Operator Tree:
@@ -101,10 +99,10 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/f2e46a2a/ql/src/test/results/clientpositive/spark/union2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union2.q.out b/ql/src/test/results/clientpositive/spark/union2.q.out
index 8b516b5..3c5b075 100644
--- a/ql/src/test/results/clientpositive/spark/union2.q.out
+++ b/ql/src/test/results/clientpositive/spark/union2.q.out
@@ -27,21 +27,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
-                    expressions: value (type: string)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                     Select Operator
-                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                       Group By Operator
                         aggregations: count(1)
                         mode: hash
                         outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                         Reduce Output Operator
                           sort order: 
-                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                           value expressions: _col0 (type: bigint)
         Reducer 2 
             Reduce Operator Tree:
@@ -49,10 +47,10 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/f2e46a2a/ql/src/test/results/clientpositive/spark/union9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union9.q.out b/ql/src/test/results/clientpositive/spark/union9.q.out
index 44c5f6b..92499c0 100644
--- a/ql/src/test/results/clientpositive/spark/union9.q.out
+++ b/ql/src/test/results/clientpositive/spark/union9.q.out
@@ -29,21 +29,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
-                    expressions: value (type: string)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                     Select Operator
-                      Statistics: Num rows: 1500 Data size: 15936 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                       Group By Operator
                         aggregations: count(1)
                         mode: hash
                         outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                         Reduce Output Operator
                           sort order: 
-                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                           value expressions: _col0 (type: bigint)
         Reducer 2 
             Reduce Operator Tree:
@@ -51,10 +49,10 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/f2e46a2a/ql/src/test/results/clientpositive/spark/union_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_view.q.out b/ql/src/test/results/clientpositive/spark/union_view.q.out
index d9d9a5b..cce7710 100644
--- a/ql/src/test/results/clientpositive/spark/union_view.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_view.q.out
@@ -641,8 +641,6 @@ STAGE PLANS:
                   filterExpr: (ds = '1') (type: boolean)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: '1' (type: string)
-                    outputColumnNames: _col0
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       Statistics: Num rows: 502 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -665,8 +663,6 @@ STAGE PLANS:
                     predicate: (ds = '1') (type: boolean)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
-                      expressions: '1' (type: string)
-                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                       Select Operator
                         Statistics: Num rows: 502 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -689,8 +685,6 @@ STAGE PLANS:
                     predicate: (ds = '1') (type: boolean)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
-                      expressions: '1' (type: string)
-                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                       Select Operator
                         Statistics: Num rows: 502 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -745,8 +739,6 @@ STAGE PLANS:
                     predicate: (ds = '2') (type: boolean)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
-                      expressions: '2' (type: string)
-                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                       Select Operator
                         Statistics: Num rows: 1002 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
@@ -766,8 +758,6 @@ STAGE PLANS:
                   filterExpr: (ds = '2') (type: boolean)
                   Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: '2' (type: string)
-                    outputColumnNames: _col0
                     Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       Statistics: Num rows: 1002 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
@@ -790,8 +780,6 @@ STAGE PLANS:
                     predicate: (ds = '2') (type: boolean)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
-                      expressions: '2' (type: string)
-                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                       Select Operator
                         Statistics: Num rows: 1002 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
@@ -846,8 +834,6 @@ STAGE PLANS:
                     predicate: (ds = '3') (type: boolean)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
-                      expressions: '3' (type: string)
-                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                       Select Operator
                         Statistics: Num rows: 1002 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
@@ -870,8 +856,6 @@ STAGE PLANS:
                     predicate: (ds = '3') (type: boolean)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
-                      expressions: '3' (type: string)
-                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                       Select Operator
                         Statistics: Num rows: 1002 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
@@ -891,8 +875,6 @@ STAGE PLANS:
                   filterExpr: (ds = '3') (type: boolean)
                   Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: '3' (type: string)
-                    outputColumnNames: _col0
                     Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       Statistics: Num rows: 1002 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
@@ -1039,8 +1021,6 @@ STAGE PLANS:
                     predicate: (ds = '4') (type: boolean)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
-                      expressions: '4' (type: string)
-                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                       Select Operator
                         Statistics: Num rows: 502 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -1063,8 +1043,6 @@ STAGE PLANS:
                     predicate: (ds = '4') (type: boolean)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
-                      expressions: '4' (type: string)
-                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                       Select Operator
                         Statistics: Num rows: 502 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -1084,8 +1062,6 @@ STAGE PLANS:
                   filterExpr: (ds = '4') (type: boolean)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: '4' (type: string)
-                    outputColumnNames: _col0
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       Statistics: Num rows: 502 Data size: 5312 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/f2e46a2a/ql/src/test/results/clientpositive/tez/union2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/union2.q.out b/ql/src/test/results/clientpositive/tez/union2.q.out
index 672faf2..c127089 100644
--- a/ql/src/test/results/clientpositive/tez/union2.q.out
+++ b/ql/src/test/results/clientpositive/tez/union2.q.out
@@ -29,41 +29,37 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
-                    expressions: value (type: string)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                     Select Operator
-                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                       Group By Operator
                         aggregations: count(1)
                         mode: hash
                         outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                         Reduce Output Operator
                           sort order: 
-                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                           value expressions: _col0 (type: bigint)
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
-                    expressions: value (type: string)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                     Select Operator
-                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                       Group By Operator
                         aggregations: count(1)
                         mode: hash
                         outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                         Reduce Output Operator
                           sort order: 
-                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                           value expressions: _col0 (type: bigint)
         Reducer 3 
             Reduce Operator Tree:
@@ -71,10 +67,10 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/f2e46a2a/ql/src/test/results/clientpositive/tez/union9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/union9.q.out b/ql/src/test/results/clientpositive/tez/union9.q.out
index ca3e532..ee26daf 100644
--- a/ql/src/test/results/clientpositive/tez/union9.q.out
+++ b/ql/src/test/results/clientpositive/tez/union9.q.out
@@ -32,61 +32,55 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
-                    expressions: value (type: string)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                     Select Operator
-                      Statistics: Num rows: 1500 Data size: 15936 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                       Group By Operator
                         aggregations: count(1)
                         mode: hash
                         outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                         Reduce Output Operator
                           sort order: 
-                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                           value expressions: _col0 (type: bigint)
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
-                    expressions: value (type: string)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                     Select Operator
-                      Statistics: Num rows: 1500 Data size: 15936 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                       Group By Operator
                         aggregations: count(1)
                         mode: hash
                         outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                         Reduce Output Operator
                           sort order: 
-                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                           value expressions: _col0 (type: bigint)
         Map 5 
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
-                    expressions: value (type: string)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                     Select Operator
-                      Statistics: Num rows: 1500 Data size: 15936 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                       Group By Operator
                         aggregations: count(1)
                         mode: hash
                         outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                         Reduce Output Operator
                           sort order: 
-                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                           value expressions: _col0 (type: bigint)
         Reducer 3 
             Reduce Operator Tree:
@@ -94,10 +88,10 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/f2e46a2a/ql/src/test/results/clientpositive/tez/vector_null_projection.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_null_projection.q.out b/ql/src/test/results/clientpositive/tez/vector_null_projection.q.out
index 79802da..88587e9 100644
--- a/ql/src/test/results/clientpositive/tez/vector_null_projection.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_null_projection.q.out
@@ -110,8 +110,6 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
-                    expressions: null (type: void)
-                    outputColumnNames: _col0
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                     Select Operator
                       Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
@@ -131,8 +129,6 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
-                    expressions: null (type: void)
-                    outputColumnNames: _col0
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                     Select Operator
                       Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE

http://git-wip-us.apache.org/repos/asf/hive/blob/f2e46a2a/ql/src/test/results/clientpositive/union16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union16.q.out b/ql/src/test/results/clientpositive/union16.q.out
index 9f7dd68..f503440 100644
--- a/ql/src/test/results/clientpositive/union16.q.out
+++ b/ql/src/test/results/clientpositive/union16.q.out
@@ -74,513 +74,463 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: value (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Union
-                Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 Select Operator
-                  Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                   Group By Operator
                     aggregations: count(1)
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: bigint)
           TableScan
             alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: value (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Union
-                Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 Select Operator
-                  Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                   Group By Operator
                     aggregations: count(1)
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: bigint)
           TableScan
             alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: value (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Union
-                Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 Select Operator
-                  Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                   Group By Operator
                     aggregations: count(1)
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: bigint)
           TableScan
             alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: value (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Union
-                Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 Select Operator
-                  Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                   Group By Operator
                     aggregations: count(1)
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: bigint)
           TableScan
             alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: value (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Union
-                Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 Select Operator
-                  Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                   Group By Operator
                     aggregations: count(1)
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: bigint)
           TableScan
             alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: value (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Union
-                Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 Select Operator
-                  Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                   Group By Operator
                     aggregations: count(1)
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: bigint)
           TableScan
             alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: value (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Union
-                Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 Select Operator
-                  Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                   Group By Operator
                     aggregations: count(1)
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: bigint)
           TableScan
             alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: value (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Union
-                Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 Select Operator
-                  Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                   Group By Operator
                     aggregations: count(1)
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: bigint)
           TableScan
             alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: value (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Union
-                Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 Select Operator
-                  Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                   Group By Operator
                     aggregations: count(1)
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: bigint)
           TableScan
             alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: value (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Union
-                Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 Select Operator
-                  Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                   Group By Operator
                     aggregations: count(1)
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: bigint)
           TableScan
             alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: value (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Union
-                Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 Select Operator
-                  Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                   Group By Operator
                     aggregations: count(1)
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: bigint)
           TableScan
             alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: value (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Union
-                Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 Select Operator
-                  Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                   Group By Operator
                     aggregations: count(1)
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: bigint)
           TableScan
             alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: value (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Union
-                Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 Select Operator
-                  Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                   Group By Operator
                     aggregations: count(1)
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: bigint)
           TableScan
             alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: value (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Union
-                Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 Select Operator
-                  Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                   Group By Operator
                     aggregations: count(1)
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: bigint)
           TableScan
             alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: value (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Union
-                Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 Select Operator
-                  Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                   Group By Operator
                     aggregations: count(1)
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: bigint)
           TableScan
             alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: value (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Union
-                Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 Select Operator
-                  Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                   Group By Operator
                     aggregations: count(1)
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: bigint)
           TableScan
             alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: value (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Union
-                Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 Select Operator
-                  Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                   Group By Operator
                     aggregations: count(1)
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: bigint)
           TableScan
             alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: value (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Union
-                Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 Select Operator
-                  Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                   Group By Operator
                     aggregations: count(1)
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: bigint)
           TableScan
             alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: value (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Union
-                Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 Select Operator
-                  Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                   Group By Operator
                     aggregations: count(1)
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: bigint)
           TableScan
             alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: value (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Union
-                Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 Select Operator
-                  Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                   Group By Operator
                     aggregations: count(1)
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: bigint)
           TableScan
             alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: value (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Union
-                Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 Select Operator
-                  Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                   Group By Operator
                     aggregations: count(1)
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: bigint)
           TableScan
             alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: value (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Union
-                Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 Select Operator
-                  Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                   Group By Operator
                     aggregations: count(1)
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: bigint)
           TableScan
             alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: value (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Union
-                Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 Select Operator
-                  Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                   Group By Operator
                     aggregations: count(1)
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: bigint)
           TableScan
             alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: value (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Union
-                Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 Select Operator
-                  Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                   Group By Operator
                     aggregations: count(1)
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: bigint)
           TableScan
             alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: value (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Union
-                Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 Select Operator
-                  Statistics: Num rows: 12500 Data size: 132800 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 12500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                   Group By Operator
                     aggregations: count(1)
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
           mode: mergepartial
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/f2e46a2a/ql/src/test/results/clientpositive/union2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union2.q.out b/ql/src/test/results/clientpositive/union2.q.out
index 69559a4..5007d01 100644
--- a/ql/src/test/results/clientpositive/union2.q.out
+++ b/ql/src/test/results/clientpositive/union2.q.out
@@ -22,53 +22,49 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: s1
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: value (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Union
-                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 Select Operator
-                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                   Group By Operator
                     aggregations: count(1)
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: bigint)
           TableScan
             alias: s1
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: value (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Union
-                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 Select Operator
-                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                   Group By Operator
                     aggregations: count(1)
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
           mode: mergepartial
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat


[18/27] hive git commit: HIVE-12503 : GBY-Join transpose rule may go in infinite loop (Ashutosh Chauhan via Jesus Camacho Rodriguez)

Posted by om...@apache.org.
HIVE-12503 : GBY-Join transpose rule may go in infinite loop (Ashutosh Chauhan via Jesus Camacho Rodriguez)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a8e61c27
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a8e61c27
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a8e61c27

Branch: refs/heads/master-fixed
Commit: a8e61c27b63e7cacbd817e848a06a17ee2208132
Parents: 18ca715
Author: Ashutosh Chauhan <ha...@apache.org>
Authored: Thu Nov 26 11:39:50 2015 -0800
Committer: Owen O'Malley <om...@apache.org>
Committed: Mon Nov 30 11:14:37 2015 -0800

----------------------------------------------------------------------
 .../rules/HiveAggregateJoinTransposeRule.java   |  17 ++-
 .../queries/clientpositive/cbo_rp_auto_join1.q  |   2 +-
 .../clientpositive/cbo_rp_auto_join1.q.out      | 125 +++++++++++++-----
 .../clientpositive/groupby_join_pushdown.q.out  | 128 +++++--------------
 4 files changed, 138 insertions(+), 134 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/a8e61c27/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveAggregateJoinTransposeRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveAggregateJoinTransposeRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveAggregateJoinTransposeRule.java
index c59af39..8cbaed0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveAggregateJoinTransposeRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveAggregateJoinTransposeRule.java
@@ -17,6 +17,7 @@
 package org.apache.hadoop.hive.ql.optimizer.calcite.rules;
 
 import org.apache.calcite.linq4j.Ord;
+import org.apache.calcite.plan.RelOptCost;
 import org.apache.calcite.plan.RelOptRuleCall;
 import org.apache.calcite.plan.RelOptUtil;
 import org.apache.calcite.rel.RelNode;
@@ -295,15 +296,13 @@ public class HiveAggregateJoinTransposeRule extends AggregateJoinTransposeRule {
           Mappings.apply(mapping, aggregate.getGroupSet()),
           Mappings.apply2(mapping, aggregate.getGroupSets()), newAggCalls);
     }
-    call.transformTo(r);
-    // Add original tree as well for potential alternative transformation.
-    // This is modeled after LoptOptimizeJoinRule::findBestOrderings() in
-    // which rule adds multiple transformations and Planner picks the cheapest one.
-    // Hep planner will automatically pick the one with lower cost among two.
-    // For details, see: HepPlanner:applyTransformationResults()
-    // In this case, if ndv is close to # of rows, i.e., group by is not resulting
-    // in any deduction, doing this transformation is not useful.
-    call.transformTo(aggregate);
+
+    // Make a cost based decision to pick cheaper plan
+    RelOptCost afterCost = RelMetadataQuery.getCumulativeCost(r);
+    RelOptCost beforeCost = RelMetadataQuery.getCumulativeCost(aggregate);
+    if (afterCost.isLt(beforeCost)) {
+      call.transformTo(r);
+    }
   }
 
   /** Computes the closure of a set of columns according to a given list of
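
The hunk above drops the old strategy of registering both the transposed plan and the original aggregate (and letting HepPlanner keep the cheaper of the two) in favour of an explicit cost check before transforming. Below is a minimal sketch of that guard, assuming only the Calcite calls that already appear in the hunk (RelMetadataQuery.getCumulativeCost and RelOptCost.isLt); the class and helper names are hypothetical and exist purely for illustration, while the committed change is inlined in HiveAggregateJoinTransposeRule itself.

import org.apache.calcite.plan.RelOptCost;
import org.apache.calcite.plan.RelOptRuleCall;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.metadata.RelMetadataQuery;

final class CostBasedTransformSketch {
  // Hypothetical helper for illustration; the real patch inlines this logic in the rule.
  static void transformIfCheaper(RelOptRuleCall call, RelNode transposed, RelNode original) {
    // Cumulative cost of the plan with the Aggregate pushed below the Join ...
    RelOptCost afterCost = RelMetadataQuery.getCumulativeCost(transposed);
    // ... versus the cost of leaving the Aggregate on top of the Join.
    RelOptCost beforeCost = RelMetadataQuery.getCumulativeCost(original);
    if (afterCost.isLt(beforeCost)) {
      // Register the rewritten plan only when it is strictly cheaper; otherwise the
      // original tree stays, so the rule cannot keep firing on its own output, which
      // is the infinite-loop scenario described in HIVE-12503.
      call.transformTo(transposed);
    }
  }
}

One consequence visible in the cbo_rp_auto_join1.q.out diff further down: when the group-by is judged cheaper below the join, the plan gains extra map-reduce stages that pre-aggregate each side and then multiply the partial counts, instead of counting after the join.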

http://git-wip-us.apache.org/repos/asf/hive/blob/a8e61c27/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q b/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q
index b906db2..cbfb5d5 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q
@@ -3,7 +3,7 @@ set hive.stats.fetch.column.stats=true;
 ;
 
 set hive.exec.reducers.max = 1;
-
+set hive.transpose.aggr.join=true;
 -- SORT_QUERY_RESULTS
 
 CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/a8e61c27/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out b/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out
index 6537a8a..59a2f12 100644
--- a/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out
+++ b/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out
@@ -933,8 +933,10 @@ select count(*) from
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
+  Stage-2 depends on stages: Stage-1, Stage-4
+  Stage-3 depends on stages: Stage-2
+  Stage-4 is a root stage
+  Stage-0 depends on stages: Stage-3
 
 STAGE PLANS:
   Stage: Stage-1
@@ -947,41 +949,67 @@ STAGE PLANS:
               predicate: (key + 1) is not null (type: boolean)
               Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
-                expressions: key (type: int)
+                expressions: (key + 1) (type: int)
                 outputColumnNames: key
                 Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  key expressions: (key + 1) (type: int)
-                  sort order: +
-                  Map-reduce partition columns: (key + 1) (type: int)
-                  Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
+                Group By Operator
+                  aggregations: count()
+                  keys: key (type: int)
+                  mode: hash
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: int)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: int)
+                    Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                    value expressions: _col1 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          keys: KEY._col0 (type: int)
+          mode: mergepartial
+          outputColumnNames: key, $f1
+          Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
           TableScan
-            alias: subq2:a
-            Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: (key + 1) is not null (type: boolean)
-              Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: key (type: int)
-                outputColumnNames: key
-                Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  key expressions: (key + 1) (type: int)
-                  sort order: +
-                  Map-reduce partition columns: (key + 1) (type: int)
-                  Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
+            Reduce Output Operator
+              key expressions: key (type: int)
+              sort order: +
+              Map-reduce partition columns: key (type: int)
+              Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+              value expressions: $f1 (type: bigint)
+          TableScan
+            Reduce Output Operator
+              key expressions: key (type: int)
+              sort order: +
+              Map-reduce partition columns: key (type: int)
+              Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+              value expressions: $f1 (type: bigint)
       Reduce Operator Tree:
         Join Operator
           condition map:
                Inner Join 0 to 1
           keys:
-            0 (key + 1) (type: int)
-            1 (key + 1) (type: int)
-          Statistics: Num rows: 5 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+            0 key (type: int)
+            1 key (type: int)
+          outputColumnNames: $f1, $f10
+          Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
-            Statistics: Num rows: 5 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+            expressions: ($f1 * $f10) (type: bigint)
+            outputColumnNames: $f4
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
             Group By Operator
-              aggregations: count()
+              aggregations: $sum0($f4)
               mode: hash
               outputColumnNames: _col0
               Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
@@ -992,7 +1020,7 @@ STAGE PLANS:
                     output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                     serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
-  Stage: Stage-2
+  Stage: Stage-3
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -1002,7 +1030,7 @@ STAGE PLANS:
               value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
-          aggregations: count(VALUE._col0)
+          aggregations: $sum0(VALUE._col0)
           mode: mergepartial
           outputColumnNames: $f0
           Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
@@ -1014,6 +1042,45 @@ STAGE PLANS:
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
+  Stage: Stage-4
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: subq2:a
+            Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
+            Filter Operator
+              predicate: (key + 1) is not null (type: boolean)
+              Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
+              Select Operator
+                expressions: (key + 1) (type: int)
+                outputColumnNames: key
+                Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
+                Group By Operator
+                  aggregations: count()
+                  keys: key (type: int)
+                  mode: hash
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: int)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: int)
+                    Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                    value expressions: _col1 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          keys: KEY._col0 (type: int)
+          mode: mergepartial
+          outputColumnNames: key, $f1
+          Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
   Stage: Stage-0
     Fetch Operator
       limit: -1

http://git-wip-us.apache.org/repos/asf/hive/blob/a8e61c27/ql/src/test/results/clientpositive/groupby_join_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_join_pushdown.q.out b/ql/src/test/results/clientpositive/groupby_join_pushdown.q.out
index 17df98f..c18e62f 100644
--- a/ql/src/test/results/clientpositive/groupby_join_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/groupby_join_pushdown.q.out
@@ -540,10 +540,8 @@ GROUP BY f.ctinyint, g.ctinyint
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-4
-  Stage-3 depends on stages: Stage-2
-  Stage-4 is a root stage
-  Stage-0 depends on stages: Stage-3
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-1
@@ -559,49 +557,28 @@ STAGE PLANS:
                 expressions: ctinyint (type: tinyint), cint (type: int), cbigint (type: bigint)
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: sum(_col2)
-                  keys: _col0 (type: tinyint), _col1 (type: int)
-                  mode: hash
-                  outputColumnNames: _col0, _col1, _col2
+                Reduce Output Operator
+                  key expressions: _col1 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col1 (type: int)
                   Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: tinyint), _col1 (type: int)
-                    sort order: ++
-                    Map-reduce partition columns: _col0 (type: tinyint), _col1 (type: int)
-                    Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col2 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: sum(VALUE._col0)
-          keys: KEY._col0 (type: tinyint), KEY._col1 (type: int)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 3072 Data size: 660491 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col1 (type: int)
-              sort order: +
-              Map-reduce partition columns: _col1 (type: int)
-              Statistics: Num rows: 3072 Data size: 660491 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: tinyint), _col2 (type: bigint)
+                  value expressions: _col0 (type: tinyint), _col2 (type: bigint)
           TableScan
-            Reduce Output Operator
-              key expressions: _col1 (type: int)
-              sort order: +
-              Map-reduce partition columns: _col1 (type: int)
-              Statistics: Num rows: 3072 Data size: 660491 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: tinyint), _col2 (type: bigint)
+            alias: f
+            Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: cint is not null (type: boolean)
+              Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: ctinyint (type: tinyint), cint (type: int)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col1 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col1 (type: int)
+                  Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col0 (type: tinyint)
       Reduce Operator Tree:
         Join Operator
           condition map:
@@ -609,18 +586,18 @@ STAGE PLANS:
           keys:
             0 _col1 (type: int)
             1 _col1 (type: int)
-          outputColumnNames: _col0, _col2, _col3, _col5
-          Statistics: Num rows: 3379 Data size: 726540 Basic stats: COMPLETE Column stats: NONE
+          outputColumnNames: _col0, _col2, _col3
+          Statistics: Num rows: 6758 Data size: 1453080 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col0 (type: tinyint), _col3 (type: tinyint), (_col2 * _col5) (type: bigint)
-            outputColumnNames: _col0, _col3, _col6
-            Statistics: Num rows: 3379 Data size: 726540 Basic stats: COMPLETE Column stats: NONE
+            expressions: _col0 (type: tinyint), _col3 (type: tinyint), _col2 (type: bigint)
+            outputColumnNames: _col0, _col3, _col2
+            Statistics: Num rows: 6758 Data size: 1453080 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
-              aggregations: sum(_col6)
+              aggregations: sum(_col2)
               keys: _col0 (type: tinyint), _col3 (type: tinyint)
               mode: hash
               outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 3379 Data size: 726540 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 6758 Data size: 1453080 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
                 table:
@@ -628,7 +605,7 @@ STAGE PLANS:
                     output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                     serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
-  Stage: Stage-3
+  Stage: Stage-2
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -636,7 +613,7 @@ STAGE PLANS:
               key expressions: _col0 (type: tinyint), _col1 (type: tinyint)
               sort order: ++
               Map-reduce partition columns: _col0 (type: tinyint), _col1 (type: tinyint)
-              Statistics: Num rows: 3379 Data size: 726540 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 6758 Data size: 1453080 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col2 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -644,54 +621,15 @@ STAGE PLANS:
           keys: KEY._col0 (type: tinyint), KEY._col1 (type: tinyint)
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 1689 Data size: 363162 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 3379 Data size: 726540 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 1689 Data size: 363162 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 3379 Data size: 726540 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: f
-            Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: cint is not null (type: boolean)
-              Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: ctinyint (type: tinyint), cint (type: int)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count()
-                  keys: _col0 (type: tinyint), _col1 (type: int)
-                  mode: hash
-                  outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: tinyint), _col1 (type: int)
-                    sort order: ++
-                    Map-reduce partition columns: _col0 (type: tinyint), _col1 (type: int)
-                    Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col2 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: tinyint), KEY._col1 (type: int)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 3072 Data size: 660491 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1


[04/27] hive git commit: HIVE-12331 : Remove hive.enforce.bucketing & hive.enforce.sorting configs (Ashutosh Chauhan via Jason Dere)

Posted by om...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/results/clientpositive/spark/smb_mapjoin_12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_12.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_12.q.out
index eeb18b0..93a7ca4 100644
--- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_12.q.out
+++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_12.q.out
@@ -139,6 +139,8 @@ STAGE DEPENDENCIES:
 STAGE PLANS:
   Stage: Stage-1
     Spark
+      Edges:
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -165,37 +167,14 @@ STAGE PLANS:
                         expressions: _col0 (type: int), _col7 (type: string)
                         outputColumnNames: _col0, _col1
                         Statistics: Num rows: 825 Data size: 8764 Basic stats: COMPLETE Column stats: NONE
-                        File Output Operator
-                          compressed: false
-                          GlobalTableId: 1
-#### A masked pattern was here ####
-                          NumFilesPerFileSink: 1
-                          Static Partition Specification: ds=1/
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
                           Statistics: Num rows: 825 Data size: 8764 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                          table:
-                              input format: org.apache.hadoop.mapred.TextInputFormat
-                              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                              properties:
-                                SORTBUCKETCOLSPREFIX TRUE
-                                bucket_count 16
-                                bucket_field_name key
-                                columns key,value
-                                columns.comments 
-                                columns.types int:string
-#### A masked pattern was here ####
-                                name default.test_table3
-                                partition_columns ds
-                                partition_columns.types string
-                                serialization.ddl struct test_table3 { i32 key, string value}
-                                serialization.format 1
-                                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                              name: default.test_table3
-                          TotalFiles: 1
-                          GatherStats: true
-                          MultiFileSpray: false
+                          tag: -1
+                          value expressions: _col1 (type: string)
+                          auto parallelism: false
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -249,6 +228,44 @@ STAGE PLANS:
                   name: default.test_table1
             Truncated Path -> Alias:
               /test_table1/ds=1 [a]
+        Reducer 2 
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 825 Data size: 8764 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  GlobalTableId: 1
+#### A masked pattern was here ####
+                  NumFilesPerFileSink: 16
+                  Static Partition Specification: ds=1/
+                  Statistics: Num rows: 825 Data size: 8764 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      properties:
+                        SORTBUCKETCOLSPREFIX TRUE
+                        bucket_count 16
+                        bucket_field_name key
+                        columns key,value
+                        columns.comments 
+                        columns.types int:string
+#### A masked pattern was here ####
+                        name default.test_table3
+                        partition_columns ds
+                        partition_columns.types string
+                        serialization.ddl struct test_table3 { i32 key, string value}
+                        serialization.format 1
+                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.test_table3
+                  TotalFiles: 16
+                  GatherStats: true
+                  MultiFileSpray: true
 
   Stage: Stage-0
     Move Operator
@@ -406,6 +423,8 @@ STAGE DEPENDENCIES:
 STAGE PLANS:
   Stage: Stage-1
     Spark
+      Edges:
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -432,37 +451,14 @@ STAGE PLANS:
                         expressions: _col0 (type: int), concat(_col1, _col7) (type: string)
                         outputColumnNames: _col0, _col1
                         Statistics: Num rows: 1696 Data size: 18097 Basic stats: COMPLETE Column stats: NONE
-                        File Output Operator
-                          compressed: false
-                          GlobalTableId: 1
-#### A masked pattern was here ####
-                          NumFilesPerFileSink: 1
-                          Static Partition Specification: ds=2/
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
                           Statistics: Num rows: 1696 Data size: 18097 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                          table:
-                              input format: org.apache.hadoop.mapred.TextInputFormat
-                              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                              properties:
-                                SORTBUCKETCOLSPREFIX TRUE
-                                bucket_count 16
-                                bucket_field_name key
-                                columns key,value
-                                columns.comments 
-                                columns.types int:string
-#### A masked pattern was here ####
-                                name default.test_table3
-                                partition_columns ds
-                                partition_columns.types string
-                                serialization.ddl struct test_table3 { i32 key, string value}
-                                serialization.format 1
-                                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                              name: default.test_table3
-                          TotalFiles: 1
-                          GatherStats: true
-                          MultiFileSpray: false
+                          tag: -1
+                          value expressions: _col1 (type: string)
+                          auto parallelism: false
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -516,6 +512,44 @@ STAGE PLANS:
                   name: default.test_table3
             Truncated Path -> Alias:
               /test_table3/ds=1 [a]
+        Reducer 2 
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1696 Data size: 18097 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  GlobalTableId: 1
+#### A masked pattern was here ####
+                  NumFilesPerFileSink: 16
+                  Static Partition Specification: ds=2/
+                  Statistics: Num rows: 1696 Data size: 18097 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      properties:
+                        SORTBUCKETCOLSPREFIX TRUE
+                        bucket_count 16
+                        bucket_field_name key
+                        columns key,value
+                        columns.comments 
+                        columns.types int:string
+#### A masked pattern was here ####
+                        name default.test_table3
+                        partition_columns ds
+                        partition_columns.types string
+                        serialization.ddl struct test_table3 { i32 key, string value}
+                        serialization.format 1
+                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.test_table3
+                  TotalFiles: 16
+                  GatherStats: true
+                  MultiFileSpray: true
 
   Stage: Stage-0
     Move Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/results/clientpositive/spark/stats9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats9.q.out b/ql/src/test/results/clientpositive/spark/stats9.q.out
index 7eae829..70175b2 100644
--- a/ql/src/test/results/clientpositive/spark/stats9.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats9.q.out
@@ -65,7 +65,7 @@ Retention:          	0
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	2                   
 	numRows             	1000                
 	rawDataSize         	10603               
 	totalSize           	11603               

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/results/clientpositive/stats9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats9.q.out b/ql/src/test/results/clientpositive/stats9.q.out
index e7c7743..e00fc80 100644
--- a/ql/src/test/results/clientpositive/stats9.q.out
+++ b/ql/src/test/results/clientpositive/stats9.q.out
@@ -62,7 +62,7 @@ Retention:          	0
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	2                   
 	numRows             	1000                
 	rawDataSize         	10603               
 	totalSize           	11603               

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/results/clientpositive/tez/orc_analyze.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/orc_analyze.q.out b/ql/src/test/results/clientpositive/tez/orc_analyze.q.out
index bc46852..1156feb 100644
--- a/ql/src/test/results/clientpositive/tez/orc_analyze.q.out
+++ b/ql/src/test/results/clientpositive/tez/orc_analyze.q.out
@@ -904,10 +904,10 @@ Table:              	orc_create_people
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	4                   
 	numRows             	50                  
-	rawDataSize         	21950               
-	totalSize           	2102                
+	rawDataSize         	21975               
+	totalSize           	5263                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -947,10 +947,10 @@ Table:              	orc_create_people
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	4                   
 	numRows             	50                  
-	rawDataSize         	22050               
-	totalSize           	2118                
+	rawDataSize         	22043               
+	totalSize           	5336                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -1002,10 +1002,10 @@ Table:              	orc_create_people
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	4                   
 	numRows             	50                  
-	rawDataSize         	21950               
-	totalSize           	2102                
+	rawDataSize         	21975               
+	totalSize           	5263                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -1045,10 +1045,10 @@ Table:              	orc_create_people
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	4                   
 	numRows             	50                  
-	rawDataSize         	22050               
-	totalSize           	2118                
+	rawDataSize         	22043               
+	totalSize           	5336                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -1100,10 +1100,10 @@ Table:              	orc_create_people
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	4                   
 	numRows             	50                  
-	rawDataSize         	21950               
-	totalSize           	2102                
+	rawDataSize         	21975               
+	totalSize           	5263                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -1143,10 +1143,10 @@ Table:              	orc_create_people
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	4                   
 	numRows             	50                  
-	rawDataSize         	22050               
-	totalSize           	2118                
+	rawDataSize         	22043               
+	totalSize           	5336                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -1249,10 +1249,10 @@ Table:              	orc_create_people
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	4                   
 	numRows             	50                  
-	rawDataSize         	21950               
-	totalSize           	2102                
+	rawDataSize         	21975               
+	totalSize           	5263                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -1292,10 +1292,10 @@ Table:              	orc_create_people
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	4                   
 	numRows             	50                  
-	rawDataSize         	22050               
-	totalSize           	2118                
+	rawDataSize         	22043               
+	totalSize           	5336                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 


[13/27] hive git commit: HIVE-12487 : Fix broken MiniLlap tests (Aleksei Statkevich via Ashutosh Chauhan)

Posted by om...@apache.org.
HIVE-12487 : Fix broken MiniLlap tests (Aleksei Statkevich via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/39a82524
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/39a82524
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/39a82524

Branch: refs/heads/master-fixed
Commit: 39a82524ce0dd6d5d30629a9ea02a8bedde2cfd3
Parents: f679a5e
Author: Aleksei Statkevich <me...@gmail.com>
Authored: Sat Nov 21 23:44:00 2015 -0800
Committer: Owen O'Malley <om...@apache.org>
Committed: Mon Nov 30 11:14:36 2015 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java     | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/39a82524/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java
index 532d242..8ebfe69 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java
@@ -140,7 +140,7 @@ public class HiveSplitGenerator extends InputInitializer {
               TezMapReduceSplitsGrouper.TEZ_GROUPING_SPLIT_MIN_SIZE,
               TezMapReduceSplitsGrouper.TEZ_GROUPING_SPLIT_MIN_SIZE_DEFAULT);
           final long preferredSplitSize = Math.min(blockSize / 2, minGrouping);
-          HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, preferredSplitSize);
+          HiveConf.setLongVar(jobConf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, preferredSplitSize);
           LOG.info("The preferred split size is " + preferredSplitSize);
         }
 


[06/27] hive git commit: HIVE-12331 : Remove hive.enforce.bucketing & hive.enforce.sorting configs (Ashutosh Chauhan via Jason Dere)

Posted by om...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q b/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q
index 9110dcc..82c18e2 100644
--- a/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q
+++ b/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_danp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
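The hunks that follow all make the same mechanical change: the line `set hive.enforce.bucketing=true;` is deleted because HIVE-12331 removes that config (and hive.enforce.sorting), so bucketing enforcement no longer needs to be requested per test. What each ACID test still sets up is exactly what the surrounding diff context shows; a minimal sketch with a hypothetical table name, assuming the post-HIVE-12331 behavior where bucketing is enforced unconditionally:

set hive.support.concurrency=true;
set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-- hive.enforce.bucketing is gone after this change; no extra setting is needed
-- for the CLUSTERED BY clause below to be honored.
create table acid_example(a int, b varchar(128))
  clustered by (a) into 2 buckets
  stored as orc TBLPROPERTIES ('transactional'='true');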
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/delete_all_partitioned.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/delete_all_partitioned.q b/ql/src/test/queries/clientpositive/delete_all_partitioned.q
index f082b6d..122b3e2 100644
--- a/ql/src/test/queries/clientpositive/delete_all_partitioned.q
+++ b/ql/src/test/queries/clientpositive/delete_all_partitioned.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_dap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/delete_orig_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/delete_orig_table.q b/ql/src/test/queries/clientpositive/delete_orig_table.q
index fd23f4b..88cc830 100644
--- a/ql/src/test/queries/clientpositive/delete_orig_table.q
+++ b/ql/src/test/queries/clientpositive/delete_orig_table.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/delete_orig_table;
 dfs -copyFromLocal ../../data/files/alltypesorc ${system:test.tmp.dir}/delete_orig_table/00000_0; 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/delete_tmp_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/delete_tmp_table.q b/ql/src/test/queries/clientpositive/delete_tmp_table.q
index eb6c095..c7d8aa6 100644
--- a/ql/src/test/queries/clientpositive/delete_tmp_table.q
+++ b/ql/src/test/queries/clientpositive/delete_tmp_table.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create temporary table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/delete_where_no_match.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/delete_where_no_match.q b/ql/src/test/queries/clientpositive/delete_where_no_match.q
index 8ed979d..f13dd73 100644
--- a/ql/src/test/queries/clientpositive/delete_where_no_match.q
+++ b/ql/src/test/queries/clientpositive/delete_where_no_match.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_dwnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/delete_where_non_partitioned.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/delete_where_non_partitioned.q b/ql/src/test/queries/clientpositive/delete_where_non_partitioned.q
index dac5375..de1ca36 100644
--- a/ql/src/test/queries/clientpositive/delete_where_non_partitioned.q
+++ b/ql/src/test/queries/clientpositive/delete_where_non_partitioned.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_dwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/delete_where_partitioned.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/delete_where_partitioned.q b/ql/src/test/queries/clientpositive/delete_where_partitioned.q
index f84f26a..2fb950f 100644
--- a/ql/src/test/queries/clientpositive/delete_where_partitioned.q
+++ b/ql/src/test/queries/clientpositive/delete_where_partitioned.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_dwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/delete_whole_partition.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/delete_whole_partition.q b/ql/src/test/queries/clientpositive/delete_whole_partition.q
index 8228a32..3d6c1e5 100644
--- a/ql/src/test/queries/clientpositive/delete_whole_partition.q
+++ b/ql/src/test/queries/clientpositive/delete_whole_partition.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_dwhp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/disable_merge_for_bucketing.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/disable_merge_for_bucketing.q b/ql/src/test/queries/clientpositive/disable_merge_for_bucketing.q
index d7f9ac8..c67426f 100644
--- a/ql/src/test/queries/clientpositive/disable_merge_for_bucketing.q
+++ b/ql/src/test/queries/clientpositive/disable_merge_for_bucketing.q
@@ -1,6 +1,6 @@
 set hive.explain.user=false;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-set hive.enforce.bucketing = true;
+;
 set hive.exec.reducers.max = 1;
 set hive.merge.mapredfiles=true;
 set hive.merge.sparkfiles=true;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/dynpart_sort_opt_bucketing.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_opt_bucketing.q b/ql/src/test/queries/clientpositive/dynpart_sort_opt_bucketing.q
index 95ae6e3..91fe7c5 100644
--- a/ql/src/test/queries/clientpositive/dynpart_sort_opt_bucketing.q
+++ b/ql/src/test/queries/clientpositive/dynpart_sort_opt_bucketing.q
@@ -15,8 +15,8 @@ load data local inpath '../../data/files/sortdp.txt' overwrite into table t1_sta
 
 set hive.optimize.sort.dynamic.partition=true;
 set hive.exec.dynamic.partition.mode=nonstrict;
-set hive.enforce.sorting=true;
-set hive.enforce.bucketing=true;
+
+
 
 drop table t1;
 
@@ -44,8 +44,8 @@ dfs -cat ${hiveconf:hive.metastore.warehouse.dir}/t1/e=epart/000008_0;
 
 set hive.optimize.sort.dynamic.partition=false;
 set hive.exec.dynamic.partition.mode=nonstrict;
-set hive.enforce.sorting=true;
-set hive.enforce.bucketing=true;
+
+
 
 -- disable sorted dynamic partition optimization to make sure the results are correct
 drop table t1;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q b/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q
index 3d0cdcd..422b711 100644
--- a/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q
+++ b/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q
@@ -5,8 +5,8 @@ set hive.exec.max.dynamic.partitions=1000;
 set hive.exec.max.dynamic.partitions.pernode=1000;
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.vectorized.execution.enabled=true;
-set hive.enforce.bucketing=false;
-set hive.enforce.sorting=false;
+
+
 
 create table over1k(
            t tinyint,
@@ -67,8 +67,8 @@ insert overwrite table over1k_part_limit_orc partition(ds="foo", t) select si,i,
 insert overwrite table over1k_part_buck_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27;
 insert overwrite table over1k_part_buck_sort_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27;
 
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 
 -- map-reduce jobs modified by hive.optimize.sort.dynamic.partition optimization
 explain insert into table over1k_part_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by si;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q b/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q
index a1a87d8..2d21f32 100644
--- a/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q
+++ b/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q
@@ -4,8 +4,8 @@ set hive.exec.dynamic.partition=true;
 set hive.exec.max.dynamic.partitions=1000;
 set hive.exec.max.dynamic.partitions.pernode=1000;
 set hive.exec.dynamic.partition.mode=nonstrict;
-set hive.enforce.bucketing=false;
-set hive.enforce.sorting=false;
+
+
 
 create table over1k(
            t tinyint,
@@ -61,8 +61,8 @@ insert overwrite table over1k_part_limit partition(ds="foo", t) select si,i,b,f,
 insert overwrite table over1k_part_buck partition(t) select si,i,b,f,t from over1k where t is null or t=27;
 insert overwrite table over1k_part_buck_sort partition(t) select si,i,b,f,t from over1k where t is null or t=27;
 
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 
 -- map-reduce jobs modified by hive.optimize.sort.dynamic.partition optimization
 explain insert into table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q b/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q
index c18f1cc..8fd79d6 100644
--- a/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q
+++ b/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q
@@ -4,8 +4,8 @@ set hive.exec.dynamic.partition=true;
 set hive.exec.max.dynamic.partitions=1000;
 set hive.exec.max.dynamic.partitions.pernode=1000;
 set hive.exec.dynamic.partition.mode=nonstrict;
-set hive.enforce.bucketing=false;
-set hive.enforce.sorting=false;
+
+
 
 -- SORT_QUERY_RESULTS
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/dynpart_sort_optimization_acid.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_optimization_acid.q b/ql/src/test/queries/clientpositive/dynpart_sort_optimization_acid.q
index a2f2c77..d1d1851 100644
--- a/ql/src/test/queries/clientpositive/dynpart_sort_optimization_acid.q
+++ b/ql/src/test/queries/clientpositive/dynpart_sort_optimization_acid.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 set hive.exec.dynamic.partition.mode=nonstrict;
 
 set hive.optimize.sort.dynamic.partition=false;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/encryption_insert_partition_dynamic.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/encryption_insert_partition_dynamic.q b/ql/src/test/queries/clientpositive/encryption_insert_partition_dynamic.q
index 9556eed..371fd75 100644
--- a/ql/src/test/queries/clientpositive/encryption_insert_partition_dynamic.q
+++ b/ql/src/test/queries/clientpositive/encryption_insert_partition_dynamic.q
@@ -1,7 +1,7 @@
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 -- SORT_QUERY_RESULTS
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/encryption_insert_partition_static.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/encryption_insert_partition_static.q b/ql/src/test/queries/clientpositive/encryption_insert_partition_static.q
index 69687df..b52e740 100644
--- a/ql/src/test/queries/clientpositive/encryption_insert_partition_static.q
+++ b/ql/src/test/queries/clientpositive/encryption_insert_partition_static.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 -- SORT_QUERY_RESULTS
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/enforce_order.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/enforce_order.q b/ql/src/test/queries/clientpositive/enforce_order.q
index 6a303c3..da18684 100644
--- a/ql/src/test/queries/clientpositive/enforce_order.q
+++ b/ql/src/test/queries/clientpositive/enforce_order.q
@@ -1,7 +1,7 @@
 drop table table_asc;
 drop table table_desc;
 
-set hive.enforce.sorting = true;
+
 
 create table table_asc(key string, value string) clustered by (key) sorted by (key ASC) into 1 BUCKETS;
 create table table_desc(key string, value string) clustered by (key) sorted by (key DESC) into 1 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/explainuser_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/explainuser_1.q b/ql/src/test/queries/clientpositive/explainuser_1.q
index 7ae1ed6..785bb07 100644
--- a/ql/src/test/queries/clientpositive/explainuser_1.q
+++ b/ql/src/test/queries/clientpositive/explainuser_1.q
@@ -293,7 +293,7 @@ create table if not exists nzhang_ctas3 as select key, value from src sort by ke
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 explain create temporary table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 create temporary table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
@@ -334,8 +334,8 @@ CREATE TABLE smb_input(key int, value int);
 LOAD DATA LOCAL INPATH '../../data/files/in4.txt' into table smb_input;
 LOAD DATA LOCAL INPATH '../../data/files/in5.txt' into table smb_input;
 
-set hive.enforce.sorting = true;
-set hive.enforce.bucketing = true;
+
+;
 
 CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
 CREATE TABLE smb_input2(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/explainuser_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/explainuser_2.q b/ql/src/test/queries/clientpositive/explainuser_2.q
index 560a086..da107dc 100644
--- a/ql/src/test/queries/clientpositive/explainuser_2.q
+++ b/ql/src/test/queries/clientpositive/explainuser_2.q
@@ -174,8 +174,8 @@ load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_m
 load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
 load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
 
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting = true;
+
+
 set hive.optimize.bucketingsorting=false;
 insert overwrite table tab_part partition (ds='2008-04-08')
 select key,value from srcbucket_mapjoin_part;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/explainuser_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/explainuser_3.q b/ql/src/test/queries/clientpositive/explainuser_3.q
index f604d38..dd86b76 100644
--- a/ql/src/test/queries/clientpositive/explainuser_3.q
+++ b/ql/src/test/queries/clientpositive/explainuser_3.q
@@ -2,7 +2,7 @@ set hive.explain.user=true;
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.vectorized.execution.enabled=true;
 
@@ -140,8 +140,8 @@ load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_m
 load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
 load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
 
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting = true;
+
+
 set hive.optimize.bucketingsorting=false;
 insert overwrite table tab_part partition (ds='2008-04-08')
 select key,value from srcbucket_mapjoin_part;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/groupby_sort_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_1.q b/ql/src/test/queries/clientpositive/groupby_sort_1.q
index ed888bb..4909f16 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_1.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_1.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/groupby_sort_10.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_10.q b/ql/src/test/queries/clientpositive/groupby_sort_10.q
index b3ddd42..4c650f9 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_10.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_10.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/groupby_sort_11.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_11.q b/ql/src/test/queries/clientpositive/groupby_sort_11.q
index 19063f6..32a9658 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_11.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_11.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 set hive.map.groupby.sorted=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/groupby_sort_1_23.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_1_23.q b/ql/src/test/queries/clientpositive/groupby_sort_1_23.q
index a6e18c7..d81e190 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_1_23.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_1_23.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/groupby_sort_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_2.q b/ql/src/test/queries/clientpositive/groupby_sort_2.q
index 1574048..8e5a82e 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_2.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_2.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/groupby_sort_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_3.q b/ql/src/test/queries/clientpositive/groupby_sort_3.q
index b835f95..1686170 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_3.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_3.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/groupby_sort_4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_4.q b/ql/src/test/queries/clientpositive/groupby_sort_4.q
index a61c551..db1a884 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_4.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_4.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/groupby_sort_5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_5.q b/ql/src/test/queries/clientpositive/groupby_sort_5.q
index 0d4ba42..98eed1f 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_5.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_5.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/groupby_sort_6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_6.q b/ql/src/test/queries/clientpositive/groupby_sort_6.q
index 752b927..2ed58d8 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_6.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_6.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/groupby_sort_7.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_7.q b/ql/src/test/queries/clientpositive/groupby_sort_7.q
index 3e3ba7a..e8cc047 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_7.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_7.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/groupby_sort_8.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_8.q b/ql/src/test/queries/clientpositive/groupby_sort_8.q
index f0d3a59..98f363d 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_8.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_8.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/groupby_sort_9.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_9.q b/ql/src/test/queries/clientpositive/groupby_sort_9.q
index 296336d..eadcbb8 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_9.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_9.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q b/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q
index 76a1725..9a7104d 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
 set hive.groupby.skewindata=true;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/groupby_sort_skew_1_23.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_skew_1_23.q b/ql/src/test/queries/clientpositive/groupby_sort_skew_1_23.q
index 1b24aec..0a94b3a 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_skew_1_23.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_skew_1_23.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
 set hive.groupby.skewindata=true;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/groupby_sort_test_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_test_1.q b/ql/src/test/queries/clientpositive/groupby_sort_test_1.q
index 70eef33..faf5c99 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_test_1.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_test_1.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/infer_bucket_sort_bucketed_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/infer_bucket_sort_bucketed_table.q b/ql/src/test/queries/clientpositive/infer_bucket_sort_bucketed_table.q
index d69f49f..72682c5 100644
--- a/ql/src/test/queries/clientpositive/infer_bucket_sort_bucketed_table.q
+++ b/ql/src/test/queries/clientpositive/infer_bucket_sort_bucketed_table.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.infer.bucket.sort=true;
 
 -- Test writing to a bucketed table, the output should be bucketed by the bucketing key into the

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/infer_bucket_sort_map_operators.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/infer_bucket_sort_map_operators.q b/ql/src/test/queries/clientpositive/infer_bucket_sort_map_operators.q
index 16e8715..becbc9d 100644
--- a/ql/src/test/queries/clientpositive/infer_bucket_sort_map_operators.q
+++ b/ql/src/test/queries/clientpositive/infer_bucket_sort_map_operators.q
@@ -1,6 +1,6 @@
 set hive.exec.infer.bucket.sort=true;
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 
 -- This tests inferring how data is bucketed/sorted from the operators in the reducer
 -- and populating that information in partitions' metadata, in particular, this tests

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/insert_acid_dynamic_partition.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_acid_dynamic_partition.q b/ql/src/test/queries/clientpositive/insert_acid_dynamic_partition.q
index c544589..62cc2f9 100644
--- a/ql/src/test/queries/clientpositive/insert_acid_dynamic_partition.q
+++ b/ql/src/test/queries/clientpositive/insert_acid_dynamic_partition.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 set hive.exec.dynamic.partition.mode=nonstrict;
 
 create table acid_dynamic(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/insert_acid_not_bucketed.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_acid_not_bucketed.q b/ql/src/test/queries/clientpositive/insert_acid_not_bucketed.q
index a29b1e7..9e61fd7 100644
--- a/ql/src/test/queries/clientpositive/insert_acid_not_bucketed.q
+++ b/ql/src/test/queries/clientpositive/insert_acid_not_bucketed.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_notbucketed(a int, b varchar(128)) stored as orc;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/insert_into_with_schema2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_into_with_schema2.q b/ql/src/test/queries/clientpositive/insert_into_with_schema2.q
index a5352ec..0f21289 100644
--- a/ql/src/test/queries/clientpositive/insert_into_with_schema2.q
+++ b/ql/src/test/queries/clientpositive/insert_into_with_schema2.q
@@ -1,6 +1,6 @@
 -- SORT_QUERY_RESULTS;
 
-set hive.enforce.bucketing=true;
+
 
 create table studenttab10k (age2 int);
 insert into studenttab10k values(1);

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/insert_nonacid_from_acid.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_nonacid_from_acid.q b/ql/src/test/queries/clientpositive/insert_nonacid_from_acid.q
index 639cb31..10a1d68 100644
--- a/ql/src/test/queries/clientpositive/insert_nonacid_from_acid.q
+++ b/ql/src/test/queries/clientpositive/insert_nonacid_from_acid.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 -- SORT_QUERY_RESULTS
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/insert_orig_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_orig_table.q b/ql/src/test/queries/clientpositive/insert_orig_table.q
index c38bd5a..a969d1b 100644
--- a/ql/src/test/queries/clientpositive/insert_orig_table.q
+++ b/ql/src/test/queries/clientpositive/insert_orig_table.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_iot(
     ctinyint TINYINT,

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/insert_update_delete.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_update_delete.q b/ql/src/test/queries/clientpositive/insert_update_delete.q
index 8dbb77c..170a18f 100644
--- a/ql/src/test/queries/clientpositive/insert_update_delete.q
+++ b/ql/src/test/queries/clientpositive/insert_update_delete.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_iud(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/insert_values_acid_not_bucketed.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_values_acid_not_bucketed.q b/ql/src/test/queries/clientpositive/insert_values_acid_not_bucketed.q
index fc0cb10..3530507 100644
--- a/ql/src/test/queries/clientpositive/insert_values_acid_not_bucketed.q
+++ b/ql/src/test/queries/clientpositive/insert_values_acid_not_bucketed.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_notbucketed(a int, b varchar(128)) stored as orc;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q b/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q
index 71e0e73..5f8b8b5 100644
--- a/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q
+++ b/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q
@@ -1,7 +1,7 @@
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table ivdp(i int,
                  de decimal(5,2),

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q b/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q
index d0e7b0f..3b9e98b 100644
--- a/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q
+++ b/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_ivnp(ti tinyint,
                  si smallint,

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/insert_values_orig_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_values_orig_table.q b/ql/src/test/queries/clientpositive/insert_values_orig_table.q
index 8fef549..63a9263 100644
--- a/ql/src/test/queries/clientpositive/insert_values_orig_table.q
+++ b/ql/src/test/queries/clientpositive/insert_values_orig_table.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_ivot(
     ctinyint TINYINT,

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/insert_values_partitioned.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_values_partitioned.q b/ql/src/test/queries/clientpositive/insert_values_partitioned.q
index c8223f7..e78b92e 100644
--- a/ql/src/test/queries/clientpositive/insert_values_partitioned.q
+++ b/ql/src/test/queries/clientpositive/insert_values_partitioned.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_ivp(ti tinyint,
                  si smallint,

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/insert_values_tmp_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_values_tmp_table.q b/ql/src/test/queries/clientpositive/insert_values_tmp_table.q
index 4e4c39e..07737c0 100644
--- a/ql/src/test/queries/clientpositive/insert_values_tmp_table.q
+++ b/ql/src/test/queries/clientpositive/insert_values_tmp_table.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create temporary table acid_ivtt(i int, de decimal(5,2), vc varchar(128)) clustered by (vc) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/insertoverwrite_bucket.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insertoverwrite_bucket.q b/ql/src/test/queries/clientpositive/insertoverwrite_bucket.q
index 50f9361..1fe911f 100644
--- a/ql/src/test/queries/clientpositive/insertoverwrite_bucket.q
+++ b/ql/src/test/queries/clientpositive/insertoverwrite_bucket.q
@@ -15,8 +15,8 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY ',';
 insert into table bucketinput values ("firstinsert1");
 insert into table bucketinput values ("firstinsert2");
 insert into table bucketinput values ("firstinsert3");
-set hive.enforce.bucketing = true; 
-set hive.enforce.sorting=true;
+; 
+
 insert overwrite table bucketoutput1 select * from bucketinput where data like 'first%'; 
 CREATE TABLE temp1
 (

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/join_nullsafe.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join_nullsafe.q b/ql/src/test/queries/clientpositive/join_nullsafe.q
index d6eda77..e96cc71 100644
--- a/ql/src/test/queries/clientpositive/join_nullsafe.q
+++ b/ql/src/test/queries/clientpositive/join_nullsafe.q
@@ -35,8 +35,8 @@ CREATE TABLE smb_input(key int, value int);
 LOAD DATA LOCAL INPATH '../../data/files/in4.txt' into table smb_input;
 LOAD DATA LOCAL INPATH '../../data/files/in5.txt' into table smb_input;
 
-set hive.enforce.sorting = true;
-set hive.enforce.bucketing = true;
+
+;
 
 -- smbs
 CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/load_dyn_part2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/load_dyn_part2.q b/ql/src/test/queries/clientpositive/load_dyn_part2.q
index e804971..ba9e7a8 100644
--- a/ql/src/test/queries/clientpositive/load_dyn_part2.q
+++ b/ql/src/test/queries/clientpositive/load_dyn_part2.q
@@ -8,7 +8,7 @@ create table if not exists nzhang_part_bucket (key string, value string)
 describe extended nzhang_part_bucket;
 
 set hive.merge.mapfiles=false;
-set hive.enforce.bucketing=true;
+
 set hive.exec.dynamic.partition=true;
 
 explain

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/mergejoin.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/mergejoin.q b/ql/src/test/queries/clientpositive/mergejoin.q
index 95f3d01..6cd3929 100644
--- a/ql/src/test/queries/clientpositive/mergejoin.q
+++ b/ql/src/test/queries/clientpositive/mergejoin.q
@@ -27,8 +27,8 @@ load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_m
 load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
 load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
 
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting = true;
+
+
 set hive.optimize.bucketingsorting=false;
 insert overwrite table tab_part partition (ds='2008-04-08')
 select key,value from srcbucket_mapjoin_part;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/orc_empty_files.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_empty_files.q b/ql/src/test/queries/clientpositive/orc_empty_files.q
index d3cbc5a..dd0e81a 100644
--- a/ql/src/test/queries/clientpositive/orc_empty_files.q
+++ b/ql/src/test/queries/clientpositive/orc_empty_files.q
@@ -4,7 +4,7 @@ ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde'
 STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat' 
 OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat';
 
-set hive.enforce.bucketing=true;
+
 set hive.exec.reducers.max = 1;
 set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/partition_wise_fileformat14.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat14.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat14.q
index f4d4d73..886f906 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat14.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat14.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 CREATE TABLE tbl1(key int, value string) PARTITIONED by (ds string)

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/quotedid_smb.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/quotedid_smb.q b/ql/src/test/queries/clientpositive/quotedid_smb.q
index 38d1b99..25d1f0e 100644
--- a/ql/src/test/queries/clientpositive/quotedid_smb.q
+++ b/ql/src/test/queries/clientpositive/quotedid_smb.q
@@ -2,8 +2,8 @@
 set hive.support.quoted.identifiers=column;
 
 
-set hive.enforce.bucketing = true;  
-set hive.enforce.sorting = true;  
+;  
+  
 create table src_b(`x+1` string, `!@#$%^&*()_q` string)  
 clustered by (`!@#$%^&*()_q`) sorted by (`!@#$%^&*()_q`) into 2 buckets
 ;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/reduce_deduplicate.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/reduce_deduplicate.q b/ql/src/test/queries/clientpositive/reduce_deduplicate.q
index 2e26adc..5386590 100644
--- a/ql/src/test/queries/clientpositive/reduce_deduplicate.q
+++ b/ql/src/test/queries/clientpositive/reduce_deduplicate.q
@@ -1,5 +1,5 @@
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-set hive.enforce.bucketing = true;
+;
 set hive.exec.reducers.max = 1;
 set hive.exec.script.trust = true;
 set hive.optimize.reducededuplication = true;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/sample10.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/sample10.q b/ql/src/test/queries/clientpositive/sample10.q
index d9fe744..3aec841 100644
--- a/ql/src/test/queries/clientpositive/sample10.q
+++ b/ql/src/test/queries/clientpositive/sample10.q
@@ -2,7 +2,7 @@ set hive.exec.submitviachild=true;
 set hive.exec.submit.local.task.via.child=true;
 set hive.exec.dynamic.partition=true;
 set hive.exec.dynamic.partition.mode=nonstrict;
-set hive.enforce.bucketing=true;
+
 set hive.exec.reducers.max=4;
 set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 set hive.default.fileformat=RCFILE;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/smb_mapjoin9.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin9.q b/ql/src/test/queries/clientpositive/smb_mapjoin9.q
index b959022..06820a0 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin9.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin9.q
@@ -28,8 +28,8 @@ FROM hive_test_smb_bucket1 a JOIN
 hive_test_smb_bucket2 b
 ON a.key = b.key WHERE a.ds = '2010-10-15' and b.ds='2010-10-15' and  b.key IS NOT NULL;
 
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.exec.reducers.max = 1;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/smb_mapjoin_11.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_11.q b/ql/src/test/queries/clientpositive/smb_mapjoin_11.q
index 9300638..97e3b08 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_11.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_11.q
@@ -1,8 +1,8 @@
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
 set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 
@@ -17,8 +17,8 @@ FROM src
 INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *
 INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT *;
 
-set hive.enforce.bucketing=false;
-set hive.enforce.sorting=false;
+
+
 
 -- Create a bucketed table
 CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) INTO 16 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/smb_mapjoin_12.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_12.q b/ql/src/test/queries/clientpositive/smb_mapjoin_12.q
index 9f9748c..7a506ad 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_12.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_12.q
@@ -1,8 +1,8 @@
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
 set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 
@@ -19,8 +19,8 @@ INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT *
 INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2') SELECT *
 INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '3') SELECT *;
 
-set hive.enforce.bucketing=false;
-set hive.enforce.sorting=false;
+
+
 
 -- Create a bucketed table
 CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/smb_mapjoin_13.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_13.q b/ql/src/test/queries/clientpositive/smb_mapjoin_13.q
index 056bccd..ca15fc3 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_13.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_13.q
@@ -1,8 +1,8 @@
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
 set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/smb_mapjoin_14.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_14.q b/ql/src/test/queries/clientpositive/smb_mapjoin_14.q
index f03f92e..b8b939c 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_14.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_14.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/smb_mapjoin_15.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_15.q b/ql/src/test/queries/clientpositive/smb_mapjoin_15.q
index 1e77a60..4a16c0d 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_15.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_15.q
@@ -1,8 +1,8 @@
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
 set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/smb_mapjoin_16.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_16.q b/ql/src/test/queries/clientpositive/smb_mapjoin_16.q
index 3a3a872..bff11dd 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_16.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_16.q
@@ -1,8 +1,8 @@
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
 set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/smb_mapjoin_17.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_17.q b/ql/src/test/queries/clientpositive/smb_mapjoin_17.q
index 238f7e0..276bfcc 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_17.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_17.q
@@ -1,8 +1,8 @@
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
 set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/smb_mapjoin_18.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_18.q b/ql/src/test/queries/clientpositive/smb_mapjoin_18.q
index 02e3fb5..a89bc1c 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_18.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_18.q
@@ -1,7 +1,7 @@
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/smb_mapjoin_19.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_19.q b/ql/src/test/queries/clientpositive/smb_mapjoin_19.q
index ca48f61..4695e5a 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_19.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_19.q
@@ -1,7 +1,7 @@
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/smb_mapjoin_20.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_20.q b/ql/src/test/queries/clientpositive/smb_mapjoin_20.q
index f70e7d5..aa1e9fa 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_20.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_20.q
@@ -1,7 +1,7 @@
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/smb_mapjoin_21.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_21.q b/ql/src/test/queries/clientpositive/smb_mapjoin_21.q
index 09edfc1..08b13aa 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_21.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_21.q
@@ -1,7 +1,7 @@
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/smb_mapjoin_22.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_22.q b/ql/src/test/queries/clientpositive/smb_mapjoin_22.q
index 676f46a..2f1a6b6 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_22.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_22.q
@@ -1,7 +1,7 @@
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/smb_mapjoin_25.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_25.q b/ql/src/test/queries/clientpositive/smb_mapjoin_25.q
index 683341b..498d337 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_25.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_25.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.exec.max.dynamic.partitions.pernode=1000000;
 set hive.exec.max.dynamic.partitions=1000000;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/smb_mapjoin_6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_6.q b/ql/src/test/queries/clientpositive/smb_mapjoin_6.q
index 2884a11..b50c494 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_6.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_6.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/smb_mapjoin_7.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_7.q b/ql/src/test/queries/clientpositive/smb_mapjoin_7.q
index ca1c749..d192036 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_7.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_7.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/smb_mapjoin_8.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_8.q b/ql/src/test/queries/clientpositive/smb_mapjoin_8.q
index 4b4e167..dc6a35f 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_8.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_8.q
@@ -1,6 +1,6 @@
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/sort_merge_join_desc_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/sort_merge_join_desc_1.q b/ql/src/test/queries/clientpositive/sort_merge_join_desc_1.q
index 8002ec5..efa0178 100644
--- a/ql/src/test/queries/clientpositive/sort_merge_join_desc_1.q
+++ b/ql/src/test/queries/clientpositive/sort_merge_join_desc_1.q
@@ -1,7 +1,7 @@
 drop table table_desc1;
 drop table table_desc2;
 
-set hive.enforce.sorting = true;
+
 
 create table table_desc1(key string, value string) clustered by (key) sorted by (key DESC) into 1 BUCKETS;
 create table table_desc2(key string, value string) clustered by (key) sorted by (key DESC) into 1 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/sort_merge_join_desc_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/sort_merge_join_desc_2.q b/ql/src/test/queries/clientpositive/sort_merge_join_desc_2.q
index bd0cdb2..2b787b8 100644
--- a/ql/src/test/queries/clientpositive/sort_merge_join_desc_2.q
+++ b/ql/src/test/queries/clientpositive/sort_merge_join_desc_2.q
@@ -1,7 +1,7 @@
 drop table table_desc1;
 drop table table_desc2;
 
-set hive.enforce.sorting = true;
+
 
 create table table_desc1(key string, value string) clustered by (key, value)
 sorted by (key DESC, value DESC) into 1 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/sort_merge_join_desc_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/sort_merge_join_desc_3.q b/ql/src/test/queries/clientpositive/sort_merge_join_desc_3.q
index a109878..bdc550c 100644
--- a/ql/src/test/queries/clientpositive/sort_merge_join_desc_3.q
+++ b/ql/src/test/queries/clientpositive/sort_merge_join_desc_3.q
@@ -1,7 +1,7 @@
 drop table table_desc1;
 drop table table_desc2;
 
-set hive.enforce.sorting = true;
+
 
 create table table_desc1(key string, value string) clustered by (key, value)
 sorted by (key DESC, value ASC) into 1 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/sort_merge_join_desc_4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/sort_merge_join_desc_4.q b/ql/src/test/queries/clientpositive/sort_merge_join_desc_4.q
index 0bc5071..89a26fd 100644
--- a/ql/src/test/queries/clientpositive/sort_merge_join_desc_4.q
+++ b/ql/src/test/queries/clientpositive/sort_merge_join_desc_4.q
@@ -1,7 +1,7 @@
 drop table table_desc1;
 drop table table_desc2;
 
-set hive.enforce.sorting = true;
+
 
 create table table_desc1(key string, value string) clustered by (key, value)
 sorted by (key DESC, value ASC) into 1 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/sort_merge_join_desc_5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/sort_merge_join_desc_5.q b/ql/src/test/queries/clientpositive/sort_merge_join_desc_5.q
index 3505db0..9f32f53 100644
--- a/ql/src/test/queries/clientpositive/sort_merge_join_desc_5.q
+++ b/ql/src/test/queries/clientpositive/sort_merge_join_desc_5.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 
 CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING) 
 CLUSTERED BY (key) SORTED BY (key DESC) INTO 1 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/sort_merge_join_desc_6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/sort_merge_join_desc_6.q b/ql/src/test/queries/clientpositive/sort_merge_join_desc_6.q
index 35b0535..e733538 100644
--- a/ql/src/test/queries/clientpositive/sort_merge_join_desc_6.q
+++ b/ql/src/test/queries/clientpositive/sort_merge_join_desc_6.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 
 CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING) 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/sort_merge_join_desc_7.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/sort_merge_join_desc_7.q b/ql/src/test/queries/clientpositive/sort_merge_join_desc_7.q
index 65dc7f1..fe523be 100644
--- a/ql/src/test/queries/clientpositive/sort_merge_join_desc_7.q
+++ b/ql/src/test/queries/clientpositive/sort_merge_join_desc_7.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 
 CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING) 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/sort_merge_join_desc_8.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/sort_merge_join_desc_8.q b/ql/src/test/queries/clientpositive/sort_merge_join_desc_8.q
index 2ec0849..4c0975d 100644
--- a/ql/src/test/queries/clientpositive/sort_merge_join_desc_8.q
+++ b/ql/src/test/queries/clientpositive/sort_merge_join_desc_8.q
@@ -3,7 +3,7 @@ drop table table_desc2;
 drop table table_desc3;
 drop table table_desc4;
 
-set hive.enforce.sorting = true;
+
 
 create table table_desc1(key string, value string) clustered by (key)
 sorted by (key DESC) into 1 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/stats10.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/stats10.q b/ql/src/test/queries/clientpositive/stats10.q
index a3f375e..2ad6a4f 100644
--- a/ql/src/test/queries/clientpositive/stats10.q
+++ b/ql/src/test/queries/clientpositive/stats10.q
@@ -1,6 +1,6 @@
 set datanucleus.cache.collections=false;
 set hive.stats.autogather=true;
-set hive.enforce.bucketing = true;
+;
 set hive.exec.reducers.max = 1;
 
 CREATE TABLE bucket3_1(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/tez_bmj_schema_evolution.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/tez_bmj_schema_evolution.q b/ql/src/test/queries/clientpositive/tez_bmj_schema_evolution.q
index a06bb82..631b78d 100644
--- a/ql/src/test/queries/clientpositive/tez_bmj_schema_evolution.q
+++ b/ql/src/test/queries/clientpositive/tez_bmj_schema_evolution.q
@@ -1,6 +1,6 @@
 set hive.explain.user=false;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting = true;
+
+
 set hive.optimize.bucketingsorting=false;
 set hive.auto.convert.join.noconditionaltask.size=10000;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/tez_fsstat.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/tez_fsstat.q b/ql/src/test/queries/clientpositive/tez_fsstat.q
index 90201b6..35d1f58 100644
--- a/ql/src/test/queries/clientpositive/tez_fsstat.q
+++ b/ql/src/test/queries/clientpositive/tez_fsstat.q
@@ -7,8 +7,8 @@ load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE t1 partitio
 load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE t1 partition(ds='2008-04-08');
 load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE t1 partition(ds='2008-04-08');
 
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting = true;
+
+
 set hive.optimize.bucketingsorting=false;
 set hive.stats.dbclass=fs;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/tez_smb_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/tez_smb_1.q b/ql/src/test/queries/clientpositive/tez_smb_1.q
index 03a1fea..b8147b8 100644
--- a/ql/src/test/queries/clientpositive/tez_smb_1.q
+++ b/ql/src/test/queries/clientpositive/tez_smb_1.q
@@ -17,8 +17,8 @@ load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_m
 load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
 load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
 
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting = true;
+
+
 set hive.optimize.bucketingsorting=false;
 insert overwrite table tab_part partition (ds='2008-04-08')
 select key,value from srcbucket_mapjoin_part;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/tez_smb_empty.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/tez_smb_empty.q b/ql/src/test/queries/clientpositive/tez_smb_empty.q
index 2427377..67acbcc 100644
--- a/ql/src/test/queries/clientpositive/tez_smb_empty.q
+++ b/ql/src/test/queries/clientpositive/tez_smb_empty.q
@@ -19,8 +19,8 @@ load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_m
 load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
 load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
 
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting = true;
+
+
 set hive.optimize.bucketingsorting=false;
 insert overwrite table tab_part partition (ds='2008-04-08')
 select key,value from srcbucket_mapjoin_part;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/tez_smb_main.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/tez_smb_main.q b/ql/src/test/queries/clientpositive/tez_smb_main.q
index dff5112..44bb1d8 100644
--- a/ql/src/test/queries/clientpositive/tez_smb_main.q
+++ b/ql/src/test/queries/clientpositive/tez_smb_main.q
@@ -22,8 +22,8 @@ load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_m
 load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
 load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
 
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting = true;
+
+
 set hive.optimize.bucketingsorting=false;
 insert overwrite table tab_part partition (ds='2008-04-08')
 select key,value from srcbucket_mapjoin_part;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/transform_acid.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/transform_acid.q b/ql/src/test/queries/clientpositive/transform_acid.q
index 94782f1..cf8bd24 100644
--- a/ql/src/test/queries/clientpositive/transform_acid.q
+++ b/ql/src/test/queries/clientpositive/transform_acid.q
@@ -1,7 +1,7 @@
 set hive.entity.capture.transform=true;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 -- EXCLUDE_OS_WINDOWS
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/truncate_column_buckets.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/truncate_column_buckets.q b/ql/src/test/queries/clientpositive/truncate_column_buckets.q
index a2ce215..4375843 100644
--- a/ql/src/test/queries/clientpositive/truncate_column_buckets.q
+++ b/ql/src/test/queries/clientpositive/truncate_column_buckets.q
@@ -2,7 +2,7 @@
 
 CREATE TABLE test_tab (key STRING, value STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS RCFILE;
 
-set hive.enforce.bucketing=true;
+
 
 INSERT OVERWRITE TABLE test_tab SELECT * FROM src;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q b/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q
index eb8e5a1..7534999 100644
--- a/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q
+++ b/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q
@@ -1,7 +1,7 @@
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 -- SORT_QUERY_RESULTS
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/update_all_non_partitioned.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/update_all_non_partitioned.q b/ql/src/test/queries/clientpositive/update_all_non_partitioned.q
index 3c01825..d611925 100644
--- a/ql/src/test/queries/clientpositive/update_all_non_partitioned.q
+++ b/ql/src/test/queries/clientpositive/update_all_non_partitioned.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/update_all_partitioned.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/update_all_partitioned.q b/ql/src/test/queries/clientpositive/update_all_partitioned.q
index e191d0a..d7aa24f 100644
--- a/ql/src/test/queries/clientpositive/update_all_partitioned.q
+++ b/ql/src/test/queries/clientpositive/update_all_partitioned.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_uap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/update_all_types.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/update_all_types.q b/ql/src/test/queries/clientpositive/update_all_types.q
index 0229845..543fd09 100644
--- a/ql/src/test/queries/clientpositive/update_all_types.q
+++ b/ql/src/test/queries/clientpositive/update_all_types.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 -- SORT_QUERY_RESULTS
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/update_orig_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/update_orig_table.q b/ql/src/test/queries/clientpositive/update_orig_table.q
index 416c841..f68b82d 100644
--- a/ql/src/test/queries/clientpositive/update_orig_table.q
+++ b/ql/src/test/queries/clientpositive/update_orig_table.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/update_orig_table;
 dfs -copyFromLocal ../../data/files/alltypesorc ${system:test.tmp.dir}/update_orig_table/00000_0; 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/update_tmp_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/update_tmp_table.q b/ql/src/test/queries/clientpositive/update_tmp_table.q
index a896ac7..12309e5 100644
--- a/ql/src/test/queries/clientpositive/update_tmp_table.q
+++ b/ql/src/test/queries/clientpositive/update_tmp_table.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_utt(a int, b varchar(128)) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/update_two_cols.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/update_two_cols.q b/ql/src/test/queries/clientpositive/update_two_cols.q
index b1972e5..8b1719b 100644
--- a/ql/src/test/queries/clientpositive/update_two_cols.q
+++ b/ql/src/test/queries/clientpositive/update_two_cols.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_utc(a int, b varchar(128), c float) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/update_where_no_match.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/update_where_no_match.q b/ql/src/test/queries/clientpositive/update_where_no_match.q
index d578862..8e6faaf 100644
--- a/ql/src/test/queries/clientpositive/update_where_no_match.q
+++ b/ql/src/test/queries/clientpositive/update_where_no_match.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_wnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/update_where_non_partitioned.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/update_where_non_partitioned.q b/ql/src/test/queries/clientpositive/update_where_non_partitioned.q
index 06c688f..b7a97c2 100644
--- a/ql/src/test/queries/clientpositive/update_where_non_partitioned.q
+++ b/ql/src/test/queries/clientpositive/update_where_non_partitioned.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_uwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/update_where_partitioned.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/update_where_partitioned.q b/ql/src/test/queries/clientpositive/update_where_partitioned.q
index 157712f..ba35e35 100644
--- a/ql/src/test/queries/clientpositive/update_where_partitioned.q
+++ b/ql/src/test/queries/clientpositive/update_where_partitioned.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_uwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/vector_auto_smb_mapjoin_14.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_auto_smb_mapjoin_14.q b/ql/src/test/queries/clientpositive/vector_auto_smb_mapjoin_14.q
index 32be5ee..fa7fff8 100644
--- a/ql/src/test/queries/clientpositive/vector_auto_smb_mapjoin_14.q
+++ b/ql/src/test/queries/clientpositive/vector_auto_smb_mapjoin_14.q
@@ -1,6 +1,6 @@
 SET hive.vectorized.execution.enabled=true;
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/vector_bucket.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_bucket.q b/ql/src/test/queries/clientpositive/vector_bucket.q
index 9360ce0..74cbefc 100644
--- a/ql/src/test/queries/clientpositive/vector_bucket.q
+++ b/ql/src/test/queries/clientpositive/vector_bucket.q
@@ -1,7 +1,7 @@
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
 set hive.support.concurrency=true;
-set hive.enforce.bucketing=true;
+
 
 CREATE TABLE non_orc_table(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS sequencefile; 
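
Taken together, the query-file changes in this group simply drop the explicit "set hive.enforce.bucketing=true;" (and, where present, "set hive.enforce.sorting=true;") lines from the ACID and bucketing tests, presumably because bucketed writes no longer depend on that switch being set per script. As a sketch of the net effect, the preamble of update_tmp_table.q above now reduces to something like the following; the trailing INSERT is only an illustrative way to exercise the table and is not part of these diffs:

  set hive.support.concurrency=true;
  set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;

  create table acid_utt(a int, b varchar(128))
    clustered by (b) into 2 buckets
    stored as orc TBLPROPERTIES ('transactional'='true');

  -- illustrative only: writes still land in the 2 declared buckets
  insert into table acid_utt values (1, 'one'), (2, 'two');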
 


[24/27] hive git commit: HIVE-12497: Remove HADOOP_CLIENT_OPTS from hive script (Prasanth Jayachandran reviewed by Gopal V)

Posted by om...@apache.org.
HIVE-12497: Remove HADOOP_CLIENT_OPTS from hive script (Prasanth Jayachandran reviewed by Gopal V)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e325eac9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e325eac9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e325eac9

Branch: refs/heads/master-fixed
Commit: e325eac9f6eeb8dfef5d55c4ec1b526af8cd98a6
Parents: eb1b80d
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Sun Nov 29 16:53:20 2015 -0600
Committer: Owen O'Malley <om...@apache.org>
Committed: Mon Nov 30 11:14:38 2015 -0800

----------------------------------------------------------------------
 bin/ext/version.sh |  2 +-
 bin/hive           | 23 ++++++++++++++++++-----
 2 files changed, 19 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/e325eac9/bin/ext/version.sh
----------------------------------------------------------------------
diff --git a/bin/ext/version.sh b/bin/ext/version.sh
index bb6449d..b6a237b 100644
--- a/bin/ext/version.sh
+++ b/bin/ext/version.sh
@@ -27,7 +27,7 @@ version () {
 
   # hadoop 20 or newer - skip the aux_jars option and hiveconf
   CLASS=org.apache.hive.common.util.HiveVersionInfo
-  exec $HADOOP jar $JAR $CLASS
+  exec $HADOOP jar $JAR $CLASS 2>> ${STDERR}
 }
 
 version_help () {

http://git-wip-us.apache.org/repos/asf/hive/blob/e325eac9/bin/hive
----------------------------------------------------------------------
diff --git a/bin/hive b/bin/hive
index 47d99f7..4818667 100755
--- a/bin/hive
+++ b/bin/hive
@@ -25,6 +25,8 @@ bin=`cd "$bin"; pwd`
 
 . "$bin"/hive-config.sh
 
+TMP_USER_DIR="/tmp/${USER}"
+STDERR="${TMP_USER_DIR}/stderr"
 SERVICE=""
 HELP=""
 while [ $# -gt 0 ]; do
@@ -193,18 +195,29 @@ if [ "$HADOOP_HOME" == "" ]; then
   exit 4;
 fi
 
-# to avoid errors from log4j2 automatic configuration loading
-export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS -Dlog4j.configurationFile=hive-log4j2.xml "
-
 HADOOP=$HADOOP_HOME/bin/hadoop
 if [ ! -f ${HADOOP} ]; then
   echo "Cannot find hadoop installation: \$HADOOP_HOME or \$HADOOP_PREFIX must be set or hadoop must be in the path";
   exit 4;
 fi
 
+if [ ! -d ${TMP_USER_DIR} ]; then
+  mkdir -p ${TMP_USER_DIR} 2> /dev/null
+  if [ $? -ne 0 ]; then
+    STDERR="/dev/tty"
+  fi
+fi
+
+if [ "${STDERR}" != "/dev/null" ] && [ ! -f ${STDERR} ]; then
+  touch ${STDERR} 2> /dev/null
+  if [ $? -ne 0 ]; then
+    STDERR="/dev/tty"
+  fi
+fi
+
 # Make sure we're using a compatible version of Hadoop
 if [ "x$HADOOP_VERSION" == "x" ]; then
-    HADOOP_VERSION=$($HADOOP version | awk -F"\t" '/Hadoop/ {print $0}' | cut -d' ' -f 2);
+    HADOOP_VERSION=$($HADOOP version 2>> ${STDERR} | awk -F"\t" '/Hadoop/ {print $0}' | cut -d' ' -f 2);
 fi
 
 # Save the regex to a var to workaround quoting incompatabilities
@@ -253,7 +266,7 @@ HBASE_BIN=${HBASE_BIN:-"$(which hbase)"}
 if [[ -n $HBASE_BIN ]] ; then
   # exclude ZK, PB, and Guava (See HIVE-2055)
   # depends on HBASE-8438 (hbase-0.94.14+, hbase-0.96.1+) for `hbase mapredcp` command
-  for x in $($HBASE_BIN mapredcp | tr ':' '\n') ; do
+  for x in $($HBASE_BIN mapredcp 2>> ${STDERR} | tr ':' '\n') ; do
     if [[ $x == *zookeeper* || $x == *protobuf-java* || $x == *guava* ]] ; then
       continue
     fi


[15/27] hive git commit: HIVE-8396 : Hive CliDriver command splitting can be broken when comments are present (Elliot West, reviewed by Sergey Shelukhin)

Posted by om...@apache.org.
HIVE-8396 : Hive CliDriver command splitting can be broken when comments are present (Elliot West, reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0f4065e5
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0f4065e5
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0f4065e5

Branch: refs/heads/master-fixed
Commit: 0f4065e51f64db008c1908b4e63b8828d88ef3fb
Parents: c7a939a
Author: Sergey Shelukhin <se...@apache.org>
Authored: Wed Nov 25 15:13:27 2015 -0800
Committer: Owen O'Malley <om...@apache.org>
Committed: Mon Nov 30 11:14:36 2015 -0800

----------------------------------------------------------------------
 cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java | 3 +++
 1 file changed, 3 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/0f4065e5/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
----------------------------------------------------------------------
diff --git a/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java b/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
index e04f247..e77b7f1 100644
--- a/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
+++ b/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
@@ -770,6 +770,9 @@ public class CliDriver {
       if (!prefix.equals("")) {
         prefix += '\n';
       }
+      if (line.trim().startsWith("--")) {
+        continue;
+      }
       if (line.trim().endsWith(";") && !line.trim().endsWith("\\;")) {
         line = prefix + line;
         ret = cli.processLine(line, true);
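
Reading the hunk above: while CliDriver buffers the lines of a script into a single semicolon-terminated command, the new branch drops any line whose trimmed text starts with "--" before the terminator check runs. A minimal, hypothetical script fragment of the shape this guards against (the comment text is illustrative; src is the usual sample table):

  -- populate the fixture tables first;
  SELECT key, value FROM src LIMIT 10;

Without the added guard, the first line ends with ";" and so would be dispatched to processLine as a command of its own; with the guard, the comment is skipped and only the SELECT is submitted.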


[02/27] hive git commit: HIVE-12329 :Turn on limit pushdown optimization by default (Ashutosh Chauhan via Prasanth J)

Posted by om...@apache.org.
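
The golden-file updates that follow all add "TopN Hash Memory Usage: 0.1" (and, in the plans printed with extended detail, a "TopN: <n>" line) to Reduce Output Operators, which is the visible effect of limit pushdown now being enabled by default. A small, hypothetical query of the kind whose EXPLAIN output picks up these lines, assuming hive.limit.pushdown.memory.usage is the knob the plan reports; the explicit set merely pins the new default and is not part of this commit:

  -- pin the new default explicitly; the plans report this value verbatim
  set hive.limit.pushdown.memory.usage=0.1;

  EXPLAIN
  SELECT key, value
  FROM src
  ORDER BY key
  LIMIT 10;
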
http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/smb_mapjoin_15.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_15.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_15.q.out
index 64a3ea2..8608187 100644
--- a/ql/src/test/results/clientpositive/smb_mapjoin_15.q.out
+++ b/ql/src/test/results/clientpositive/smb_mapjoin_15.q.out
@@ -124,6 +124,8 @@ STAGE PLANS:
                     key expressions: _col0 (type: int)
                     sort order: +
                     tag: -1
+                    TopN: 10
+                    TopN Hash Memory Usage: 0.1
                     value expressions: _col1 (type: string), _col2 (type: int), _col3 (type: string)
                     auto parallelism: false
       Path -> Alias:
@@ -382,6 +384,8 @@ STAGE PLANS:
                     key expressions: _col0 (type: int)
                     sort order: +
                     tag: -1
+                    TopN: 10
+                    TopN Hash Memory Usage: 0.1
                     value expressions: _col1 (type: int), _col2 (type: string), _col3 (type: int), _col4 (type: int), _col5 (type: string)
                     auto parallelism: false
       Path -> Alias:
@@ -588,6 +592,8 @@ STAGE PLANS:
                     key expressions: _col0 (type: int)
                     sort order: +
                     tag: -1
+                    TopN: 10
+                    TopN Hash Memory Usage: 0.1
                     value expressions: _col1 (type: int), _col2 (type: string), _col3 (type: int), _col4 (type: int), _col5 (type: string)
                     auto parallelism: false
       Path -> Alias:
@@ -827,6 +833,8 @@ STAGE PLANS:
                     sort order: +
                     Statistics: Num rows: 137 Data size: 1984 Basic stats: COMPLETE Column stats: NONE
                     tag: -1
+                    TopN: 10
+                    TopN Hash Memory Usage: 0.1
                     value expressions: _col1 (type: int), _col2 (type: string), _col3 (type: int), _col4 (type: int), _col5 (type: string)
                     auto parallelism: false
       Local Work:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/spark/auto_join_without_localtask.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_join_without_localtask.q.out b/ql/src/test/results/clientpositive/spark/auto_join_without_localtask.q.out
index a234ff5..932fdcc 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join_without_localtask.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join_without_localtask.q.out
@@ -65,6 +65,7 @@ STAGE PLANS:
                   key expressions: _col0 (type: string), _col1 (type: string)
                   sort order: ++
                   Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
         Reducer 3 
             Reduce Operator Tree:
               Select Operator
@@ -238,6 +239,7 @@ STAGE PLANS:
                   key expressions: _col0 (type: string), _col1 (type: string)
                   sort order: ++
                   Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
         Reducer 4 
             Reduce Operator Tree:
               Select Operator
@@ -411,6 +413,7 @@ STAGE PLANS:
                   key expressions: _col0 (type: string), _col1 (type: string)
                   sort order: ++
                   Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
         Reducer 4 
             Reduce Operator Tree:
               Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/spark/bucketmapjoin7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucketmapjoin7.q.out b/ql/src/test/results/clientpositive/spark/bucketmapjoin7.q.out
index a99cb74..84f68a3 100644
--- a/ql/src/test/results/clientpositive/spark/bucketmapjoin7.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucketmapjoin7.q.out
@@ -262,6 +262,8 @@ STAGE PLANS:
                           sort order: ++
                           Statistics: Num rows: 378 Data size: 1514 Basic stats: COMPLETE Column stats: NONE
                           tag: -1
+                          TopN: 1
+                          TopN Hash Memory Usage: 0.1
                           auto parallelism: false
             Local Work:
               Map Reduce Local Work

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/spark/ctas.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ctas.q.out b/ql/src/test/results/clientpositive/spark/ctas.q.out
index 6bb2f76..1ba74aa 100644
--- a/ql/src/test/results/clientpositive/spark/ctas.q.out
+++ b/ql/src/test/results/clientpositive/spark/ctas.q.out
@@ -51,6 +51,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: string), _col1 (type: string)
                       sort order: ++
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
@@ -64,6 +65,7 @@ STAGE PLANS:
                     key expressions: _col0 (type: string), _col1 (type: string)
                     sort order: ++
                     Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
         Reducer 3 
             Reduce Operator Tree:
               Select Operator
@@ -194,6 +196,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: string), _col1 (type: string)
                       sort order: ++
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
@@ -207,6 +210,7 @@ STAGE PLANS:
                     key expressions: _col0 (type: string), _col1 (type: string)
                     sort order: ++
                     Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
         Reducer 3 
             Reduce Operator Tree:
               Select Operator
@@ -337,6 +341,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: double), _col1 (type: string)
                       sort order: ++
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
@@ -350,6 +355,7 @@ STAGE PLANS:
                     key expressions: _col0 (type: double), _col1 (type: string)
                     sort order: ++
                     Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
         Reducer 3 
             Reduce Operator Tree:
               Select Operator
@@ -544,6 +550,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: string), _col1 (type: string)
                       sort order: ++
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
@@ -557,6 +564,7 @@ STAGE PLANS:
                     key expressions: _col0 (type: string), _col1 (type: string)
                     sort order: ++
                     Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
         Reducer 3 
             Reduce Operator Tree:
               Select Operator
@@ -732,6 +740,8 @@ STAGE PLANS:
                       sort order: ++
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       tag: -1
+                      TopN: 10
+                      TopN Hash Memory Usage: 0.1
                       auto parallelism: false
             Path -> Alias:
 #### A masked pattern was here ####
@@ -797,6 +807,8 @@ STAGE PLANS:
                     sort order: ++
                     Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                     tag: -1
+                    TopN: 10
+                    TopN Hash Memory Usage: 0.1
                     auto parallelism: false
         Reducer 3 
             Needs Tagging: false

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/spark/groupby7_noskew_multi_single_reducer.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby7_noskew_multi_single_reducer.q.out b/ql/src/test/results/clientpositive/spark/groupby7_noskew_multi_single_reducer.q.out
index a6ea423..d6514c9 100644
--- a/ql/src/test/results/clientpositive/spark/groupby7_noskew_multi_single_reducer.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby7_noskew_multi_single_reducer.q.out
@@ -115,6 +115,7 @@ STAGE PLANS:
                     key expressions: _col0 (type: string)
                     sort order: +
                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
                     value expressions: _col1 (type: double)
 
   Stage: Stage-0

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/spark/groupby_complex_types_multi_single_reducer.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_complex_types_multi_single_reducer.q.out b/ql/src/test/results/clientpositive/spark/groupby_complex_types_multi_single_reducer.q.out
index 65bdf2c..cf173e1 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_complex_types_multi_single_reducer.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_complex_types_multi_single_reducer.q.out
@@ -99,6 +99,7 @@ STAGE PLANS:
                   key expressions: _col0 (type: array<string>)
                   sort order: +
                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: bigint)
         Reducer 3 
             Reduce Operator Tree:
@@ -129,6 +130,7 @@ STAGE PLANS:
                   key expressions: _col0 (type: map<string,string>)
                   sort order: +
                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: bigint)
         Reducer 5 
             Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer.q.out b/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer.q.out
index 130d15b..0cc0867 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer.q.out
@@ -416,6 +416,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: string), _col1 (type: bigint)
                       sort order: ++
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col2 (type: string), _col3 (type: double), _col4 (type: bigint)
                 Filter Operator
                   predicate: (KEY._col0 >= 5) (type: boolean)

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/spark/input1_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/input1_limit.q.out b/ql/src/test/results/clientpositive/spark/input1_limit.q.out
index 42e5478..c41093d 100644
--- a/ql/src/test/results/clientpositive/spark/input1_limit.q.out
+++ b/ql/src/test/results/clientpositive/spark/input1_limit.q.out
@@ -61,6 +61,7 @@ STAGE PLANS:
                         Reduce Output Operator
                           sort order: 
                           Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                          TopN Hash Memory Usage: 0.1
                           value expressions: _col0 (type: string), _col1 (type: string)
         Map 5 
             Map Operator Tree:
@@ -80,6 +81,7 @@ STAGE PLANS:
                         Reduce Output Operator
                           sort order: 
                           Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                          TopN Hash Memory Usage: 0.1
                           value expressions: _col0 (type: string), _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/spark/insert_into1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/insert_into1.q.out b/ql/src/test/results/clientpositive/spark/insert_into1.q.out
index 00e71ba..4e1726c 100644
--- a/ql/src/test/results/clientpositive/spark/insert_into1.q.out
+++ b/ql/src/test/results/clientpositive/spark/insert_into1.q.out
@@ -43,6 +43,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: string)
                       sort order: +
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -156,6 +157,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: string)
                       sort order: +
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -269,6 +271,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: string)
                       sort order: +
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/spark/insert_into2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/insert_into2.q.out b/ql/src/test/results/clientpositive/spark/insert_into2.q.out
index 26bf1e6..3f77a85 100644
--- a/ql/src/test/results/clientpositive/spark/insert_into2.q.out
+++ b/ql/src/test/results/clientpositive/spark/insert_into2.q.out
@@ -43,6 +43,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: string)
                       sort order: +
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -197,6 +198,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: string)
                       sort order: +
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -320,6 +322,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: string)
                       sort order: +
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/spark/insert_into3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/insert_into3.q.out b/ql/src/test/results/clientpositive/spark/insert_into3.q.out
index 0531556..a6fac23 100644
--- a/ql/src/test/results/clientpositive/spark/insert_into3.q.out
+++ b/ql/src/test/results/clientpositive/spark/insert_into3.q.out
@@ -40,7 +40,7 @@ STAGE PLANS:
     Spark
       Edges:
         Reducer 2 <- Map 4 (SORT, 1)
-        Reducer 3 <- Map 4 (SORT, 1)
+        Reducer 3 <- Map 5 (SORT, 1)
 #### A masked pattern was here ####
       Vertices:
         Map 4 
@@ -56,6 +56,21 @@ STAGE PLANS:
                       key expressions: _col0 (type: string), _col1 (type: string)
                       sort order: ++
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string), _col1 (type: string)
+                      sort order: ++
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
@@ -203,6 +218,7 @@ STAGE PLANS:
                       Reduce Output Operator
                         sort order: 
                         Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
                         value expressions: _col0 (type: string), _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/spark/join_vc.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join_vc.q.out b/ql/src/test/results/clientpositive/spark/join_vc.q.out
index c9c243e..3197c12 100644
--- a/ql/src/test/results/clientpositive/spark/join_vc.q.out
+++ b/ql/src/test/results/clientpositive/spark/join_vc.q.out
@@ -109,6 +109,7 @@ STAGE PLANS:
                     key expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: string)
                     sort order: +++
                     Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
         Reducer 4 
             Reduce Operator Tree:
               Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/spark/lateral_view_explode2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/lateral_view_explode2.q.out b/ql/src/test/results/clientpositive/spark/lateral_view_explode2.q.out
index 07cfca8..3cb6853 100644
--- a/ql/src/test/results/clientpositive/spark/lateral_view_explode2.q.out
+++ b/ql/src/test/results/clientpositive/spark/lateral_view_explode2.q.out
@@ -41,6 +41,7 @@ STAGE PLANS:
                             sort order: ++
                             Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
                             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                            TopN Hash Memory Usage: 0.1
                     Select Operator
                       expressions: array(1,2,3) (type: array<int>)
                       outputColumnNames: _col0
@@ -61,6 +62,7 @@ STAGE PLANS:
                               sort order: ++
                               Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
                               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                              TopN Hash Memory Usage: 0.1
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/spark/load_dyn_part14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part14.q.out b/ql/src/test/results/clientpositive/spark/load_dyn_part14.q.out
index 9349a1c..05ccf8b 100644
--- a/ql/src/test/results/clientpositive/spark/load_dyn_part14.q.out
+++ b/ql/src/test/results/clientpositive/spark/load_dyn_part14.q.out
@@ -80,6 +80,7 @@ STAGE PLANS:
                       Reduce Output Operator
                         sort order: 
                         Statistics: Num rows: 2 Data size: 172 Basic stats: COMPLETE Column stats: COMPLETE
+                        TopN Hash Memory Usage: 0.1
         Reducer 2 
             Reduce Operator Tree:
               Limit

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/spark/order.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/order.q.out b/ql/src/test/results/clientpositive/spark/order.q.out
index b0abedf..2ef2242 100644
--- a/ql/src/test/results/clientpositive/spark/order.q.out
+++ b/ql/src/test/results/clientpositive/spark/order.q.out
@@ -28,6 +28,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: string)
                       sort order: +
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -100,6 +101,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: string)
                       sort order: -
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/spark/order2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/order2.q.out b/ql/src/test/results/clientpositive/spark/order2.q.out
index 29eb835..628ea29 100644
--- a/ql/src/test/results/clientpositive/spark/order2.q.out
+++ b/ql/src/test/results/clientpositive/spark/order2.q.out
@@ -32,6 +32,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: string)
                       sort order: +
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/spark/pcr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/pcr.q.out b/ql/src/test/results/clientpositive/spark/pcr.q.out
index ca82202..82593d4 100644
--- a/ql/src/test/results/clientpositive/spark/pcr.q.out
+++ b/ql/src/test/results/clientpositive/spark/pcr.q.out
@@ -4662,6 +4662,8 @@ STAGE PLANS:
                       sort order: +
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       tag: -1
+                      TopN: 10
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col1 (type: string)
                       auto parallelism: false
             Path -> Alias:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/spark/script_pipe.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/script_pipe.q.out b/ql/src/test/results/clientpositive/spark/script_pipe.q.out
index 083a114..9fe8433 100644
--- a/ql/src/test/results/clientpositive/spark/script_pipe.q.out
+++ b/ql/src/test/results/clientpositive/spark/script_pipe.q.out
@@ -30,6 +30,7 @@ STAGE PLANS:
                       Reduce Output Operator
                         sort order: 
                         Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
                         value expressions: _col0 (type: string), _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/spark/skewjoin_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/skewjoin_noskew.q.out b/ql/src/test/results/clientpositive/spark/skewjoin_noskew.q.out
index 4025885..51d014e 100644
--- a/ql/src/test/results/clientpositive/spark/skewjoin_noskew.q.out
+++ b/ql/src/test/results/clientpositive/spark/skewjoin_noskew.q.out
@@ -127,6 +127,7 @@ STAGE PLANS:
                     key expressions: _col0 (type: string)
                     sort order: +
                     Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
                     value expressions: _col1 (type: string)
         Reducer 3 
             Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/spark/smb_mapjoin_13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_13.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_13.q.out
index 73856eb..f635562 100644
--- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_13.q.out
+++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_13.q.out
@@ -160,6 +160,8 @@ STAGE PLANS:
                           sort order: +
                           Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                           tag: -1
+                          TopN: 10
+                          TopN Hash Memory Usage: 0.1
                           value expressions: _col1 (type: string), _col2 (type: int), _col3 (type: string)
                           auto parallelism: false
             Path -> Alias:
@@ -445,6 +447,8 @@ STAGE PLANS:
                           sort order: +
                           Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                           tag: -1
+                          TopN: 10
+                          TopN Hash Memory Usage: 0.1
                           value expressions: _col1 (type: string), _col2 (type: int), _col3 (type: string)
                           auto parallelism: false
             Local Work:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/spark/smb_mapjoin_15.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_15.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_15.q.out
index afb1b22..09e6d44 100644
--- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_15.q.out
+++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_15.q.out
@@ -132,6 +132,8 @@ STAGE PLANS:
                           sort order: +
                           Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                           tag: -1
+                          TopN: 10
+                          TopN Hash Memory Usage: 0.1
                           value expressions: _col1 (type: string), _col2 (type: int), _col3 (type: string)
                           auto parallelism: false
             Path -> Alias:
@@ -402,6 +404,8 @@ STAGE PLANS:
                           sort order: +
                           Statistics: Num rows: 137 Data size: 1984 Basic stats: COMPLETE Column stats: NONE
                           tag: -1
+                          TopN: 10
+                          TopN Hash Memory Usage: 0.1
                           value expressions: _col1 (type: int), _col2 (type: string), _col3 (type: int), _col4 (type: int), _col5 (type: string)
                           auto parallelism: false
             Path -> Alias:
@@ -620,6 +624,8 @@ STAGE PLANS:
                           sort order: +
                           Statistics: Num rows: 137 Data size: 1984 Basic stats: COMPLETE Column stats: NONE
                           tag: -1
+                          TopN: 10
+                          TopN Hash Memory Usage: 0.1
                           value expressions: _col1 (type: int), _col2 (type: string), _col3 (type: int), _col4 (type: int), _col5 (type: string)
                           auto parallelism: false
             Path -> Alias:
@@ -921,6 +927,8 @@ STAGE PLANS:
                           sort order: +
                           Statistics: Num rows: 137 Data size: 1984 Basic stats: COMPLETE Column stats: NONE
                           tag: -1
+                          TopN: 10
+                          TopN Hash Memory Usage: 0.1
                           value expressions: _col1 (type: int), _col2 (type: string), _col3 (type: int), _col4 (type: int), _col5 (type: string)
                           auto parallelism: false
             Local Work:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/spark/subquery_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/subquery_in.q.out b/ql/src/test/results/clientpositive/spark/subquery_in.q.out
index 442b52a..7ac4cf8 100644
--- a/ql/src/test/results/clientpositive/spark/subquery_in.q.out
+++ b/ql/src/test/results/clientpositive/spark/subquery_in.q.out
@@ -298,6 +298,7 @@ STAGE PLANS:
                     sort order: ++
                     Map-reduce partition columns: p_mfgr (type: string)
                     Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
@@ -466,6 +467,7 @@ STAGE PLANS:
                     sort order: ++
                     Map-reduce partition columns: p_mfgr (type: string)
                     Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
         Reducer 2 
             Reduce Operator Tree:
               Join Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/spark/temp_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/temp_table.q.out b/ql/src/test/results/clientpositive/spark/temp_table.q.out
index 718a8a4..119d26d 100644
--- a/ql/src/test/results/clientpositive/spark/temp_table.q.out
+++ b/ql/src/test/results/clientpositive/spark/temp_table.q.out
@@ -174,6 +174,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: string)
                       sort order: +
                       Statistics: Num rows: 247 Data size: 2609 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -244,6 +245,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: string)
                       sort order: +
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col1 (type: string)
         Map 3 
             Map Operator Tree:
@@ -258,6 +260,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: string)
                       sort order: +
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/spark/union3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union3.q.out b/ql/src/test/results/clientpositive/spark/union3.q.out
index b437920..a23cab3 100644
--- a/ql/src/test/results/clientpositive/spark/union3.q.out
+++ b/ql/src/test/results/clientpositive/spark/union3.q.out
@@ -64,6 +64,7 @@ STAGE PLANS:
                       Reduce Output Operator
                         sort order: 
                         Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                        TopN Hash Memory Usage: 0.1
         Reducer 2 
             Reduce Operator Tree:
               Limit

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/spark/union_remove_25.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_25.q.out b/ql/src/test/results/clientpositive/spark/union_remove_25.q.out
index 7da32e2..b771fe9 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_25.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_25.q.out
@@ -280,6 +280,7 @@ STAGE PLANS:
                       Reduce Output Operator
                         sort order: 
                         Statistics: Num rows: 500 Data size: 5000 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
                         value expressions: _col0 (type: string), _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -451,6 +452,7 @@ STAGE PLANS:
                       Reduce Output Operator
                         sort order: 
                         Statistics: Num rows: 1000 Data size: 10000 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
                         value expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string)
         Reducer 2 
             Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/spark/union_top_level.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_top_level.q.out b/ql/src/test/results/clientpositive/spark/union_top_level.q.out
index e114a7e..36088c1 100644
--- a/ql/src/test/results/clientpositive/spark/union_top_level.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_top_level.q.out
@@ -49,6 +49,7 @@ STAGE PLANS:
                         Reduce Output Operator
                           sort order: 
                           Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                          TopN Hash Memory Usage: 0.1
                           value expressions: _col0 (type: string)
         Map 3 
             Map Operator Tree:
@@ -68,6 +69,7 @@ STAGE PLANS:
                         Reduce Output Operator
                           sort order: 
                           Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                          TopN Hash Memory Usage: 0.1
                           value expressions: _col0 (type: string)
         Map 5 
             Map Operator Tree:
@@ -87,6 +89,7 @@ STAGE PLANS:
                         Reduce Output Operator
                           sort order: 
                           Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                          TopN Hash Memory Usage: 0.1
                           value expressions: _col0 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -258,6 +261,7 @@ STAGE PLANS:
                     Reduce Output Operator
                       sort order: 
                       Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col0 (type: string), _col1 (type: string)
         Reducer 3 
             Reduce Operator Tree:
@@ -381,6 +385,7 @@ STAGE PLANS:
                         Reduce Output Operator
                           sort order: 
                           Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                          TopN Hash Memory Usage: 0.1
                           value expressions: _col0 (type: string)
         Map 3 
             Map Operator Tree:
@@ -400,6 +405,7 @@ STAGE PLANS:
                         Reduce Output Operator
                           sort order: 
                           Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                          TopN Hash Memory Usage: 0.1
                           value expressions: _col0 (type: string)
         Map 5 
             Map Operator Tree:
@@ -419,6 +425,7 @@ STAGE PLANS:
                         Reduce Output Operator
                           sort order: 
                           Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                          TopN Hash Memory Usage: 0.1
                           value expressions: _col0 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -595,6 +602,7 @@ STAGE PLANS:
                         Reduce Output Operator
                           sort order: 
                           Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                          TopN Hash Memory Usage: 0.1
                           value expressions: _col0 (type: string)
         Map 3 
             Map Operator Tree:
@@ -614,6 +622,7 @@ STAGE PLANS:
                         Reduce Output Operator
                           sort order: 
                           Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                          TopN Hash Memory Usage: 0.1
                           value expressions: _col0 (type: string)
         Map 5 
             Map Operator Tree:
@@ -633,6 +642,7 @@ STAGE PLANS:
                         Reduce Output Operator
                           sort order: 
                           Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                          TopN Hash Memory Usage: 0.1
                           value expressions: _col0 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -796,6 +806,7 @@ STAGE PLANS:
                         Reduce Output Operator
                           sort order: 
                           Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                          TopN Hash Memory Usage: 0.1
                           value expressions: _col0 (type: string)
         Map 3 
             Map Operator Tree:
@@ -815,6 +826,7 @@ STAGE PLANS:
                         Reduce Output Operator
                           sort order: 
                           Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                          TopN Hash Memory Usage: 0.1
                           value expressions: _col0 (type: string)
         Map 5 
             Map Operator Tree:
@@ -834,6 +846,7 @@ STAGE PLANS:
                         Reduce Output Operator
                           sort order: 
                           Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                          TopN Hash Memory Usage: 0.1
                           value expressions: _col0 (type: string)
         Reducer 2 
             Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.7.out b/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.7.out
index c3e7779..ed3932a 100644
--- a/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.7.out
@@ -161,6 +161,7 @@ STAGE PLANS:
                   key expressions: _col0 (type: int)
                   sort order: +
                   Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: decimal(14,4))
         Reducer 3 
             Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/spark/vector_data_types.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_data_types.q.out b/ql/src/test/results/clientpositive/spark/vector_data_types.q.out
index bcabc98..eb9ac84 100644
--- a/ql/src/test/results/clientpositive/spark/vector_data_types.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_data_types.q.out
@@ -125,6 +125,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int)
                       sort order: +++
                       Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: boolean), _col7 (type: string), _col8 (type: timestamp), _col9 (type: decimal(4,2)), _col10 (type: binary)
         Reducer 2 
             Reduce Operator Tree:
@@ -216,6 +217,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int)
                       sort order: +++
                       Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: boolean), _col7 (type: string), _col8 (type: timestamp), _col9 (type: decimal(4,2)), _col10 (type: binary)
             Execution mode: vectorized
         Reducer 2 

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out b/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out
index 50cc7bc..d52ae73 100644
--- a/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out
@@ -323,6 +323,7 @@ STAGE PLANS:
                   key expressions: _col0 (type: string)
                   sort order: +
                   Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
         Reducer 3 
             Execution mode: vectorized
             Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/spark/vectorization_13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_13.q.out b/ql/src/test/results/clientpositive/spark/vectorization_13.q.out
index 390fdc6..5c5db87 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_13.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_13.q.out
@@ -121,6 +121,7 @@ STAGE PLANS:
                     key expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), _col5 (type: tinyint), _col6 (type: tinyint), _col7 (type: tinyint), _col8 (type: double), _col9 (type: double), _col10 (type: double), _col11 (type: double), _col12 (type: double), _col13 (type: double), _col14 (type: double), _col15 (type: double), _col16 (type: double), _col17 (type: double), _col18 (type: float), _col19 (type: double), _col20 (type: tinyint)
                     sort order: +++++++++++++++++++++
                     Statistics: Num rows: 1365 Data size: 41904 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
         Reducer 3 
             Execution mode: vectorized
             Reduce Operator Tree:
@@ -374,6 +375,7 @@ STAGE PLANS:
                     key expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), _col5 (type: tinyint), _col6 (type: tinyint), _col7 (type: tinyint), _col8 (type: double), _col9 (type: double), _col10 (type: double), _col11 (type: double), _col12 (type: double), _col13 (type: double), _col14 (type: double), _col15 (type: double), _col16 (type: double), _col17 (type: double), _col18 (type: float), _col19 (type: double), _col20 (type: tinyint)
                     sort order: +++++++++++++++++++++
                     Statistics: Num rows: 1365 Data size: 41904 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
         Reducer 3 
             Execution mode: vectorized
             Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/spark/vectorization_div0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_div0.q.out b/ql/src/test/results/clientpositive/spark/vectorization_div0.q.out
index e376b30..a7ad0e6 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_div0.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_div0.q.out
@@ -172,6 +172,7 @@ STAGE PLANS:
                         key expressions: _col0 (type: bigint), _col1 (type: double)
                         sort order: ++
                         Statistics: Num rows: 1365 Data size: 41904 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
                         value expressions: _col2 (type: double)
             Execution mode: vectorized
         Reducer 2 
@@ -347,6 +348,7 @@ STAGE PLANS:
                         key expressions: _col0 (type: double), _col1 (type: double)
                         sort order: ++
                         Statistics: Num rows: 1365 Data size: 41904 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
                         value expressions: _col2 (type: double), _col4 (type: double), _col5 (type: double)
             Execution mode: vectorized
         Reducer 2 

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/spark/vectorization_part_project.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_part_project.q.out b/ql/src/test/results/clientpositive/spark/vectorization_part_project.q.out
index 0d2728e..e7f78fd 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_part_project.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_part_project.q.out
@@ -74,6 +74,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: double)
                       sort order: +
                       Statistics: Num rows: 200 Data size: 54496 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
             Execution mode: vectorized
         Reducer 2 
             Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
index e970f9a..eb35726 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
@@ -949,6 +949,7 @@ STAGE PLANS:
                         key expressions: _col0 (type: int), _col1 (type: double), _col2 (type: timestamp), _col3 (type: string), _col4 (type: boolean), _col5 (type: tinyint), _col6 (type: float), _col7 (type: timestamp), _col8 (type: smallint), _col9 (type: bigint), _col10 (type: bigint), _col11 (type: int), _col12 (type: double), _col13 (type: smallint), _col14 (type: smallint), _col15 (type: smallint), _col16 (type: double), _col17 (type: double), _col18 (type: float), _col19 (type: double), _col20 (type: double), _col21 (type: tinyint), _col22 (type: double)
                         sort order: +++++++++++++++++++++++
                         Statistics: Num rows: 9898 Data size: 303864 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
             Execution mode: vectorized
         Reducer 2 
             Execution mode: vectorized
@@ -1207,6 +1208,7 @@ STAGE PLANS:
                         key expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: boolean), _col4 (type: float), _col5 (type: double), _col6 (type: timestamp), _col7 (type: smallint), _col8 (type: string), _col9 (type: boolean), _col10 (type: double), _col11 (type: double), _col12 (type: double), _col13 (type: double), _col14 (type: float), _col15 (type: float), _col16 (type: float), _col17 (type: double), _col18 (type: double), _col19 (type: bigint), _col20 (type: double), _col21 (type: smallint), _col22 (type: bigint), _col23 (type: double), _col24 (type: smallint)
                         sort order: +++++++++++++++++++++++++
                         Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
             Execution mode: vectorized
         Reducer 2 
             Execution mode: vectorized
@@ -1414,6 +1416,7 @@ STAGE PLANS:
                         key expressions: _col8 (type: boolean), _col1 (type: string), _col3 (type: timestamp), _col5 (type: float), _col6 (type: bigint), _col1 (type: string), _col4 (type: double), _col0 (type: int), _col7 (type: smallint), _col4 (type: double), _col9 (type: int), _col10 (type: bigint), _col11 (type: bigint), _col12 (type: float), _col13 (type: bigint), _col14 (type: double), _col15 (type: double), _col16 (type: bigint), _col17 (type: double), _col18 (type: double), _col19 (type: double), _col20 (type: smallint), _col21 (type: int)
                         sort order: +++++++++++++++++++++++
                         Statistics: Num rows: 10922 Data size: 335301 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
                         value expressions: _col2 (type: boolean)
             Execution mode: vectorized
         Reducer 2 
@@ -1680,6 +1683,7 @@ STAGE PLANS:
                         key expressions: _col5 (type: smallint), _col1 (type: string), _col2 (type: double), _col3 (type: float), _col4 (type: bigint), _col6 (type: double), _col7 (type: int), _col8 (type: float), _col9 (type: double), _col10 (type: double), _col11 (type: double), _col12 (type: float), _col13 (type: int), _col14 (type: double), _col15 (type: double)
                         sort order: +++++++++++++++
                         Statistics: Num rows: 3868 Data size: 118746 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
                         value expressions: _col0 (type: timestamp)
             Execution mode: vectorized
         Reducer 2 
@@ -1913,6 +1917,7 @@ STAGE PLANS:
                     key expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: double), _col3 (type: double), _col4 (type: bigint), _col5 (type: double), _col6 (type: int), _col7 (type: double), _col8 (type: int), _col9 (type: bigint), _col10 (type: bigint)
                     sort order: +++++++++++
                     Statistics: Num rows: 1251 Data size: 38405 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
         Reducer 3 
             Execution mode: vectorized
             Reduce Operator Tree:
@@ -2375,6 +2380,7 @@ STAGE PLANS:
                     key expressions: _col0 (type: timestamp), _col1 (type: string), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: bigint), _col9 (type: bigint), _col10 (type: double), _col11 (type: tinyint), _col12 (type: double), _col13 (type: double), _col14 (type: double), _col15 (type: double), _col16 (type: double), _col17 (type: double), _col18 (type: double), _col19 (type: double), _col20 (type: double), _col21 (type: double), _col22 (type: double), _col23 (type: double), _col24 (type: double), _col25 (type: double), _col26 (type: double), _col27 (type: tinyint), _col28 (type: double), _col29 (type: double), _col30 (type: double), _col31 (type: double), _col32 (type: double), _col33 (type: double), _col34 (type: bigint), _col35 (type: double), _col36 (type: bigint), _col37 (type: bigint), _col38 (type: double)
                     sort order: +++++++++++++++++++++++++++++++++++++++
                     Statistics: Num rows: 6144 Data size: 188618 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
         Reducer 3 
             Execution mode: vectorized
             Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/subquery_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/subquery_in.q.out b/ql/src/test/results/clientpositive/subquery_in.q.out
index 8609a71..7450d24 100644
--- a/ql/src/test/results/clientpositive/subquery_in.q.out
+++ b/ql/src/test/results/clientpositive/subquery_in.q.out
@@ -259,6 +259,7 @@ STAGE PLANS:
               sort order: ++
               Map-reduce partition columns: p_mfgr (type: string)
               Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
@@ -439,6 +440,7 @@ STAGE PLANS:
               sort order: ++
               Map-reduce partition columns: p_mfgr (type: string)
               Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/subquery_notin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/subquery_notin.q.out b/ql/src/test/results/clientpositive/subquery_notin.q.out
index 56553fd..2973e1f 100644
--- a/ql/src/test/results/clientpositive/subquery_notin.q.out
+++ b/ql/src/test/results/clientpositive/subquery_notin.q.out
@@ -326,6 +326,7 @@ STAGE PLANS:
               sort order: ++
               Map-reduce partition columns: p_mfgr (type: string)
               Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: p_name (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -481,6 +482,7 @@ STAGE PLANS:
               sort order: ++
               Map-reduce partition columns: p_mfgr (type: string)
               Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: p_name (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -609,6 +611,7 @@ STAGE PLANS:
               sort order: ++
               Map-reduce partition columns: p_mfgr (type: string)
               Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
@@ -775,6 +778,7 @@ STAGE PLANS:
               sort order: ++
               Map-reduce partition columns: p_mfgr (type: string)
               Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
@@ -935,6 +939,7 @@ STAGE PLANS:
               sort order: ++
               Map-reduce partition columns: p_mfgr (type: string)
               Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
@@ -1126,6 +1131,7 @@ STAGE PLANS:
               sort order: ++
               Map-reduce partition columns: p_mfgr (type: string)
               Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out b/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out
index 908ad39..c09d327 100644
--- a/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out
+++ b/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out
@@ -212,6 +212,7 @@ STAGE PLANS:
               sort order: ++
               Map-reduce partition columns: p2_mfgr (type: string)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+              TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
@@ -380,6 +381,7 @@ STAGE PLANS:
               sort order: ++
               Map-reduce partition columns: p_mfgr (type: string)
               Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
@@ -814,6 +816,7 @@ STAGE PLANS:
               sort order: ++
               Map-reduce partition columns: p_mfgr (type: string)
               Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: p_name (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -969,6 +972,7 @@ STAGE PLANS:
               sort order: ++
               Map-reduce partition columns: p_mfgr (type: string)
               Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: p_name (type: string)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/temp_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/temp_table.q.out b/ql/src/test/results/clientpositive/temp_table.q.out
index a9f2bae..f3bef65 100644
--- a/ql/src/test/results/clientpositive/temp_table.q.out
+++ b/ql/src/test/results/clientpositive/temp_table.q.out
@@ -251,6 +251,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: string)
                 sort order: +
                 Statistics: Num rows: 247 Data size: 2609 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
                 value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -317,6 +318,7 @@ STAGE PLANS:
                   key expressions: _col0 (type: string)
                   sort order: +
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: string)
           TableScan
             alias: bar
@@ -331,6 +333,7 @@ STAGE PLANS:
                   key expressions: _col0 (type: string)
                   sort order: +
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/tez/ctas.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/ctas.q.out b/ql/src/test/results/clientpositive/tez/ctas.q.out
index 27b189d..d4fa0ab 100644
--- a/ql/src/test/results/clientpositive/tez/ctas.q.out
+++ b/ql/src/test/results/clientpositive/tez/ctas.q.out
@@ -52,6 +52,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: string), _col1 (type: string)
                       sort order: ++
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
@@ -65,6 +66,7 @@ STAGE PLANS:
                     key expressions: _col0 (type: string), _col1 (type: string)
                     sort order: ++
                     Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
         Reducer 3 
             Reduce Operator Tree:
               Select Operator
@@ -199,6 +201,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: string), _col1 (type: string)
                       sort order: ++
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
@@ -212,6 +215,7 @@ STAGE PLANS:
                     key expressions: _col0 (type: string), _col1 (type: string)
                     sort order: ++
                     Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
         Reducer 3 
             Reduce Operator Tree:
               Select Operator
@@ -346,6 +350,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: double), _col1 (type: string)
                       sort order: ++
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
@@ -359,6 +364,7 @@ STAGE PLANS:
                     key expressions: _col0 (type: double), _col1 (type: string)
                     sort order: ++
                     Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
         Reducer 3 
             Reduce Operator Tree:
               Select Operator
@@ -557,6 +563,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: string), _col1 (type: string)
                       sort order: ++
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
@@ -570,6 +577,7 @@ STAGE PLANS:
                     key expressions: _col0 (type: string), _col1 (type: string)
                     sort order: ++
                     Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
         Reducer 3 
             Reduce Operator Tree:
               Select Operator
@@ -749,6 +757,8 @@ STAGE PLANS:
                       sort order: ++
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       tag: -1
+                      TopN: 10
+                      TopN Hash Memory Usage: 0.1
                       auto parallelism: true
             Path -> Alias:
 #### A masked pattern was here ####
@@ -814,6 +824,8 @@ STAGE PLANS:
                     sort order: ++
                     Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                     tag: -1
+                    TopN: 10
+                    TopN Hash Memory Usage: 0.1
                     auto parallelism: false
         Reducer 3 
             Needs Tagging: false

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out b/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out
index 628b2db..2f6753a 100644
--- a/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out
+++ b/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out
@@ -273,6 +273,7 @@ STAGE PLANS:
                         Reduce Output Operator
                           sort order: 
                           Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
+                          TopN Hash Memory Usage: 0.1
                           value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
             Execution mode: vectorized
         Reducer 2 
@@ -656,6 +657,7 @@ STAGE PLANS:
                         Reduce Output Operator
                           sort order: 
                           Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
+                          TopN Hash Memory Usage: 0.1
                           value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
             Execution mode: vectorized
         Reducer 2 
@@ -1506,6 +1508,7 @@ STAGE PLANS:
                       key expressions: _col2 (type: int)
                       sort order: +
                       Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col3 (type: bigint), _col4 (type: float)
             Execution mode: vectorized
         Reducer 2 
@@ -1835,6 +1838,9 @@ POSTHOOK: Input: default@over1k_part2_orc
 POSTHOOK: Input: default@over1k_part2_orc@ds=foo/t=27
 POSTHOOK: Input: default@over1k_part2_orc@ds=foo/t=__HIVE_DEFAULT_PARTITION__
 #### A masked pattern was here ####
+409	65536	4294967490	46.97	foo	NULL
+374	65560	4294967516	65.43	foo	NULL
+473	65720	4294967324	80.74	foo	NULL
 405	65536	4294967508	82.24	foo	27
 457	65570	4294967464	81.58	foo	27
 256	65599	4294967383	89.55	foo	27
@@ -1851,9 +1857,6 @@ POSTHOOK: Input: default@over1k_part2_orc@ds=foo/t=__HIVE_DEFAULT_PARTITION__
 329	65778	4294967451	6.63	foo	27
 401	65779	4294967402	97.39	foo	27
 262	65787	4294967371	57.35	foo	27
-409	65536	4294967490	46.97	foo	NULL
-374	65560	4294967516	65.43	foo	NULL
-473	65720	4294967324	80.74	foo	NULL
 PREHOOK: query: select count(*) from over1k_part2_orc
 PREHOOK: type: QUERY
 PREHOOK: Input: default@over1k_part2_orc
@@ -1982,6 +1985,9 @@ POSTHOOK: Input: default@over1k_part2_orc
 POSTHOOK: Input: default@over1k_part2_orc@ds=foo/t=27
 POSTHOOK: Input: default@over1k_part2_orc@ds=foo/t=__HIVE_DEFAULT_PARTITION__
 #### A masked pattern was here ####
+409	65536	4294967490	46.97	foo	NULL
+374	65560	4294967516	65.43	foo	NULL
+473	65720	4294967324	80.74	foo	NULL
 405	65536	4294967508	82.24	foo	27
 457	65570	4294967464	81.58	foo	27
 256	65599	4294967383	89.55	foo	27
@@ -1998,9 +2004,6 @@ POSTHOOK: Input: default@over1k_part2_orc@ds=foo/t=__HIVE_DEFAULT_PARTITION__
 329	65778	4294967451	6.63	foo	27
 401	65779	4294967402	97.39	foo	27
 262	65787	4294967371	57.35	foo	27
-409	65536	4294967490	46.97	foo	NULL
-374	65560	4294967516	65.43	foo	NULL
-473	65720	4294967324	80.74	foo	NULL
 PREHOOK: query: select count(*) from over1k_part2_orc
 PREHOOK: type: QUERY
 PREHOOK: Input: default@over1k_part2_orc
@@ -2312,6 +2315,9 @@ POSTHOOK: Input: default@over1k_part_buck_sort2_orc
 POSTHOOK: Input: default@over1k_part_buck_sort2_orc@t=27
 POSTHOOK: Input: default@over1k_part_buck_sort2_orc@t=__HIVE_DEFAULT_PARTITION__
 #### A masked pattern was here ####
+409	65536	4294967490	46.97	NULL
+374	65560	4294967516	65.43	NULL
+473	65720	4294967324	80.74	NULL
 329	65778	4294967451	6.63	27
 367	65675	4294967518	12.32	27
 278	65622	4294967516	25.67	27
@@ -2328,9 +2334,6 @@ POSTHOOK: Input: default@over1k_part_buck_sort2_orc@t=__HIVE_DEFAULT_PARTITION__
 503	65628	4294967371	95.07	27
 401	65779	4294967402	97.39	27
 340	65677	4294967461	98.96	27
-409	65536	4294967490	46.97	NULL
-374	65560	4294967516	65.43	NULL
-473	65720	4294967324	80.74	NULL
 PREHOOK: query: explain select count(*) from over1k_part_buck_sort2_orc
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select count(*) from over1k_part_buck_sort2_orc
@@ -2527,6 +2530,9 @@ POSTHOOK: Input: default@over1k_part_buck_sort2_orc
 POSTHOOK: Input: default@over1k_part_buck_sort2_orc@t=27
 POSTHOOK: Input: default@over1k_part_buck_sort2_orc@t=__HIVE_DEFAULT_PARTITION__
 #### A masked pattern was here ####
+409	65536	4294967490	46.97	NULL
+374	65560	4294967516	65.43	NULL
+473	65720	4294967324	80.74	NULL
 329	65778	4294967451	6.63	27
 367	65675	4294967518	12.32	27
 278	65622	4294967516	25.67	27
@@ -2543,9 +2549,6 @@ POSTHOOK: Input: default@over1k_part_buck_sort2_orc@t=__HIVE_DEFAULT_PARTITION__
 503	65628	4294967371	95.07	27
 401	65779	4294967402	97.39	27
 340	65677	4294967461	98.96	27
-409	65536	4294967490	46.97	NULL
-374	65560	4294967516	65.43	NULL
-473	65720	4294967324	80.74	NULL
 PREHOOK: query: explain select count(*) from over1k_part_buck_sort2_orc
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select count(*) from over1k_part_buck_sort2_orc

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out b/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out
index af05d3d..fbe434c 100644
--- a/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out
+++ b/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out
@@ -215,6 +215,7 @@ STAGE PLANS:
                         Reduce Output Operator
                           sort order: 
                           Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+                          TopN Hash Memory Usage: 0.1
                           value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
         Reducer 2 
             Reduce Operator Tree:
@@ -576,6 +577,7 @@ STAGE PLANS:
                         Reduce Output Operator
                           sort order: 
                           Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+                          TopN Hash Memory Usage: 0.1
                           value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
         Reducer 2 
             Reduce Operator Tree:
@@ -1414,6 +1416,7 @@ STAGE PLANS:
                       key expressions: _col2 (type: int)
                       sort order: +
                       Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col3 (type: bigint), _col4 (type: float)
         Reducer 2 
             Reduce Operator Tree:
@@ -1736,6 +1739,9 @@ POSTHOOK: Input: default@over1k_part2
 POSTHOOK: Input: default@over1k_part2@ds=foo/t=27
 POSTHOOK: Input: default@over1k_part2@ds=foo/t=__HIVE_DEFAULT_PARTITION__
 #### A masked pattern was here ####
+409	65536	4294967490	46.97	foo	NULL
+374	65560	4294967516	65.43	foo	NULL
+473	65720	4294967324	80.74	foo	NULL
 405	65536	4294967508	82.24	foo	27
 457	65570	4294967464	81.58	foo	27
 256	65599	4294967383	89.55	foo	27
@@ -1752,9 +1758,6 @@ POSTHOOK: Input: default@over1k_part2@ds=foo/t=__HIVE_DEFAULT_PARTITION__
 329	65778	4294967451	6.63	foo	27
 401	65779	4294967402	97.39	foo	27
 262	65787	4294967371	57.35	foo	27
-409	65536	4294967490	46.97	foo	NULL
-374	65560	4294967516	65.43	foo	NULL
-473	65720	4294967324	80.74	foo	NULL
 PREHOOK: query: select count(*) from over1k_part2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@over1k_part2
@@ -1883,6 +1886,9 @@ POSTHOOK: Input: default@over1k_part2
 POSTHOOK: Input: default@over1k_part2@ds=foo/t=27
 POSTHOOK: Input: default@over1k_part2@ds=foo/t=__HIVE_DEFAULT_PARTITION__
 #### A masked pattern was here ####
+409	65536	4294967490	46.97	foo	NULL
+374	65560	4294967516	65.43	foo	NULL
+473	65720	4294967324	80.74	foo	NULL
 405	65536	4294967508	82.24	foo	27
 457	65570	4294967464	81.58	foo	27
 256	65599	4294967383	89.55	foo	27
@@ -1899,9 +1905,6 @@ POSTHOOK: Input: default@over1k_part2@ds=foo/t=__HIVE_DEFAULT_PARTITION__
 329	65778	4294967451	6.63	foo	27
 401	65779	4294967402	97.39	foo	27
 262	65787	4294967371	57.35	foo	27
-409	65536	4294967490	46.97	foo	NULL
-374	65560	4294967516	65.43	foo	NULL
-473	65720	4294967324	80.74	foo	NULL
 PREHOOK: query: select count(*) from over1k_part2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@over1k_part2
@@ -2190,6 +2193,9 @@ POSTHOOK: Input: default@over1k_part_buck_sort2
 POSTHOOK: Input: default@over1k_part_buck_sort2@t=27
 POSTHOOK: Input: default@over1k_part_buck_sort2@t=__HIVE_DEFAULT_PARTITION__
 #### A masked pattern was here ####
+409	65536	4294967490	46.97	NULL
+374	65560	4294967516	65.43	NULL
+473	65720	4294967324	80.74	NULL
 329	65778	4294967451	6.63	27
 367	65675	4294967518	12.32	27
 278	65622	4294967516	25.67	27
@@ -2206,9 +2212,6 @@ POSTHOOK: Input: default@over1k_part_buck_sort2@t=__HIVE_DEFAULT_PARTITION__
 503	65628	4294967371	95.07	27
 401	65779	4294967402	97.39	27
 340	65677	4294967461	98.96	27
-409	65536	4294967490	46.97	NULL
-374	65560	4294967516	65.43	NULL
-473	65720	4294967324	80.74	NULL
 PREHOOK: query: select count(*) from over1k_part_buck_sort2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@over1k_part_buck_sort2
@@ -2333,6 +2336,9 @@ POSTHOOK: Input: default@over1k_part_buck_sort2
 POSTHOOK: Input: default@over1k_part_buck_sort2@t=27
 POSTHOOK: Input: default@over1k_part_buck_sort2@t=__HIVE_DEFAULT_PARTITION__
 #### A masked pattern was here ####
+409	65536	4294967490	46.97	NULL
+374	65560	4294967516	65.43	NULL
+473	65720	4294967324	80.74	NULL
 329	65778	4294967451	6.63	27
 367	65675	4294967518	12.32	27
 278	65622	4294967516	25.67	27
@@ -2349,9 +2355,6 @@ POSTHOOK: Input: default@over1k_part_buck_sort2@t=__HIVE_DEFAULT_PARTITION__
 503	65628	4294967371	95.07	27
 401	65779	4294967402	97.39	27
 340	65677	4294967461	98.96	27
-409	65536	4294967490	46.97	NULL
-374	65560	4294967516	65.43	NULL
-473	65720	4294967324	80.74	NULL
 PREHOOK: query: select count(*) from over1k_part_buck_sort2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@over1k_part_buck_sort2

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/tez/insert_into1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/insert_into1.q.out b/ql/src/test/results/clientpositive/tez/insert_into1.q.out
index 0e82691..72980fa 100644
--- a/ql/src/test/results/clientpositive/tez/insert_into1.q.out
+++ b/ql/src/test/results/clientpositive/tez/insert_into1.q.out
@@ -44,6 +44,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: string)
                       sort order: +
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -161,6 +162,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: string)
                       sort order: +
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -278,6 +280,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: string)
                       sort order: +
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/tez/insert_into2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/insert_into2.q.out b/ql/src/test/results/clientpositive/tez/insert_into2.q.out
index b7668ff..d04aa71 100644
--- a/ql/src/test/results/clientpositive/tez/insert_into2.q.out
+++ b/ql/src/test/results/clientpositive/tez/insert_into2.q.out
@@ -44,6 +44,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: string)
                       sort order: +
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -202,6 +203,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: string)
                       sort order: +
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -329,6 +331,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: string)
                       sort order: +
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/tez/script_pipe.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/script_pipe.q.out b/ql/src/test/results/clientpositive/tez/script_pipe.q.out
index fb95cbc..546f2fb 100644
--- a/ql/src/test/results/clientpositive/tez/script_pipe.q.out
+++ b/ql/src/test/results/clientpositive/tez/script_pipe.q.out
@@ -30,6 +30,7 @@ STAGE PLANS:
                       Reduce Output Operator
                         sort order: 
                         Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
                         value expressions: _col0 (type: string), _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/tez/subquery_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/subquery_in.q.out b/ql/src/test/results/clientpositive/tez/subquery_in.q.out
index 8d2cd31..b9a6897 100644
--- a/ql/src/test/results/clientpositive/tez/subquery_in.q.out
+++ b/ql/src/test/results/clientpositive/tez/subquery_in.q.out
@@ -298,6 +298,7 @@ STAGE PLANS:
                     sort order: ++
                     Map-reduce partition columns: p_mfgr (type: string)
                     Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
         Reducer 2 
             Reduce Operator Tree:
               Merge Join Operator
@@ -466,6 +467,7 @@ STAGE PLANS:
                     sort order: ++
                     Map-reduce partition columns: p_mfgr (type: string)
                     Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
         Reducer 2 
             Reduce Operator Tree:
               Merge Join Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/tez/temp_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/temp_table.q.out b/ql/src/test/results/clientpositive/tez/temp_table.q.out
index 200ccdd..ec3f810 100644
--- a/ql/src/test/results/clientpositive/tez/temp_table.q.out
+++ b/ql/src/test/results/clientpositive/tez/temp_table.q.out
@@ -182,6 +182,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: string)
                       sort order: +
                       Statistics: Num rows: 247 Data size: 2609 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -254,6 +255,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: string)
                       sort order: +
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col1 (type: string)
         Map 4 
             Map Operator Tree:
@@ -268,6 +270,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: string)
                       sort order: +
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col1 (type: string)
         Reducer 3 
             Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/tez/union3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/union3.q.out b/ql/src/test/results/clientpositive/tez/union3.q.out
index 1a9feed..4bbbd7e 100644
--- a/ql/src/test/results/clientpositive/tez/union3.q.out
+++ b/ql/src/test/results/clientpositive/tez/union3.q.out
@@ -64,6 +64,7 @@ STAGE PLANS:
                       Reduce Output Operator
                         sort order: 
                         Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                        TopN Hash Memory Usage: 0.1
         Map 5 
             Map Operator Tree:
                 TableScan
@@ -77,6 +78,7 @@ STAGE PLANS:
                       Reduce Output Operator
                         sort order: 
                         Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                        TopN Hash Memory Usage: 0.1
         Map 7 
             Map Operator Tree:
                 TableScan
@@ -90,6 +92,7 @@ STAGE PLANS:
                       Reduce Output Operator
                         sort order: 
                         Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                        TopN Hash Memory Usage: 0.1
         Map 9 
             Map Operator Tree:
                 TableScan
@@ -103,6 +106,7 @@ STAGE PLANS:
                       Reduce Output Operator
                         sort order: 
                         Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                        TopN Hash Memory Usage: 0.1
         Reducer 10 
             Reduce Operator Tree:
               Limit

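A note on the repeated plan change above: the new "TopN Hash Memory Usage: 0.1" line in each Reduce Output Operator reports the fraction of memory the reduce-sink top-N hash is allowed to use (the "TopN: 10" line in the ctas.q.out hunks is the pushed-down limit itself). A minimal sketch of adjusting that fraction, assuming the property behind the plan field is hive.limit.pushdown.memory.usage (the property name does not appear in this mail):

  import org.apache.hadoop.hive.conf.HiveConf;

  public class TopNMemorySketch {
    public static void main(String[] args) {
      // Hedged sketch: raise the top-N hash memory fraction from the 0.1 shown in
      // the plans above to 0.2 for this configuration instance.
      HiveConf conf = new HiveConf();
      conf.setFloat("hive.limit.pushdown.memory.usage", 0.2f);
      System.out.println(conf.getFloat("hive.limit.pushdown.memory.usage", -1f));
    }
  }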

[14/27] hive git commit: HIVE-12498: ACID: Setting OrcRecordUpdater.OrcOptions.tableProperties() has no effect (Prasanth Jayachandran reviewed by Eugene Koifman)

Posted by om...@apache.org.
HIVE-12498: ACID: Setting OrcRecordUpdater.OrcOptions.tableProperties() has no effect (Prasanth Jayachandran reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f679a5e1
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f679a5e1
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f679a5e1

Branch: refs/heads/master-fixed
Commit: f679a5e19da55e1ef90179ea06ae999582601588
Parents: 22b6203
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Wed Nov 25 12:10:02 2015 -0600
Committer: Owen O'Malley <om...@apache.org>
Committed: Mon Nov 30 11:14:36 2015 -0800

----------------------------------------------------------------------
 .../hadoop/hive/ql/io/orc/OrcRecordUpdater.java |  3 +-
 .../hive/ql/io/orc/TestOrcRecordUpdater.java    | 58 ++++++++++++++++++--
 2 files changed, 54 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/f679a5e1/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
index 67c5a11..ee31c23 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
@@ -247,7 +247,8 @@ public class OrcRecordUpdater implements RecordUpdater {
       writerOptions = ((OrcOptions) options).getOrcOptions();
     }
     if (writerOptions == null) {
-      writerOptions = OrcFile.writerOptions(options.getConfiguration());
+      writerOptions = OrcFile.writerOptions(options.getTableProperties(),
+          options.getConfiguration());
     }
     writerOptions.fileSystem(fs).callback(indexBuilder);
     if (!options.isWritingBase()) {

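The one-line change above builds the writer options from the table properties as well as the Configuration, so table-level settings such as orc.compress actually take effect. A minimal sketch of the call shape, using only what the hunk itself shows (the surrounding AcidOutputFormat.Options plumbing is assumed):

  import java.util.Properties;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hive.ql.io.orc.OrcFile;

  public class WriterOptionsSketch {
    public static void main(String[] args) {
      // Table-level ORC settings, e.g. the compression codec requested on the table.
      Properties tblProps = new Properties();
      tblProps.setProperty("orc.compress", "SNAPPY");
      Configuration conf = new Configuration();
      // With the patch, the table properties are passed through alongside the Configuration,
      // so "orc.compress" from the table definition reaches the ORC writer.
      OrcFile.WriterOptions writerOptions = OrcFile.writerOptions(tblProps, conf);
      System.out.println(writerOptions != null);
    }
  }
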
http://git-wip-us.apache.org/repos/asf/hive/blob/f679a5e1/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java
index 22030b4..973cc40 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java
@@ -18,6 +18,15 @@
 
 package org.apache.hadoop.hive.ql.io.orc;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.File;
+import java.io.PrintStream;
+import java.util.Properties;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -33,12 +42,6 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.Reporter;
 import org.junit.Test;
 
-import java.io.DataInputStream;
-import java.io.File;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-
 public class TestOrcRecordUpdater {
 
   @Test
@@ -180,6 +183,49 @@ public class TestOrcRecordUpdater {
   }
 
   @Test
+  public void testWriterTblProperties() throws Exception {
+    Path root = new Path(workDir, "testWriterTblProperties");
+    Configuration conf = new Configuration();
+    // Must use raw local because the checksummer doesn't honor flushes.
+    FileSystem fs = FileSystem.getLocal(conf).getRaw();
+    ObjectInspector inspector;
+    synchronized (TestOrcFile.class) {
+      inspector = ObjectInspectorFactory.getReflectionObjectInspector
+          (MyRow.class, ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
+    }
+    Properties tblProps = new Properties();
+    tblProps.setProperty("orc.compress", "SNAPPY");
+    AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf)
+        .filesystem(fs)
+        .bucket(10)
+        .writingBase(false)
+        .minimumTransactionId(10)
+        .maximumTransactionId(19)
+        .inspector(inspector)
+        .reporter(Reporter.NULL)
+        .finalDestination(root)
+        .tableProperties(tblProps);
+    RecordUpdater updater = new OrcRecordUpdater(root, options);
+    updater.insert(11, new MyRow("first"));
+    updater.insert(11, new MyRow("second"));
+    updater.insert(11, new MyRow("third"));
+    updater.flush();
+    updater.insert(12, new MyRow("fourth"));
+    updater.insert(12, new MyRow("fifth"));
+    updater.flush();
+
+    PrintStream origOut = System.out;
+    ByteArrayOutputStream myOut = new ByteArrayOutputStream();
+    System.setOut(new PrintStream(myOut));
+    FileDump.main(new String[]{root.toUri().toString()});
+    System.out.flush();
+    String outDump = new String(myOut.toByteArray());
+    assertEquals(true, outDump.contains("Compression: SNAPPY"));
+    System.setOut(origOut);
+    updater.close(false);
+  }
+
+  @Test
   public void testUpdates() throws Exception {
     Path root = new Path(workDir, "testUpdates");
     Configuration conf = new Configuration();

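The new test above verifies the fix by asserting on the FileDump output. A minimal standalone sketch of the same check outside JUnit, assuming the ORC delta path is supplied as the first argument; the "Compression: SNAPPY" line in the printed metadata is what confirms the table property took effect:

  import org.apache.hadoop.hive.ql.io.orc.FileDump;

  public class DumpOrcFile {
    public static void main(String[] args) throws Exception {
      // Prints ORC metadata (schema, stripes, "Compression: ...") for the given path to stdout.
      FileDump.main(new String[]{args[0]});
    }
  }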

[12/27] hive git commit: HIVE-12501 : LLAP: don't use read(ByteBuffer) in IO (Sergey Shelukhin, reviewed by Prasanth Jayachandran)

Posted by om...@apache.org.
HIVE-12501 : LLAP: don't use read(ByteBuffer) in IO (Sergey Shelukhin, reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/18ca715e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/18ca715e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/18ca715e

Branch: refs/heads/master-fixed
Commit: 18ca715e88374ec11c98d7dba3be7cd0758964b0
Parents: 60cb16b
Author: Sergey Shelukhin <se...@apache.org>
Authored: Wed Nov 25 17:25:06 2015 -0800
Committer: Owen O'Malley <om...@apache.org>
Committed: Mon Nov 30 11:14:36 2015 -0800

----------------------------------------------------------------------
 .../hive/ql/io/orc/RecordReaderUtils.java       | 43 +++++---------------
 1 file changed, 11 insertions(+), 32 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/18ca715e/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderUtils.java
index 6f3a3e9..0caeb1b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderUtils.java
@@ -327,47 +327,26 @@ public class RecordReaderUtils {
           len -= read;
           off += read;
         }
-      } else if (doForceDirect) {
-        file.seek(base + off);
-        ByteBuffer directBuf = ByteBuffer.allocateDirect(len);
-        readDirect(file, len, directBuf);
-        range = range.replaceSelfWith(new BufferChunk(directBuf, range.getOffset()));
       } else {
+        // Don't use HDFS ByteBuffer API because it has no readFully, and is buggy and pointless.
         byte[] buffer = new byte[len];
         file.readFully((base + off), buffer, 0, buffer.length);
-        range = range.replaceSelfWith(new BufferChunk(ByteBuffer.wrap(buffer), range.getOffset()));
+        ByteBuffer bb = null;
+        if (doForceDirect) {
+          bb = ByteBuffer.allocateDirect(len);
+          bb.put(buffer);
+          bb.position(0);
+          bb.limit(len);
+        } else {
+          bb = ByteBuffer.wrap(buffer);
+        }
+        range = range.replaceSelfWith(new BufferChunk(bb, range.getOffset()));
       }
       range = range.next;
     }
     return prev.next;
   }
 
-  public static void readDirect(FSDataInputStream file,
-      int len, ByteBuffer directBuf) throws IOException {
-    // TODO: HDFS API is a mess, so handle all kinds of cases.
-    // Before 2.7, read() also doesn't adjust position correctly, so track it separately.
-    int pos = directBuf.position(), startPos = pos, endPos = pos + len;
-    try {
-      while (pos < endPos) {
-        int count = SHIMS.readByteBuffer(file, directBuf);
-        if (count < 0) throw new EOFException();
-        assert count != 0 : "0-length read: " + (endPos - pos) + "@" + (pos - startPos);
-        pos += count;
-        assert pos <= endPos : "Position " + pos + " > " + endPos + " after reading " + count;
-        directBuf.position(pos);
-      }
-    } catch (UnsupportedOperationException ex) {
-      assert pos == startPos;
-      // Happens in q files and such.
-      RecordReaderImpl.LOG.error("Stream does not support direct read; we will copy.");
-      byte[] buffer = new byte[len];
-      file.readFully(buffer, 0, buffer.length);
-      directBuf.put(buffer);
-    }
-    directBuf.position(startPos);
-    directBuf.limit(startPos + len);
-  }
-
 
   static List<DiskRange> getStreamBuffers(DiskRangeList range, long offset, long length) {
     // This assumes sorted ranges (as do many other parts of ORC code.

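To summarize the new read path above: the direct-read branch is gone, and every range is read with positional readFully() into a heap array, which is then either wrapped or copied into a direct buffer when one is required. A simplified sketch under that assumption (error handling and DiskRange bookkeeping omitted):

  import java.io.IOException;
  import java.nio.ByteBuffer;

  import org.apache.hadoop.fs.FSDataInputStream;

  public class RangeReadSketch {
    static ByteBuffer readRange(FSDataInputStream file, long offset, int len, boolean forceDirect)
        throws IOException {
      byte[] buffer = new byte[len];
      // Positional readFully avoids the partial-read handling the ByteBuffer API would require.
      file.readFully(offset, buffer, 0, buffer.length);
      if (!forceDirect) {
        return ByteBuffer.wrap(buffer);
      }
      ByteBuffer bb = ByteBuffer.allocateDirect(len);
      bb.put(buffer);
      bb.position(0);
      bb.limit(len);
      return bb;
    }
  }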

[11/27] hive git commit: HIVE-12399: Filter out NULLs in the Native Vector MapJoin operators. (Matt McCline via Gopal V)

Posted by om...@apache.org.
HIVE-12399: Filter out NULLs in the Native Vector MapJoin operators. (Matt McCline via Gopal V)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2da3436d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2da3436d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2da3436d

Branch: refs/heads/master-fixed
Commit: 2da3436dca660f0bfeac020da51c0102c05734c1
Parents: 249bcd8
Author: Gopal V <go...@apache.org>
Authored: Wed Nov 25 00:51:39 2015 -0800
Committer: Owen O'Malley <om...@apache.org>
Committed: Mon Nov 30 11:14:35 2015 -0800

----------------------------------------------------------------------
 .../VectorMapJoinInnerBigOnlyLongOperator.java  | 63 +++++++++++++-------
 ...ctorMapJoinInnerBigOnlyMultiKeyOperator.java | 61 ++++++++++---------
 ...VectorMapJoinInnerBigOnlyStringOperator.java | 57 +++++++++++-------
 .../mapjoin/VectorMapJoinInnerLongOperator.java | 63 +++++++++++++-------
 .../VectorMapJoinInnerMultiKeyOperator.java     | 61 ++++++++++---------
 .../VectorMapJoinInnerStringOperator.java       | 58 ++++++++++--------
 .../VectorMapJoinLeftSemiLongOperator.java      | 63 +++++++++++++-------
 .../VectorMapJoinLeftSemiMultiKeyOperator.java  | 63 ++++++++++----------
 .../VectorMapJoinLeftSemiStringOperator.java    | 57 +++++++++++-------
 .../fast/VectorMapJoinFastLongHashTable.java    |  7 +--
 .../fast/VectorMapJoinFastStringCommon.java     | 11 ++--
 11 files changed, 325 insertions(+), 239 deletions(-)
----------------------------------------------------------------------

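The diffs below all follow the same pattern: instead of throwing "Null key not expected in MapJoin", the probe side treats a NULL join key as a NOMATCH and keeps it out of the saved equal-key-series state. A minimal sketch of the NULL detection for a single long key column, assuming a LongColumnVector batch layout (illustrative only, not the operator code itself):

  import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;

  public class NullKeyCheckSketch {
    // Returns true when the join key at batchIndex is NULL and should be treated as NOMATCH.
    static boolean isNullKey(LongColumnVector keyCol, int batchIndex) {
      // noNulls == true means the isNull flags can be ignored for the whole batch;
      // for a repeating vector only entry 0 is meaningful.
      int idx = keyCol.isRepeating ? 0 : batchIndex;
      return !keyCol.noNulls && keyCol.isNull[idx];
    }
  }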

http://git-wip-us.apache.org/repos/asf/hive/blob/2da3436d/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyLongOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyLongOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyLongOperator.java
index e8b722e..9e77d22 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyLongOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyLongOperator.java
@@ -179,13 +179,17 @@ public class VectorMapJoinInnerBigOnlyLongOperator extends VectorMapJoinInnerBig
          * Single-Column Long specific repeated lookup.
          */
 
-        long key = vector[0];
         JoinUtil.JoinResult joinResult;
-        if (useMinMax && (key < min || key > max)) {
-          // Out of range for whole batch.
+        if (!joinColVector.noNulls && joinColVector.isNull[0]) {
           joinResult = JoinUtil.JoinResult.NOMATCH;
         } else {
-          joinResult = hashMultiSet.contains(key, hashMultiSetResults[0]);
+          long key = vector[0];
+          if (useMinMax && (key < min || key > max)) {
+            // Out of range for whole batch.
+            joinResult = JoinUtil.JoinResult.NOMATCH;
+          } else {
+            joinResult = hashMultiSet.contains(key, hashMultiSetResults[0]);
+          }
         }
 
         /*
@@ -235,13 +239,21 @@ public class VectorMapJoinInnerBigOnlyLongOperator extends VectorMapJoinInnerBig
            * Single-Column Long get key.
            */
 
-          long currentKey = vector[batchIndex];
+          long currentKey;
+          boolean isNull;
+          if (!joinColVector.noNulls && joinColVector.isNull[batchIndex]) {
+            currentKey = 0;
+            isNull = true;
+          } else {
+            currentKey = vector[batchIndex];
+            isNull = false;
+          }
 
           /*
            * Equal key series checking.
            */
 
-          if (!haveSaveKey || currentKey != saveKey) {
+          if (isNull || !haveSaveKey || currentKey != saveKey) {
 
             // New key.
 
@@ -261,25 +273,30 @@ public class VectorMapJoinInnerBigOnlyLongOperator extends VectorMapJoinInnerBig
               }
             }
 
-            // Regardless of our matching result, we keep that information to make multiple use
-            // of it for a possible series of equal keys.
-            haveSaveKey = true;
-
-            /*
-             * Single-Column Long specific save key.
-             */
-
-            saveKey = currentKey;
-
-            /*
-             * Single-Column Long specific lookup key.
-             */
-
-            if (useMinMax && (currentKey < min || currentKey > max)) {
-              // Key out of range for whole hash table.
+            if (isNull) {
               saveJoinResult = JoinUtil.JoinResult.NOMATCH;
+              haveSaveKey = false;
             } else {
-              saveJoinResult = hashMultiSet.contains(currentKey, hashMultiSetResults[hashMultiSetResultCount]);
+              // Regardless of our matching result, we keep that information to make multiple use
+              // of it for a possible series of equal keys.
+              haveSaveKey = true;
+  
+              /*
+               * Single-Column Long specific save key.
+               */
+  
+              saveKey = currentKey;
+  
+              /*
+               * Single-Column Long specific lookup key.
+               */
+  
+              if (useMinMax && (currentKey < min || currentKey > max)) {
+                // Key out of range for whole hash table.
+                saveJoinResult = JoinUtil.JoinResult.NOMATCH;
+              } else {
+                saveJoinResult = hashMultiSet.contains(currentKey, hashMultiSetResults[hashMultiSetResultCount]);
+              }
             }
 
             /*

http://git-wip-us.apache.org/repos/asf/hive/blob/2da3436d/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyMultiKeyOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyMultiKeyOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyMultiKeyOperator.java
index e016013..e4f6c5d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyMultiKeyOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyMultiKeyOperator.java
@@ -196,13 +196,14 @@ public class VectorMapJoinInnerBigOnlyMultiKeyOperator extends VectorMapJoinInne
 
         keyVectorSerializeWrite.setOutput(currentKeyOutput);
         keyVectorSerializeWrite.serializeWrite(batch, 0);
+        JoinUtil.JoinResult joinResult;
         if (keyVectorSerializeWrite.getHasAnyNulls()) {
-          // Not expecting NULLs in MapJoin -- they should have been filtered out.
-          throw new HiveException("Null key not expected in MapJoin");
+          joinResult = JoinUtil.JoinResult.NOMATCH;
+        } else {
+          byte[] keyBytes = currentKeyOutput.getData();
+          int keyLength = currentKeyOutput.getLength();
+          joinResult = hashMultiSet.contains(keyBytes, 0, keyLength, hashMultiSetResults[0]);
         }
-        byte[] keyBytes = currentKeyOutput.getData();
-        int keyLength = currentKeyOutput.getLength();
-        JoinUtil.JoinResult joinResult = hashMultiSet.contains(keyBytes, 0, keyLength, hashMultiSetResults[0]);
 
         /*
          * Common repeated join result processing.
@@ -254,16 +255,13 @@ public class VectorMapJoinInnerBigOnlyMultiKeyOperator extends VectorMapJoinInne
           // Generate binary sortable key for current row in vectorized row batch.
           keyVectorSerializeWrite.setOutput(currentKeyOutput);
           keyVectorSerializeWrite.serializeWrite(batch, batchIndex);
-          if (keyVectorSerializeWrite.getHasAnyNulls()) {
-            // Not expecting NULLs in MapJoin -- they should have been filtered out.
-            throw new HiveException("Null key not expected in MapJoin");
-          }
+          boolean isAnyNulls = keyVectorSerializeWrite.getHasAnyNulls();
 
           /*
            * Equal key series checking.
            */
 
-          if (!haveSaveKey || !saveKeyOutput.arraysEquals(currentKeyOutput)) {
+          if (isAnyNulls || !haveSaveKey || !saveKeyOutput.arraysEquals(currentKeyOutput)) {
 
             // New key.
 
@@ -283,25 +281,30 @@ public class VectorMapJoinInnerBigOnlyMultiKeyOperator extends VectorMapJoinInne
               }
             }
 
-            // Regardless of our matching result, we keep that information to make multiple use
-            // of it for a possible series of equal keys.
-            haveSaveKey = true;
-
-            /*
-             * Multi-Key specific save key.
-             */
-
-            temp = saveKeyOutput;
-            saveKeyOutput = currentKeyOutput;
-            currentKeyOutput = temp;
-
-            /*
-             * Single-Column Long specific lookup key.
-             */
-
-            byte[] keyBytes = saveKeyOutput.getData();
-            int keyLength = saveKeyOutput.getLength();
-            saveJoinResult = hashMultiSet.contains(keyBytes, 0, keyLength, hashMultiSetResults[hashMultiSetResultCount]);
+            if (isAnyNulls) {
+              saveJoinResult = JoinUtil.JoinResult.NOMATCH;
+              haveSaveKey = false;
+            } else {
+              // Regardless of our matching result, we keep that information to make multiple use
+              // of it for a possible series of equal keys.
+              haveSaveKey = true;
+
+              /*
+               * Multi-Key specific save key.
+               */
+
+              temp = saveKeyOutput;
+              saveKeyOutput = currentKeyOutput;
+              currentKeyOutput = temp;
+  
+              /*
+               * Single-Column Long specific lookup key.
+               */
+  
+              byte[] keyBytes = saveKeyOutput.getData();
+              int keyLength = saveKeyOutput.getLength();
+              saveJoinResult = hashMultiSet.contains(keyBytes, 0, keyLength, hashMultiSetResults[hashMultiSetResultCount]);
+            }
 
             /*
              * Common inner big-only join result processing.

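In the multi-key operators the NULL check rides on the serialized composite key instead: getHasAnyNulls() is consulted right after serializeWrite, and any NULL component turns the whole key into a non-match before the byte-array lookup. Condensed sketch (operator field names, not the verbatim code):

    keyVectorSerializeWrite.setOutput(currentKeyOutput);
    keyVectorSerializeWrite.serializeWrite(batch, batchIndex);
    boolean isAnyNull = keyVectorSerializeWrite.getHasAnyNulls();

    if (isAnyNull || !haveSaveKey || !saveKeyOutput.arraysEquals(currentKeyOutput)) {
      if (isAnyNull) {
        saveJoinResult = JoinUtil.JoinResult.NOMATCH;
        haveSaveKey = false;
      } else {
        haveSaveKey = true;
        temp = saveKeyOutput;               // swap buffers: the current key becomes the saved key
        saveKeyOutput = currentKeyOutput;
        currentKeyOutput = temp;
        saveJoinResult = hashMultiSet.contains(saveKeyOutput.getData(), 0,
            saveKeyOutput.getLength(), hashMultiSetResults[hashMultiSetResultCount]);
      }
    }
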
http://git-wip-us.apache.org/repos/asf/hive/blob/2da3436d/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyStringOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyStringOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyStringOperator.java
index c07d353..2711b10 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyStringOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyStringOperator.java
@@ -175,10 +175,15 @@ public class VectorMapJoinInnerBigOnlyStringOperator extends VectorMapJoinInnerB
          * Single-Column String specific repeated lookup.
          */
 
-        byte[] keyBytes = vector[0];
-        int keyStart = start[0];
-        int keyLength = length[0];
-        JoinUtil.JoinResult joinResult = hashMultiSet.contains(keyBytes, keyStart, keyLength, hashMultiSetResults[0]);
+        JoinUtil.JoinResult joinResult;
+        if (!joinColVector.noNulls && joinColVector.isNull[0]) {
+          joinResult = JoinUtil.JoinResult.NOMATCH;
+        } else {
+          byte[] keyBytes = vector[0];
+          int keyStart = start[0];
+          int keyLength = length[0];
+          joinResult = hashMultiSet.contains(keyBytes, keyStart, keyLength, hashMultiSetResults[0]);
+        }
 
         /*
          * Common repeated join result processing.
@@ -228,12 +233,13 @@ public class VectorMapJoinInnerBigOnlyStringOperator extends VectorMapJoinInnerB
            */
 
           // Implicit -- use batchIndex.
+          boolean isNull = !joinColVector.noNulls && joinColVector.isNull[batchIndex];
 
           /*
            * Equal key series checking.
            */
 
-          if (!haveSaveKey ||
+          if (isNull || !haveSaveKey ||
               StringExpr.equal(vector[saveKeyBatchIndex], start[saveKeyBatchIndex], length[saveKeyBatchIndex],
                                  vector[batchIndex], start[batchIndex], length[batchIndex]) == false) {
 
@@ -255,24 +261,29 @@ public class VectorMapJoinInnerBigOnlyStringOperator extends VectorMapJoinInnerB
               }
             }
 
-            // Regardless of our matching result, we keep that information to make multiple use
-            // of it for a possible series of equal keys.
-            haveSaveKey = true;
-
-            /*
-             * Single-Column String specific save key.
-             */
-
-            saveKeyBatchIndex = batchIndex;
-
-            /*
-             * Single-Column String specific lookup key.
-             */
-
-            byte[] keyBytes = vector[batchIndex];
-            int keyStart = start[batchIndex];
-            int keyLength = length[batchIndex];
-            saveJoinResult = hashMultiSet.contains(keyBytes, keyStart, keyLength, hashMultiSetResults[hashMultiSetResultCount]);
+            if (isNull) {
+              saveJoinResult = JoinUtil.JoinResult.NOMATCH;
+              haveSaveKey = false;
+            } else {
+              // Regardless of our matching result, we keep that information to make multiple use
+              // of it for a possible series of equal keys.
+              haveSaveKey = true;
+  
+              /*
+               * Single-Column String specific save key.
+               */
+  
+              saveKeyBatchIndex = batchIndex;
+  
+              /*
+               * Single-Column String specific lookup key.
+               */
+  
+              byte[] keyBytes = vector[batchIndex];
+              int keyStart = start[batchIndex];
+              int keyLength = length[batchIndex];
+              saveJoinResult = hashMultiSet.contains(keyBytes, keyStart, keyLength, hashMultiSetResults[hashMultiSetResultCount]);
+            }
 
             /*
              * Common inner big-only join result processing.

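The single-column string operators read the NULL flag straight from the BytesColumnVector before touching the key bytes; everything else follows the same shape. Condensed sketch (operator field names, not the verbatim code):

    boolean isNull = !joinColVector.noNulls && joinColVector.isNull[batchIndex];

    if (isNull || !haveSaveKey ||
        !StringExpr.equal(vector[saveKeyBatchIndex], start[saveKeyBatchIndex], length[saveKeyBatchIndex],
                          vector[batchIndex], start[batchIndex], length[batchIndex])) {
      if (isNull) {
        saveJoinResult = JoinUtil.JoinResult.NOMATCH;
        haveSaveKey = false;
      } else {
        haveSaveKey = true;
        saveKeyBatchIndex = batchIndex;
        saveJoinResult = hashMultiSet.contains(vector[batchIndex], start[batchIndex],
            length[batchIndex], hashMultiSetResults[hashMultiSetResultCount]);
      }
    }
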
http://git-wip-us.apache.org/repos/asf/hive/blob/2da3436d/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerLongOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerLongOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerLongOperator.java
index 92d7328..0197225 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerLongOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerLongOperator.java
@@ -177,13 +177,17 @@ public class VectorMapJoinInnerLongOperator extends VectorMapJoinInnerGenerateRe
          * Single-Column Long specific repeated lookup.
          */
 
-        long key = vector[0];
         JoinUtil.JoinResult joinResult;
-        if (useMinMax && (key < min || key > max)) {
-          // Out of range for whole batch.
+        if (!joinColVector.noNulls && joinColVector.isNull[0]) {
           joinResult = JoinUtil.JoinResult.NOMATCH;
         } else {
-          joinResult = hashMap.lookup(key, hashMapResults[0]);
+          long key = vector[0];
+          if (useMinMax && (key < min || key > max)) {
+            // Out of range for whole batch.
+            joinResult = JoinUtil.JoinResult.NOMATCH;
+          } else {
+            joinResult = hashMap.lookup(key, hashMapResults[0]);
+          }
         }
 
         /*
@@ -233,13 +237,21 @@ public class VectorMapJoinInnerLongOperator extends VectorMapJoinInnerGenerateRe
            * Single-Column Long get key.
            */
 
-          long currentKey = vector[batchIndex];
+          long currentKey;
+          boolean isNull;
+          if (!joinColVector.noNulls && joinColVector.isNull[batchIndex]) {
+            currentKey = 0;
+            isNull = true;
+          } else {
+            currentKey = vector[batchIndex];
+            isNull = false;
+          }
 
           /*
            * Equal key series checking.
            */
 
-          if (!haveSaveKey || currentKey != saveKey) {
+          if (isNull || !haveSaveKey || currentKey != saveKey) {
 
             // New key.
 
@@ -258,25 +270,30 @@ public class VectorMapJoinInnerLongOperator extends VectorMapJoinInnerGenerateRe
               }
             }
 
-            // Regardless of our matching result, we keep that information to make multiple use
-            // of it for a possible series of equal keys.
-            haveSaveKey = true;
-
-            /*
-             * Single-Column Long specific save key.
-             */
-
-            saveKey = currentKey;
-
-            /*
-             * Single-Column Long specific lookup key.
-             */
-
-            if (useMinMax && (currentKey < min || currentKey > max)) {
-              // Key out of range for whole hash table.
+            if (isNull) {
               saveJoinResult = JoinUtil.JoinResult.NOMATCH;
+              haveSaveKey = false;
             } else {
-              saveJoinResult = hashMap.lookup(currentKey, hashMapResults[hashMapResultCount]);
+              // Regardless of our matching result, we keep that information to make multiple use
+              // of it for a possible series of equal keys.
+              haveSaveKey = true;
+  
+              /*
+               * Single-Column Long specific save key.
+               */
+  
+              saveKey = currentKey;
+  
+              /*
+               * Single-Column Long specific lookup key.
+               */
+  
+              if (useMinMax && (currentKey < min || currentKey > max)) {
+                // Key out of range for whole hash table.
+                saveJoinResult = JoinUtil.JoinResult.NOMATCH;
+              } else {
+                saveJoinResult = hashMap.lookup(currentKey, hashMapResults[hashMapResultCount]);
+              }
             }
 
             /*

http://git-wip-us.apache.org/repos/asf/hive/blob/2da3436d/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerMultiKeyOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerMultiKeyOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerMultiKeyOperator.java
index eb78174..837d97b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerMultiKeyOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerMultiKeyOperator.java
@@ -193,13 +193,14 @@ public class VectorMapJoinInnerMultiKeyOperator extends VectorMapJoinInnerGenera
 
         keyVectorSerializeWrite.setOutput(currentKeyOutput);
         keyVectorSerializeWrite.serializeWrite(batch, 0);
+        JoinUtil.JoinResult joinResult;
         if (keyVectorSerializeWrite.getHasAnyNulls()) {
-          // Not expecting NULLs in MapJoin -- they should have been filtered out.
-          throw new HiveException("Null key not expected in MapJoin");
+          joinResult = JoinUtil.JoinResult.NOMATCH;
+        } else {
+          byte[] keyBytes = currentKeyOutput.getData();
+          int keyLength = currentKeyOutput.getLength();
+          joinResult = hashMap.lookup(keyBytes, 0, keyLength, hashMapResults[0]);
         }
-        byte[] keyBytes = currentKeyOutput.getData();
-        int keyLength = currentKeyOutput.getLength();
-        JoinUtil.JoinResult joinResult = hashMap.lookup(keyBytes, 0, keyLength, hashMapResults[0]);
 
         /*
          * Common repeated join result processing.
@@ -251,16 +252,13 @@ public class VectorMapJoinInnerMultiKeyOperator extends VectorMapJoinInnerGenera
           // Generate binary sortable key for current row in vectorized row batch.
           keyVectorSerializeWrite.setOutput(currentKeyOutput);
           keyVectorSerializeWrite.serializeWrite(batch, batchIndex);
-          if (keyVectorSerializeWrite.getHasAnyNulls()) {
-            // Not expecting NULLs in MapJoin -- they should have been filtered out.
-            throw new HiveException("Null key not expected in MapJoin");
-          }
+          boolean isAnyNull = keyVectorSerializeWrite.getHasAnyNulls();
 
           /*
            * Equal key series checking.
            */
 
-          if (!haveSaveKey || !saveKeyOutput.arraysEquals(currentKeyOutput)) {
+          if (isAnyNull || !haveSaveKey || !saveKeyOutput.arraysEquals(currentKeyOutput)) {
 
             // New key.
 
@@ -279,25 +277,30 @@ public class VectorMapJoinInnerMultiKeyOperator extends VectorMapJoinInnerGenera
               }
             }
 
-            // Regardless of our matching result, we keep that information to make multiple use
-            // of it for a possible series of equal keys.
-            haveSaveKey = true;
-
-            /*
-             * Multi-Key specific save key.
-             */
-
-            temp = saveKeyOutput;
-            saveKeyOutput = currentKeyOutput;
-            currentKeyOutput = temp;
-
-            /*
-             * Multi-Key specific lookup key.
-             */
-
-            byte[] keyBytes = saveKeyOutput.getData();
-            int keyLength = saveKeyOutput.getLength();
-            saveJoinResult = hashMap.lookup(keyBytes, 0, keyLength, hashMapResults[hashMapResultCount]);
+            if (isAnyNull) {
+              saveJoinResult = JoinUtil.JoinResult.NOMATCH;
+              haveSaveKey = false;
+            } else {
+              // Regardless of our matching result, we keep that information to make multiple use
+              // of it for a possible series of equal keys.
+              haveSaveKey = true;
+  
+              /*
+               * Multi-Key specific save key.
+               */
+  
+              temp = saveKeyOutput;
+              saveKeyOutput = currentKeyOutput;
+              currentKeyOutput = temp;
+  
+              /*
+               * Multi-Key specific lookup key.
+               */
+  
+              byte[] keyBytes = saveKeyOutput.getData();
+              int keyLength = saveKeyOutput.getLength();
+              saveJoinResult = hashMap.lookup(keyBytes, 0, keyLength, hashMapResults[hashMapResultCount]);
+            }
 
             /*
              * Common inner join result processing.

http://git-wip-us.apache.org/repos/asf/hive/blob/2da3436d/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerStringOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerStringOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerStringOperator.java
index 4b508d4..b2711c3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerStringOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerStringOperator.java
@@ -169,11 +169,15 @@ public class VectorMapJoinInnerStringOperator extends VectorMapJoinInnerGenerate
         /*
          * Single-Column String specific repeated lookup.
          */
-
-        byte[] keyBytes = vector[0];
-        int keyStart = start[0];
-        int keyLength = length[0];
-        JoinUtil.JoinResult joinResult = hashMap.lookup(keyBytes, keyStart, keyLength, hashMapResults[0]);
+        JoinUtil.JoinResult joinResult;
+        if (!joinColVector.noNulls && joinColVector.isNull[0]) {
+          joinResult = JoinUtil.JoinResult.NOMATCH;
+        } else {
+          byte[] keyBytes = vector[0];
+          int keyStart = start[0];
+          int keyLength = length[0];
+          joinResult = hashMap.lookup(keyBytes, keyStart, keyLength, hashMapResults[0]);
+        }
 
         /*
          * Common repeated join result processing.
@@ -223,12 +227,13 @@ public class VectorMapJoinInnerStringOperator extends VectorMapJoinInnerGenerate
            */
 
           // Implicit -- use batchIndex.
+          boolean isNull = !joinColVector.noNulls && joinColVector.isNull[batchIndex];
 
           /*
            * Equal key series checking.
            */
 
-          if (!haveSaveKey ||
+          if (isNull || !haveSaveKey ||
               StringExpr.equal(vector[saveKeyBatchIndex], start[saveKeyBatchIndex], length[saveKeyBatchIndex],
                                  vector[batchIndex], start[batchIndex], length[batchIndex]) == false) {
 
@@ -249,24 +254,29 @@ public class VectorMapJoinInnerStringOperator extends VectorMapJoinInnerGenerate
               }
             }
 
-            // Regardless of our matching result, we keep that information to make multiple use
-            // of it for a possible series of equal keys.
-            haveSaveKey = true;
-
-            /*
-             * Single-Column String specific save key.
-             */
-
-            saveKeyBatchIndex = batchIndex;
-
-            /*
-             * Single-Column String specific lookup key.
-             */
-
-            byte[] keyBytes = vector[batchIndex];
-            int keyStart = start[batchIndex];
-            int keyLength = length[batchIndex];
-            saveJoinResult = hashMap.lookup(keyBytes, keyStart, keyLength, hashMapResults[hashMapResultCount]);
+            if (isNull) {
+              saveJoinResult = JoinUtil.JoinResult.NOMATCH;
+              haveSaveKey = false;
+            } else {
+              // Regardless of our matching result, we keep that information to make multiple use
+              // of it for a possible series of equal keys.
+              haveSaveKey = true;
+  
+              /*
+               * Single-Column String specific save key.
+               */
+  
+              saveKeyBatchIndex = batchIndex;
+  
+              /*
+               * Single-Column String specific lookup key.
+               */
+  
+              byte[] keyBytes = vector[batchIndex];
+              int keyStart = start[batchIndex];
+              int keyLength = length[batchIndex];
+              saveJoinResult = hashMap.lookup(keyBytes, keyStart, keyLength, hashMapResults[hashMapResultCount]);
+            }
 
             /*
              * Common inner join result processing.

http://git-wip-us.apache.org/repos/asf/hive/blob/2da3436d/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiLongOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiLongOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiLongOperator.java
index 762b6fa..4b8ab58 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiLongOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiLongOperator.java
@@ -179,13 +179,17 @@ public class VectorMapJoinLeftSemiLongOperator extends VectorMapJoinLeftSemiGene
          * Single-Column Long specific repeated lookup.
          */
 
-        long key = vector[0];
         JoinUtil.JoinResult joinResult;
-        if (useMinMax && (key < min || key > max)) {
-          // Out of range for whole batch.
+        if (!joinColVector.noNulls && joinColVector.isNull[0]) {
           joinResult = JoinUtil.JoinResult.NOMATCH;
         } else {
-          joinResult = hashSet.contains(key, hashSetResults[0]);
+          long key = vector[0];
+          if (useMinMax && (key < min || key > max)) {
+            // Out of range for whole batch.
+            joinResult = JoinUtil.JoinResult.NOMATCH;
+          } else {
+            joinResult = hashSet.contains(key, hashSetResults[0]);
+          }
         }
 
         /*
@@ -234,13 +238,21 @@ public class VectorMapJoinLeftSemiLongOperator extends VectorMapJoinLeftSemiGene
            * Single-Column Long get key.
            */
 
-          long currentKey = vector[batchIndex];
+          long currentKey;
+          boolean isNull;
+          if (!joinColVector.noNulls && joinColVector.isNull[batchIndex]) {
+            currentKey = 0;
+            isNull = true;
+          } else {
+            currentKey = vector[batchIndex];
+            isNull = false;
+          }
 
           /*
            * Equal key series checking.
            */
 
-          if (!haveSaveKey || currentKey != saveKey) {
+          if (isNull || !haveSaveKey || currentKey != saveKey) {
 
             // New key.
 
@@ -259,25 +271,30 @@ public class VectorMapJoinLeftSemiLongOperator extends VectorMapJoinLeftSemiGene
               }
             }
 
-            // Regardless of our matching result, we keep that information to make multiple use
-            // of it for a possible series of equal keys.
-            haveSaveKey = true;
-
-            /*
-             * Single-Column Long specific save key.
-             */
-
-            saveKey = currentKey;
-
-            /*
-             * Single-Column Long specific lookup key.
-             */
-
-            if (useMinMax && (currentKey < min || currentKey > max)) {
-              // Key out of range for whole hash table.
+            if (isNull) {
               saveJoinResult = JoinUtil.JoinResult.NOMATCH;
+              haveSaveKey = false;
             } else {
-              saveJoinResult = hashSet.contains(currentKey, hashSetResults[hashSetResultCount]);
+              // Regardless of our matching result, we keep that information to make multiple use
+              // of it for a possible series of equal keys.
+              haveSaveKey = true;
+  
+              /*
+               * Single-Column Long specific save key.
+               */
+  
+              saveKey = currentKey;
+  
+              /*
+               * Single-Column Long specific lookup key.
+               */
+  
+              if (useMinMax && (currentKey < min || currentKey > max)) {
+                // Key out of range for whole hash table.
+                saveJoinResult = JoinUtil.JoinResult.NOMATCH;
+              } else {
+                saveJoinResult = hashSet.contains(currentKey, hashSetResults[hashSetResultCount]);
+              }
             }
 
             /*

http://git-wip-us.apache.org/repos/asf/hive/blob/2da3436d/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiMultiKeyOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiMultiKeyOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiMultiKeyOperator.java
index a7a51f7..bdf7901 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiMultiKeyOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiMultiKeyOperator.java
@@ -195,14 +195,15 @@ public class VectorMapJoinLeftSemiMultiKeyOperator extends VectorMapJoinLeftSemi
 
         keyVectorSerializeWrite.setOutput(currentKeyOutput);
         keyVectorSerializeWrite.serializeWrite(batch, 0);
+        JoinUtil.JoinResult joinResult;
         if (keyVectorSerializeWrite.getHasAnyNulls()) {
-          // Not expecting NULLs in MapJoin -- they should have been filtered out.
-          throw new HiveException("Null key not expected in MapJoin");
+          joinResult = JoinUtil.JoinResult.NOMATCH;
+        } else {
+          byte[] keyBytes = currentKeyOutput.getData();
+          int keyLength = currentKeyOutput.getLength();
+          // LOG.debug(CLASS_NAME + " processOp all " + displayBytes(keyBytes, 0, keyLength));
+          joinResult = hashSet.contains(keyBytes, 0, keyLength, hashSetResults[0]);
         }
-        byte[] keyBytes = currentKeyOutput.getData();
-        int keyLength = currentKeyOutput.getLength();
-        // LOG.debug(CLASS_NAME + " processOp all " + displayBytes(keyBytes, 0, keyLength));
-        JoinUtil.JoinResult joinResult = hashSet.contains(keyBytes, 0, keyLength, hashSetResults[0]);
 
         /*
          * Common repeated join result processing.
@@ -253,10 +254,7 @@ public class VectorMapJoinLeftSemiMultiKeyOperator extends VectorMapJoinLeftSemi
           // Generate binary sortable key for current row in vectorized row batch.
           keyVectorSerializeWrite.setOutput(currentKeyOutput);
           keyVectorSerializeWrite.serializeWrite(batch, batchIndex);
-          if (keyVectorSerializeWrite.getHasAnyNulls()) {
-            // Not expecting NULLs in MapJoin -- they should have been filtered out.
-            throw new HiveException("Null key not expected in MapJoin");
-          }
+          boolean isAnyNull = keyVectorSerializeWrite.getHasAnyNulls();
 
           // LOG.debug(CLASS_NAME + " currentKey " +
           //      VectorizedBatchUtil.displayBytes(currentKeyOutput.getData(), 0, currentKeyOutput.getLength()));
@@ -265,7 +263,7 @@ public class VectorMapJoinLeftSemiMultiKeyOperator extends VectorMapJoinLeftSemi
            * Equal key series checking.
            */
 
-          if (!haveSaveKey || !saveKeyOutput.arraysEquals(currentKeyOutput)) {
+          if (isAnyNull || !haveSaveKey || !saveKeyOutput.arraysEquals(currentKeyOutput)) {
 
             // New key.
 
@@ -284,25 +282,30 @@ public class VectorMapJoinLeftSemiMultiKeyOperator extends VectorMapJoinLeftSemi
               }
             }
 
-            // Regardless of our matching result, we keep that information to make multiple use
-            // of it for a possible series of equal keys.
-            haveSaveKey = true;
-
-            /*
-             * Multi-Key specific save key and lookup.
-             */
-
-            temp = saveKeyOutput;
-            saveKeyOutput = currentKeyOutput;
-            currentKeyOutput = temp;
-
-            /*
-             * Multi-key specific lookup key.
-             */
-
-            byte[] keyBytes = saveKeyOutput.getData();
-            int keyLength = saveKeyOutput.getLength();
-            saveJoinResult = hashSet.contains(keyBytes, 0, keyLength, hashSetResults[hashSetResultCount]);
+            if (isAnyNull) {
+              saveJoinResult = JoinUtil.JoinResult.NOMATCH;
+              haveSaveKey = false;
+            } else {
+              // Regardless of our matching result, we keep that information to make multiple use
+              // of it for a possible series of equal keys.
+              haveSaveKey = true;
+  
+              /*
+               * Multi-Key specific save key and lookup.
+               */
+  
+              temp = saveKeyOutput;
+              saveKeyOutput = currentKeyOutput;
+              currentKeyOutput = temp;
+  
+              /*
+               * Multi-key specific lookup key.
+               */
+  
+              byte[] keyBytes = saveKeyOutput.getData();
+              int keyLength = saveKeyOutput.getLength();
+              saveJoinResult = hashSet.contains(keyBytes, 0, keyLength, hashSetResults[hashSetResultCount]);
+            }
 
             /*
              * Common left-semi join result processing.

http://git-wip-us.apache.org/repos/asf/hive/blob/2da3436d/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiStringOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiStringOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiStringOperator.java
index eaa3af4..a8d3459 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiStringOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiStringOperator.java
@@ -172,10 +172,15 @@ public class VectorMapJoinLeftSemiStringOperator extends VectorMapJoinLeftSemiGe
          * Single-Column String specific repeated lookup.
          */
 
-        byte[] keyBytes = vector[0];
-        int keyStart = start[0];
-        int keyLength = length[0];
-        JoinUtil.JoinResult joinResult = hashSet.contains(keyBytes, keyStart, keyLength, hashSetResults[0]);
+        JoinUtil.JoinResult joinResult;
+        if (!joinColVector.noNulls && joinColVector.isNull[0]) {
+          joinResult = JoinUtil.JoinResult.NOMATCH;
+        } else {
+          byte[] keyBytes = vector[0];
+          int keyStart = start[0];
+          int keyLength = length[0];
+          joinResult = hashSet.contains(keyBytes, keyStart, keyLength, hashSetResults[0]);
+        }
 
         /*
          * Common repeated join result processing.
@@ -224,12 +229,13 @@ public class VectorMapJoinLeftSemiStringOperator extends VectorMapJoinLeftSemiGe
            */
 
           // Implicit -- use batchIndex.
+          boolean isNull = !joinColVector.noNulls && joinColVector.isNull[batchIndex];
 
           /*
            * Equal key series checking.
            */
 
-          if (!haveSaveKey ||
+          if (isNull || !haveSaveKey ||
               StringExpr.equal(vector[saveKeyBatchIndex], start[saveKeyBatchIndex], length[saveKeyBatchIndex],
                                  vector[batchIndex], start[batchIndex], length[batchIndex]) == false) {
 
@@ -250,24 +256,29 @@ public class VectorMapJoinLeftSemiStringOperator extends VectorMapJoinLeftSemiGe
               }
             }
 
-            // Regardless of our matching result, we keep that information to make multiple use
-            // of it for a possible series of equal keys.
-            haveSaveKey = true;
-
-            /*
-             * Single-Column String specific save key and lookup.
-             */
-
-            saveKeyBatchIndex = batchIndex;
-
-            /*
-             * Single-Column String specific lookup key.
-             */
-
-            byte[] keyBytes = vector[batchIndex];
-            int keyStart = start[batchIndex];
-            int keyLength = length[batchIndex];
-            saveJoinResult = hashSet.contains(keyBytes, keyStart, keyLength, hashSetResults[hashSetResultCount]);
+            if (isNull) {
+              saveJoinResult = JoinUtil.JoinResult.NOMATCH;
+              haveSaveKey = false;
+            } else {
+              // Regardless of our matching result, we keep that information to make multiple use
+              // of it for a possible series of equal keys.
+              haveSaveKey = true;
+  
+              /*
+               * Single-Column String specific save key and lookup.
+               */
+  
+              saveKeyBatchIndex = batchIndex;
+  
+              /*
+               * Single-Column String specific lookup key.
+               */
+  
+              byte[] keyBytes = vector[batchIndex];
+              int keyStart = start[batchIndex];
+              int keyLength = length[batchIndex];
+              saveJoinResult = hashSet.contains(keyBytes, keyStart, keyLength, hashSetResults[hashSetResultCount]);
+            }
 
             /*
              * Common left-semi join result processing.

http://git-wip-us.apache.org/repos/asf/hive/blob/2da3436d/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashTable.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashTable.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashTable.java
index dfc9bf1..f37f056 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashTable.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashTable.java
@@ -81,12 +81,7 @@ public abstract class VectorMapJoinFastLongHashTable
     int keyLength = currentKey.getLength();
     keyBinarySortableDeserializeRead.set(keyBytes, 0, keyLength);
     if (keyBinarySortableDeserializeRead.readCheckNull()) {
-      if (isOuterJoin) {
-        return;
-      } else {
-        // For inner join, we expect all NULL values to have been filtered out before now.
-        throw new HiveException("Unexpected NULL in map join small table");
-      }
+      return;
     }
 
     long key = VectorMapJoinFastLongHashUtil.deserializeLongKey(

http://git-wip-us.apache.org/repos/asf/hive/blob/2da3436d/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastStringCommon.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastStringCommon.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastStringCommon.java
index 5c7792f..adb8044 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastStringCommon.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastStringCommon.java
@@ -26,12 +26,16 @@ import org.apache.hadoop.hive.serde2.fast.DeserializeRead.ReadStringResults;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.io.BytesWritable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /*
  * An single byte array value hash map optimized for vector map join.
  */
 public class VectorMapJoinFastStringCommon {
 
+  public static final Logger LOG = LoggerFactory.getLogger(VectorMapJoinFastStringCommon.class);
+
   private boolean isOuterJoin;
 
   private BinarySortableDeserializeRead keyBinarySortableDeserializeRead;
@@ -45,12 +49,7 @@ public class VectorMapJoinFastStringCommon {
     int keyLength = currentKey.getLength();
     keyBinarySortableDeserializeRead.set(keyBytes, 0, keyLength);
     if (keyBinarySortableDeserializeRead.readCheckNull()) {
-      if (isOuterJoin) {
-        return;
-      } else {
-        // For inner join, we expect all NULL values to have been filtered out before now.
-        throw new HiveException("Unexpected NULL in map join small table");
-      }
+      return;
     }
     keyBinarySortableDeserializeRead.readString(readStringResults);
 

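On the small-table (build) side the matching change is to skip a NULL key while loading the hash table rather than throw: the probe side now reports NOMATCH for NULLs, so there is nothing useful to store. Condensed from the two hunks above:

    keyBinarySortableDeserializeRead.set(keyBytes, 0, keyLength);
    if (keyBinarySortableDeserializeRead.readCheckNull()) {
      // A NULL join key can never match a probe, so the row is simply not added.
      return;
    }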

[17/27] hive git commit: HIVE-12476: Metastore NPE on Oracle with Direct SQL (Jason Dere, reviewed by Sushanth Sowmyan)

Posted by om...@apache.org.
HIVE-12476: Metastore NPE on Oracle with Direct SQL (Jason Dere, reviewed by Sushanth Sowmyan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/60cb16bb
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/60cb16bb
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/60cb16bb

Branch: refs/heads/master-fixed
Commit: 60cb16bb1c79de7481f42735efdb86b908aa1526
Parents: 0f4065e
Author: Jason Dere <jd...@hortonworks.com>
Authored: Wed Nov 25 15:20:51 2015 -0800
Committer: Owen O'Malley <om...@apache.org>
Committed: Mon Nov 30 11:14:36 2015 -0800

----------------------------------------------------------------------
 .../hadoop/hive/metastore/MetaStoreDirectSql.java       | 12 ++++++++++++
 1 file changed, 12 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/60cb16bb/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
index 08153ca..d76e77f 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
@@ -646,6 +646,10 @@ class MetaStoreDirectSql {
       public void apply(Partition t, Object[] fields) {
         t.putToParameters((String)fields[1], (String)fields[2]);
       }});
+    // Perform conversion of null map values
+    for (Partition t : partitions.values()) {
+      t.setParameters(MetaStoreUtils.trimMapNulls(t.getParameters(), convertMapNullsToEmptyStrings));
+    }
 
     queryText = "select \"PART_ID\", \"PART_KEY_VAL\" from \"PARTITION_KEY_VALS\""
         + " where \"PART_ID\" in (" + partIds + ") and \"INTEGER_IDX\" >= 0"
@@ -673,6 +677,10 @@ class MetaStoreDirectSql {
       public void apply(StorageDescriptor t, Object[] fields) {
         t.putToParameters((String)fields[1], (String)fields[2]);
       }});
+    // Perform conversion of null map values
+    for (StorageDescriptor t : sds.values()) {
+      t.setParameters(MetaStoreUtils.trimMapNulls(t.getParameters(), convertMapNullsToEmptyStrings));
+    }
 
     queryText = "select \"SD_ID\", \"COLUMN_NAME\", \"SORT_COLS\".\"ORDER\" from \"SORT_COLS\""
         + " where \"SD_ID\" in (" + sdIds + ") and \"INTEGER_IDX\" >= 0"
@@ -810,6 +818,10 @@ class MetaStoreDirectSql {
       public void apply(SerDeInfo t, Object[] fields) {
         t.putToParameters((String)fields[1], (String)fields[2]);
       }});
+    // Perform conversion of null map values
+    for (SerDeInfo t : serdes.values()) {
+      t.setParameters(MetaStoreUtils.trimMapNulls(t.getParameters(), convertMapNullsToEmptyStrings));
+    }
 
     return orderedResult;
   }

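Direct SQL can hand back parameter maps whose values are NULL (Oracle returns NULL for empty VARCHAR values), and Thrift objects carrying NULL map values are what triggered the NPE; the loops above normalize those maps after the fact. A small self-contained illustration of that normalization, with a hypothetical trimNulls helper standing in for MetaStoreUtils.trimMapNulls (the real method lives in MetaStoreUtils and may differ in detail):

    import java.util.HashMap;
    import java.util.Map;

    public class TrimMapNullsExample {
      // Hypothetical stand-in for MetaStoreUtils.trimMapNulls: either drop entries whose
      // value is NULL or convert them to empty strings, depending on the flag.
      static Map<String, String> trimNulls(Map<String, String> m, boolean nullsToEmptyStrings) {
        Map<String, String> result = new HashMap<>();
        for (Map.Entry<String, String> e : m.entrySet()) {
          if (e.getValue() != null) {
            result.put(e.getKey(), e.getValue());
          } else if (nullsToEmptyStrings) {
            result.put(e.getKey(), "");
          }
          // otherwise the NULL-valued entry is dropped
        }
        return result;
      }

      public static void main(String[] args) {
        Map<String, String> params = new HashMap<>();
        params.put("transient_lastDdlTime", "1448920800");
        params.put("comment", null);                    // what Oracle hands back for ''
        System.out.println(trimNulls(params, true));    // comment kept as an empty string
        System.out.println(trimNulls(params, false));   // comment dropped entirely
      }
    }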

[03/27] hive git commit: HIVE-12329: Turn on limit pushdown optimization by default (Ashutosh Chauhan via Prasanth J)

Posted by om...@apache.org.
HIVE-12329: Turn on limit pushdown optimization by default (Ashutosh Chauhan via Prasanth J)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/249bcd80
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/249bcd80
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/249bcd80

Branch: refs/heads/master-fixed
Commit: 249bcd80d0ccc82b35cf5d8f0723d05390e1f7ff
Parents: a53d2af
Author: Ashutosh Chauhan <ha...@apache.org>
Authored: Tue Nov 3 17:26:00 2015 -0800
Committer: Owen O'Malley <om...@apache.org>
Committed: Mon Nov 30 11:14:34 2015 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |  4 +--
 .../clientpositive/annotate_stats_select.q.out  |  4 +++
 .../auto_join_without_localtask.q.out           |  3 +++
 .../results/clientpositive/bucket_groupby.q.out | 10 ++++++++
 .../results/clientpositive/bucketmapjoin7.q.out |  2 ++
 ql/src/test/results/clientpositive/cp_sel.q.out |  3 ++-
 ql/src/test/results/clientpositive/ctas.q.out   | 12 +++++++++
 .../results/clientpositive/ctas_colname.q.out   |  2 ++
 .../dynpart_sort_opt_vectorization.q.out        |  3 +++
 .../dynpart_sort_optimization.q.out             |  3 +++
 .../encryption_insert_partition_dynamic.q.out   |  2 ++
 .../test/results/clientpositive/gby_star.q.out  |  4 +++
 .../results/clientpositive/groupby1_limit.q.out |  2 ++
 .../results/clientpositive/groupby2_limit.q.out |  1 +
 .../groupby7_noskew_multi_single_reducer.q.out  |  2 ++
 ...pby_complex_types_multi_single_reducer.q.out |  2 ++
 .../groupby_multi_single_reducer.q.out          |  1 +
 .../results/clientpositive/input11_limit.q.out  |  1 +
 .../results/clientpositive/input14_limit.q.out  |  2 ++
 .../results/clientpositive/input1_limit.q.out   |  2 ++
 .../test/results/clientpositive/input22.q.out   |  1 +
 .../test/results/clientpositive/input25.q.out   |  2 ++
 .../test/results/clientpositive/input26.q.out   |  2 ++
 .../results/clientpositive/input3_limit.q.out   |  2 ++
 .../results/clientpositive/input4_limit.q.out   |  2 ++
 .../results/clientpositive/input_part10.q.out   |  1 +
 .../insert1_overwrite_partitions.q.out          |  2 ++
 .../insert2_overwrite_partitions.q.out          |  2 ++
 .../results/clientpositive/insert_into1.q.out   |  3 +++
 .../results/clientpositive/insert_into2.q.out   |  3 +++
 .../results/clientpositive/insert_into3.q.out   |  4 +++
 .../results/clientpositive/insert_into4.q.out   |  2 ++
 .../results/clientpositive/insert_into5.q.out   |  1 +
 .../results/clientpositive/insert_into6.q.out   |  1 +
 .../test/results/clientpositive/join_vc.q.out   |  1 +
 .../results/clientpositive/lateral_view.q.out   |  3 +++
 .../clientpositive/lateral_view_explode2.q.out  |  2 ++
 .../clientpositive/lateral_view_noalias.q.out   |  4 +++
 .../clientpositive/lateral_view_onview.q.out    |  3 +++
 .../clientpositive/load_dyn_part14.q.out        |  3 +++
 ql/src/test/results/clientpositive/merge4.q.out |  2 ++
 .../nonreserved_keywords_insert_into1.q.out     |  3 +++
 .../results/clientpositive/orc_createas1.q.out  |  1 +
 .../clientpositive/orc_predicate_pushdown.q.out |  6 +++++
 ql/src/test/results/clientpositive/order.q.out  |  2 ++
 ql/src/test/results/clientpositive/order2.q.out |  1 +
 .../parquet_predicate_pushdown.q.out            |  8 ++++++
 ql/src/test/results/clientpositive/pcr.q.out    |  2 ++
 .../test/results/clientpositive/regex_col.q.out |  1 +
 .../results/clientpositive/script_pipe.q.out    |  1 +
 .../clientpositive/select_as_omitted.q.out      |  1 +
 .../clientpositive/skewjoin_noskew.q.out        |  1 +
 .../results/clientpositive/smb_mapjoin_13.q.out |  4 +++
 .../results/clientpositive/smb_mapjoin_15.q.out |  8 ++++++
 .../spark/auto_join_without_localtask.q.out     |  3 +++
 .../clientpositive/spark/bucketmapjoin7.q.out   |  2 ++
 .../results/clientpositive/spark/ctas.q.out     | 12 +++++++++
 .../groupby7_noskew_multi_single_reducer.q.out  |  1 +
 ...pby_complex_types_multi_single_reducer.q.out |  2 ++
 .../spark/groupby_multi_single_reducer.q.out    |  1 +
 .../clientpositive/spark/input1_limit.q.out     |  2 ++
 .../clientpositive/spark/insert_into1.q.out     |  3 +++
 .../clientpositive/spark/insert_into2.q.out     |  3 +++
 .../clientpositive/spark/insert_into3.q.out     | 18 ++++++++++++-
 .../results/clientpositive/spark/join_vc.q.out  |  1 +
 .../spark/lateral_view_explode2.q.out           |  2 ++
 .../clientpositive/spark/load_dyn_part14.q.out  |  1 +
 .../results/clientpositive/spark/order.q.out    |  2 ++
 .../results/clientpositive/spark/order2.q.out   |  1 +
 .../test/results/clientpositive/spark/pcr.q.out |  2 ++
 .../clientpositive/spark/script_pipe.q.out      |  1 +
 .../clientpositive/spark/skewjoin_noskew.q.out  |  1 +
 .../clientpositive/spark/smb_mapjoin_13.q.out   |  4 +++
 .../clientpositive/spark/smb_mapjoin_15.q.out   |  8 ++++++
 .../clientpositive/spark/subquery_in.q.out      |  2 ++
 .../clientpositive/spark/temp_table.q.out       |  3 +++
 .../results/clientpositive/spark/union3.q.out   |  1 +
 .../clientpositive/spark/union_remove_25.q.out  |  2 ++
 .../clientpositive/spark/union_top_level.q.out  | 13 ++++++++++
 .../spark/vector_cast_constant.q.java1.7.out    |  1 +
 .../spark/vector_data_types.q.out               |  2 ++
 .../spark/vector_string_concat.q.out            |  1 +
 .../clientpositive/spark/vectorization_13.q.out |  2 ++
 .../spark/vectorization_div0.q.out              |  2 ++
 .../spark/vectorization_part_project.q.out      |  1 +
 .../spark/vectorization_short_regress.q.out     |  6 +++++
 .../results/clientpositive/subquery_in.q.out    |  2 ++
 .../results/clientpositive/subquery_notin.q.out |  6 +++++
 .../subquery_unqualcolumnrefs.q.out             |  4 +++
 .../results/clientpositive/temp_table.q.out     |  3 +++
 .../test/results/clientpositive/tez/ctas.q.out  | 12 +++++++++
 .../tez/dynpart_sort_opt_vectorization.q.out    | 27 +++++++++++---------
 .../tez/dynpart_sort_optimization.q.out         | 27 +++++++++++---------
 .../clientpositive/tez/insert_into1.q.out       |  3 +++
 .../clientpositive/tez/insert_into2.q.out       |  3 +++
 .../clientpositive/tez/script_pipe.q.out        |  1 +
 .../clientpositive/tez/subquery_in.q.out        |  2 ++
 .../results/clientpositive/tez/temp_table.q.out |  3 +++
 .../results/clientpositive/tez/union3.q.out     |  4 +++
 .../tez/vector_cast_constant.q.java1.7.out      |  1 +
 .../clientpositive/tez/vector_char_2.q.out      |  2 ++
 .../clientpositive/tez/vector_char_simple.q.out |  3 +++
 .../clientpositive/tez/vector_coalesce.q.out    |  5 ++++
 .../clientpositive/tez/vector_data_types.q.out  |  2 ++
 .../tez/vector_decimal_expressions.q.out        |  1 +
 .../tez/vector_groupby_reduce.q.out             |  1 +
 .../tez/vector_mr_diff_schema_alias.q.out       |  1 +
 .../tez/vector_non_string_partition.q.out       |  2 ++
 .../tez/vector_partitioned_date_time.q.out      |  6 +++++
 .../tez/vector_reduce_groupby_decimal.q.out     |  1 +
 .../tez/vector_string_concat.q.out              |  1 +
 .../tez/vector_varchar_simple.q.out             |  3 +++
 .../clientpositive/tez/vectorization_13.q.out   |  2 ++
 .../clientpositive/tez/vectorization_7.q.out    |  2 ++
 .../clientpositive/tez/vectorization_8.q.out    |  2 ++
 .../clientpositive/tez/vectorization_div0.q.out |  2 ++
 .../tez/vectorization_part_project.q.out        |  1 +
 .../tez/vectorization_short_regress.q.out       |  6 +++++
 .../udf_case_column_pruning.q.out               |  1 +
 ql/src/test/results/clientpositive/union3.q.out |  4 +++
 .../clientpositive/union_remove_25.q.out        |  4 +++
 .../clientpositive/union_top_level.q.out        | 14 ++++++++++
 .../vector_cast_constant.q.java1.7.out          |  1 +
 .../results/clientpositive/vector_char_2.q.out  |  2 ++
 .../clientpositive/vector_char_simple.q.out     |  3 +++
 .../clientpositive/vector_coalesce.q.out        |  5 ++++
 .../clientpositive/vector_data_types.q.out      |  2 ++
 .../vector_decimal_expressions.q.out            |  1 +
 .../clientpositive/vector_groupby_reduce.q.out  |  1 +
 .../vector_mr_diff_schema_alias.q.out           |  1 +
 .../vector_non_string_partition.q.out           |  2 ++
 .../vector_partitioned_date_time.q.out          |  6 +++++
 .../vector_reduce_groupby_decimal.q.out         |  1 +
 .../clientpositive/vector_string_concat.q.out   |  1 +
 .../clientpositive/vector_varchar_simple.q.out  |  3 +++
 .../clientpositive/vectorization_13.q.out       |  2 ++
 .../clientpositive/vectorization_7.q.out        |  2 ++
 .../clientpositive/vectorization_8.q.out        |  2 ++
 .../clientpositive/vectorization_div0.q.out     |  2 ++
 .../vectorization_part_project.q.out            |  1 +
 .../vectorization_short_regress.q.out           |  6 +++++
 141 files changed, 436 insertions(+), 28 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 2bd850d..db942b0 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1156,8 +1156,8 @@ public class HiveConf extends Configuration {
     HIVELIMITOPTMAXFETCH("hive.limit.optimize.fetch.max", 50000,
         "Maximum number of rows allowed for a smaller subset of data for simple LIMIT, if it is a fetch query. \n" +
         "Insert queries are not restricted by this limit."),
-    HIVELIMITPUSHDOWNMEMORYUSAGE("hive.limit.pushdown.memory.usage", -1f,
-        "The max memory to be used for hash in RS operator for top K selection."),
+    HIVELIMITPUSHDOWNMEMORYUSAGE("hive.limit.pushdown.memory.usage", 0.1f, new RatioValidator(),
+        "The fraction of available memory to be used for buffering rows in Reducesink operator for limit pushdown optimization."),
     HIVELIMITTABLESCANPARTITION("hive.limit.query.max.table.partition", -1,
         "This controls how many partitions can be scanned for each partitioned table.\n" +
         "The default value \"-1\" means no limit."),

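With this change the optimization is on by default at 10% of available memory, and the value is now validated as a ratio. The fraction can still be tuned per installation or per session; a minimal sketch of doing it programmatically through HiveConf (the usual route is simply hive-site.xml or a set command):

    import org.apache.hadoop.hive.conf.HiveConf;

    public class LimitPushdownMemoryExample {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // Default after HIVE-12329 is 0.1f; here the ReduceSink top-N hash may use up to 30%.
        conf.setFloatVar(HiveConf.ConfVars.HIVELIMITPUSHDOWNMEMORYUSAGE, 0.3f);
        System.out.println(conf.getFloatVar(HiveConf.ConfVars.HIVELIMITPUSHDOWNMEMORYUSAGE));
      }
    }
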
http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/annotate_stats_select.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_select.q.out b/ql/src/test/results/clientpositive/annotate_stats_select.q.out
index bd645c8..c4d59c8 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_select.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_select.q.out
@@ -888,6 +888,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: int)
       Reduce Operator Tree:
         Select Operator
@@ -972,6 +973,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: int)
       Reduce Operator Tree:
         Select Operator
@@ -1029,6 +1031,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 2 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
+                  TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Limit
           Number of rows: 10
@@ -1050,6 +1053,7 @@ STAGE PLANS:
             Reduce Output Operator
               sort order: 
               Statistics: Num rows: 2 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
+              TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Limit
           Number of rows: 10

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/auto_join_without_localtask.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join_without_localtask.q.out b/ql/src/test/results/clientpositive/auto_join_without_localtask.q.out
index 3d0067b..9fbdd39 100644
--- a/ql/src/test/results/clientpositive/auto_join_without_localtask.q.out
+++ b/ql/src/test/results/clientpositive/auto_join_without_localtask.q.out
@@ -79,6 +79,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string), _col1 (type: string)
               sort order: ++
               Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
@@ -389,6 +390,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string), _col1 (type: string)
               sort order: ++
               Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
@@ -780,6 +782,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string), _col1 (type: string)
               sort order: ++
               Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/bucket_groupby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket_groupby.q.out b/ql/src/test/results/clientpositive/bucket_groupby.q.out
index 1ac5287..0e30801 100644
--- a/ql/src/test/results/clientpositive/bucket_groupby.q.out
+++ b/ql/src/test/results/clientpositive/bucket_groupby.q.out
@@ -72,6 +72,7 @@ STAGE PLANS:
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -177,6 +178,7 @@ STAGE PLANS:
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -256,6 +258,7 @@ STAGE PLANS:
                   sort order: +
                   Map-reduce partition columns: _col0 (type: int)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -326,6 +329,7 @@ STAGE PLANS:
                   sort order: +
                   Map-reduce partition columns: _col0 (type: int)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -398,6 +402,7 @@ STAGE PLANS:
                   sort order: ++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col2 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -481,6 +486,7 @@ STAGE PLANS:
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -1046,6 +1052,7 @@ STAGE PLANS:
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -1123,6 +1130,7 @@ STAGE PLANS:
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -1321,6 +1329,7 @@ STAGE PLANS:
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -1398,6 +1407,7 @@ STAGE PLANS:
                   sort order: ++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col2 (type: bigint)
       Reduce Operator Tree:
         Group By Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/bucketmapjoin7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucketmapjoin7.q.out b/ql/src/test/results/clientpositive/bucketmapjoin7.q.out
index 56b156e..639cf17 100644
--- a/ql/src/test/results/clientpositive/bucketmapjoin7.q.out
+++ b/ql/src/test/results/clientpositive/bucketmapjoin7.q.out
@@ -251,6 +251,8 @@ STAGE PLANS:
                     sort order: ++
                     Statistics: Num rows: 378 Data size: 1514 Basic stats: COMPLETE Column stats: NONE
                     tag: -1
+                    TopN: 1
+                    TopN Hash Memory Usage: 0.1
                     auto parallelism: false
       Local Work:
         Map Reduce Local Work

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/cp_sel.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cp_sel.q.out b/ql/src/test/results/clientpositive/cp_sel.q.out
index a55b28d..7c3d0fd 100644
--- a/ql/src/test/results/clientpositive/cp_sel.q.out
+++ b/ql/src/test/results/clientpositive/cp_sel.q.out
@@ -23,6 +23,7 @@ STAGE PLANS:
                 key expressions: 1 (type: int)
                 sort order: +
                 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
                 value expressions: _col0 (type: string), _col1 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -58,7 +59,7 @@ POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 #### A masked pattern was here ####
-97	val_97	hello	world
+238	val_238	hello	world
 PREHOOK: query: create table testpartbucket (key string, value string) partitioned by (ds string, hr string) clustered by(key) sorted by(key) into 2 buckets
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/ctas.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ctas.q.out b/ql/src/test/results/clientpositive/ctas.q.out
index 048eadd..cb8a5c8 100644
--- a/ql/src/test/results/clientpositive/ctas.q.out
+++ b/ql/src/test/results/clientpositive/ctas.q.out
@@ -46,6 +46,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: string), _col1 (type: string)
                 sort order: ++
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
@@ -69,6 +70,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string), _col1 (type: string)
               sort order: ++
               Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
@@ -193,6 +195,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: string), _col1 (type: string)
                 sort order: ++
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
@@ -216,6 +219,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string), _col1 (type: string)
               sort order: ++
               Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
@@ -340,6 +344,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: double), _col1 (type: string)
                 sort order: ++
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: double), KEY.reducesinkkey1 (type: string)
@@ -363,6 +368,7 @@ STAGE PLANS:
               key expressions: _col0 (type: double), _col1 (type: string)
               sort order: ++
               Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: double), KEY.reducesinkkey1 (type: string)
@@ -551,6 +557,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: string), _col1 (type: string)
                 sort order: ++
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
@@ -574,6 +581,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string), _col1 (type: string)
               sort order: ++
               Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
@@ -743,6 +751,8 @@ STAGE PLANS:
                 sort order: ++
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 tag: -1
+                TopN: 10
+                TopN Hash Memory Usage: 0.1
                 auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -830,6 +840,8 @@ STAGE PLANS:
               sort order: ++
               Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
               tag: -1
+              TopN: 10
+              TopN Hash Memory Usage: 0.1
               auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####

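For readers tracing the new plan attributes above: a minimal HiveQL sketch, assuming a src(key string, value string) table like the one these tests use, of the kind of ORDER BY ... LIMIT query whose Reduce Output Operator picks up the "TopN" and "TopN Hash Memory Usage" lines once hive.limit.pushdown.memory.usage is in effect (the 0.1 value repeated throughout this diff is presumably the new default introduced by this change). Names and values here are illustrative, not taken from the patch itself.

  -- illustrative only; src mirrors the standard test table with key/value string columns
  SET hive.limit.pushdown.memory.usage=0.1;  -- fraction of task memory reserved for the top-N hash
  EXPLAIN
  SELECT key, value
  FROM src
  ORDER BY key, value
  LIMIT 10;

  -- with the optimization active, the Reduce Output Operator in the EXPLAIN output
  -- would be expected to show:
  --   TopN Hash Memory Usage: 0.1
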
http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/ctas_colname.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ctas_colname.q.out b/ql/src/test/results/clientpositive/ctas_colname.q.out
index 2a70faf..d5ac6c0 100644
--- a/ql/src/test/results/clientpositive/ctas_colname.q.out
+++ b/ql/src/test/results/clientpositive/ctas_colname.q.out
@@ -41,6 +41,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: double), _col3 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -386,6 +387,7 @@ STAGE PLANS:
             Reduce Output Operator
               sort order: 
               Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out b/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out
index 2f12b8d..6aa270d 100644
--- a/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out
+++ b/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out
@@ -265,6 +265,7 @@ STAGE PLANS:
                   Reduce Output Operator
                     sort order: 
                     Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
                     value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
       Execution mode: vectorized
       Reduce Operator Tree:
@@ -622,6 +623,7 @@ STAGE PLANS:
                   Reduce Output Operator
                     sort order: 
                     Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
                     value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
       Execution mode: vectorized
       Reduce Operator Tree:
@@ -1435,6 +1437,7 @@ STAGE PLANS:
                 key expressions: _col2 (type: int)
                 sort order: +
                 Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
                 value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col3 (type: bigint), _col4 (type: float)
       Execution mode: vectorized
       Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out b/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out
index 8fd744b..624f970 100644
--- a/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out
+++ b/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out
@@ -199,6 +199,7 @@ STAGE PLANS:
                   Reduce Output Operator
                     sort order: 
                     Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
                     value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
       Reduce Operator Tree:
         Select Operator
@@ -530,6 +531,7 @@ STAGE PLANS:
                   Reduce Output Operator
                     sort order: 
                     Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
                     value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
       Reduce Operator Tree:
         Select Operator
@@ -1338,6 +1340,7 @@ STAGE PLANS:
                 key expressions: _col2 (type: int)
                 sort order: +
                 Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
                 value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col3 (type: bigint), _col4 (type: float)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_dynamic.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_dynamic.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_dynamic.q.out
index 13fae42..3564e84 100644
--- a/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_dynamic.q.out
+++ b/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_dynamic.q.out
@@ -290,6 +290,8 @@ STAGE PLANS:
                   sort order: 
                   Statistics: Num rows: 2 Data size: 400 Basic stats: COMPLETE Column stats: NONE
                   tag: -1
+                  TopN: 2
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: string), _col1 (type: string)
                   auto parallelism: false
       Path -> Alias:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/gby_star.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/gby_star.q.out b/ql/src/test/results/clientpositive/gby_star.q.out
index fb71835..26609a1 100644
--- a/ql/src/test/results/clientpositive/gby_star.q.out
+++ b/ql/src/test/results/clientpositive/gby_star.q.out
@@ -30,6 +30,7 @@ STAGE PLANS:
                   sort order: ++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col2 (type: double)
       Reduce Operator Tree:
         Group By Operator
@@ -104,6 +105,7 @@ STAGE PLANS:
                   sort order: ++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                   Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col2 (type: double)
       Reduce Operator Tree:
         Group By Operator
@@ -178,6 +180,7 @@ STAGE PLANS:
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: double)
       Reduce Operator Tree:
         Group By Operator
@@ -299,6 +302,7 @@ STAGE PLANS:
               sort order: +
               Map-reduce partition columns: _col0 (type: string)
               Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col1 (type: double)
       Reduce Operator Tree:
         Group By Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/groupby1_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby1_limit.q.out b/ql/src/test/results/clientpositive/groupby1_limit.q.out
index 0607258..aacd23c 100644
--- a/ql/src/test/results/clientpositive/groupby1_limit.q.out
+++ b/ql/src/test/results/clientpositive/groupby1_limit.q.out
@@ -44,6 +44,7 @@ STAGE PLANS:
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: double)
       Reduce Operator Tree:
         Group By Operator
@@ -69,6 +70,7 @@ STAGE PLANS:
             Reduce Output Operator
               sort order: 
               Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col0 (type: string), _col1 (type: double)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/groupby2_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby2_limit.q.out b/ql/src/test/results/clientpositive/groupby2_limit.q.out
index ef9531a..bda150a 100644
--- a/ql/src/test/results/clientpositive/groupby2_limit.q.out
+++ b/ql/src/test/results/clientpositive/groupby2_limit.q.out
@@ -54,6 +54,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string)
               sort order: +
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col1 (type: double)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/groupby7_noskew_multi_single_reducer.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby7_noskew_multi_single_reducer.q.out b/ql/src/test/results/clientpositive/groupby7_noskew_multi_single_reducer.q.out
index cb6c73c..44b664f 100644
--- a/ql/src/test/results/clientpositive/groupby7_noskew_multi_single_reducer.q.out
+++ b/ql/src/test/results/clientpositive/groupby7_noskew_multi_single_reducer.q.out
@@ -90,6 +90,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string)
               sort order: +
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col1 (type: double)
       Reduce Operator Tree:
         Select Operator
@@ -133,6 +134,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string)
               sort order: +
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col1 (type: double)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/groupby_complex_types_multi_single_reducer.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_complex_types_multi_single_reducer.q.out b/ql/src/test/results/clientpositive/groupby_complex_types_multi_single_reducer.q.out
index 0564056..d2cb6f4 100644
--- a/ql/src/test/results/clientpositive/groupby_complex_types_multi_single_reducer.q.out
+++ b/ql/src/test/results/clientpositive/groupby_complex_types_multi_single_reducer.q.out
@@ -99,6 +99,7 @@ STAGE PLANS:
               key expressions: _col0 (type: array<string>)
               sort order: +
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Select Operator
@@ -162,6 +163,7 @@ STAGE PLANS:
               key expressions: _col0 (type: map<string,string>)
               sort order: +
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/groupby_multi_single_reducer.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_multi_single_reducer.q.out b/ql/src/test/results/clientpositive/groupby_multi_single_reducer.q.out
index 35b8d46..1381d91 100644
--- a/ql/src/test/results/clientpositive/groupby_multi_single_reducer.q.out
+++ b/ql/src/test/results/clientpositive/groupby_multi_single_reducer.q.out
@@ -480,6 +480,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string), _col1 (type: bigint)
               sort order: ++
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col2 (type: string), _col3 (type: double), _col4 (type: bigint)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/input11_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input11_limit.q.out b/ql/src/test/results/clientpositive/input11_limit.q.out
index f9ac14e..92db5a9 100644
--- a/ql/src/test/results/clientpositive/input11_limit.q.out
+++ b/ql/src/test/results/clientpositive/input11_limit.q.out
@@ -43,6 +43,7 @@ STAGE PLANS:
                   Reduce Output Operator
                     sort order: 
                     Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
                     value expressions: _col0 (type: string), _col1 (type: string)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/input14_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input14_limit.q.out b/ql/src/test/results/clientpositive/input14_limit.q.out
index cdf9712..9870ad5 100644
--- a/ql/src/test/results/clientpositive/input14_limit.q.out
+++ b/ql/src/test/results/clientpositive/input14_limit.q.out
@@ -53,6 +53,7 @@ STAGE PLANS:
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: string), _col1 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -78,6 +79,7 @@ STAGE PLANS:
               sort order: +
               Map-reduce partition columns: _col0 (type: string)
               Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/input1_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input1_limit.q.out b/ql/src/test/results/clientpositive/input1_limit.q.out
index 6470427..0ceb153 100644
--- a/ql/src/test/results/clientpositive/input1_limit.q.out
+++ b/ql/src/test/results/clientpositive/input1_limit.q.out
@@ -56,6 +56,7 @@ STAGE PLANS:
                   Reduce Output Operator
                     sort order: 
                     Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
                     value expressions: _col0 (type: string), _col1 (type: string)
             Filter Operator
               predicate: (key < 100) (type: boolean)
@@ -114,6 +115,7 @@ STAGE PLANS:
             Reduce Output Operator
               sort order: 
               Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col0 (type: string), _col1 (type: string)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/input22.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input22.q.out b/ql/src/test/results/clientpositive/input22.q.out
index eecbcd3..c74ba41 100644
--- a/ql/src/test/results/clientpositive/input22.q.out
+++ b/ql/src/test/results/clientpositive/input22.q.out
@@ -45,6 +45,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: string)
                 sort order: +
                 Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/input25.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input25.q.out b/ql/src/test/results/clientpositive/input25.q.out
index d0a97fa..b11b0ff 100644
--- a/ql/src/test/results/clientpositive/input25.q.out
+++ b/ql/src/test/results/clientpositive/input25.q.out
@@ -57,6 +57,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: int), _col1 (type: int)
       Reduce Operator Tree:
         Select Operator
@@ -117,6 +118,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: int), _col1 (type: int)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/input26.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input26.q.out b/ql/src/test/results/clientpositive/input26.q.out
index b917b86..cf2ce16 100644
--- a/ql/src/test/results/clientpositive/input26.q.out
+++ b/ql/src/test/results/clientpositive/input26.q.out
@@ -33,6 +33,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: string)
                 sort order: +
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
                 value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -104,6 +105,7 @@ STAGE PLANS:
                   Reduce Output Operator
                     sort order: 
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
                     value expressions: _col0 (type: string), _col1 (type: string)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/input3_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input3_limit.q.out b/ql/src/test/results/clientpositive/input3_limit.q.out
index f3e0152..c7bc4ac 100644
--- a/ql/src/test/results/clientpositive/input3_limit.q.out
+++ b/ql/src/test/results/clientpositive/input3_limit.q.out
@@ -58,6 +58,7 @@ STAGE PLANS:
                 sort order: ++
                 Map-reduce partition columns: _col0 (type: string)
                 Statistics: Num rows: 58 Data size: 11603 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
@@ -80,6 +81,7 @@ STAGE PLANS:
             Reduce Output Operator
               sort order: 
               Statistics: Num rows: 20 Data size: 4000 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col0 (type: string), _col1 (type: string)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/input4_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input4_limit.q.out b/ql/src/test/results/clientpositive/input4_limit.q.out
index 82e5a14..6193a74 100644
--- a/ql/src/test/results/clientpositive/input4_limit.q.out
+++ b/ql/src/test/results/clientpositive/input4_limit.q.out
@@ -24,6 +24,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: string)
                 sort order: +
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
                 value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -48,6 +49,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string)
               sort order: +
               Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/input_part10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input_part10.q.out b/ql/src/test/results/clientpositive/input_part10.q.out
index e4058b0..c8fb37e 100644
--- a/ql/src/test/results/clientpositive/input_part10.q.out
+++ b/ql/src/test/results/clientpositive/input_part10.q.out
@@ -52,6 +52,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: 1 (type: int), 2 (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/insert1_overwrite_partitions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert1_overwrite_partitions.q.out b/ql/src/test/results/clientpositive/insert1_overwrite_partitions.q.out
index 900babe..49c1269 100644
--- a/ql/src/test/results/clientpositive/insert1_overwrite_partitions.q.out
+++ b/ql/src/test/results/clientpositive/insert1_overwrite_partitions.q.out
@@ -58,6 +58,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: string), _col1 (type: string)
                 sort order: --
                 Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
@@ -190,6 +191,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: string), _col1 (type: string)
                 sort order: --
                 Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/insert2_overwrite_partitions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert2_overwrite_partitions.q.out b/ql/src/test/results/clientpositive/insert2_overwrite_partitions.q.out
index 25c438f..b5f28d2 100644
--- a/ql/src/test/results/clientpositive/insert2_overwrite_partitions.q.out
+++ b/ql/src/test/results/clientpositive/insert2_overwrite_partitions.q.out
@@ -69,6 +69,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: string), _col1 (type: string)
                 sort order: --
                 Statistics: Num rows: 30 Data size: 6028 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
@@ -156,6 +157,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: string), _col1 (type: string)
                 sort order: --
                 Statistics: Num rows: 30 Data size: 6028 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/insert_into1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert_into1.q.out b/ql/src/test/results/clientpositive/insert_into1.q.out
index 7f3112c..6e45db8 100644
--- a/ql/src/test/results/clientpositive/insert_into1.q.out
+++ b/ql/src/test/results/clientpositive/insert_into1.q.out
@@ -38,6 +38,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: string)
                 sort order: +
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
                 value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -145,6 +146,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: string)
                 sort order: +
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
                 value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -252,6 +254,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: string)
                 sort order: +
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
                 value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/insert_into2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert_into2.q.out b/ql/src/test/results/clientpositive/insert_into2.q.out
index 737e576..757a85c 100644
--- a/ql/src/test/results/clientpositive/insert_into2.q.out
+++ b/ql/src/test/results/clientpositive/insert_into2.q.out
@@ -38,6 +38,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: string)
                 sort order: +
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
                 value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -186,6 +187,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: string)
                 sort order: +
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
                 value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -303,6 +305,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: string)
                 sort order: +
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
                 value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/insert_into3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert_into3.q.out b/ql/src/test/results/clientpositive/insert_into3.q.out
index 56e1998..ae7523b 100644
--- a/ql/src/test/results/clientpositive/insert_into3.q.out
+++ b/ql/src/test/results/clientpositive/insert_into3.q.out
@@ -51,6 +51,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: string), _col1 (type: string)
                 sort order: ++
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: _col0, _col1
@@ -103,6 +104,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string), _col1 (type: string)
               sort order: ++
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
@@ -210,6 +212,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: string), _col1 (type: string)
             Select Operator
               expressions: key (type: string), value (type: string)
@@ -265,6 +268,7 @@ STAGE PLANS:
             Reduce Output Operator
               sort order: 
               Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col0 (type: string), _col1 (type: string)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/insert_into4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert_into4.q.out b/ql/src/test/results/clientpositive/insert_into4.q.out
index 192e60e..07ff99b 100644
--- a/ql/src/test/results/clientpositive/insert_into4.q.out
+++ b/ql/src/test/results/clientpositive/insert_into4.q.out
@@ -48,6 +48,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: string), _col1 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -132,6 +133,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: string), _col1 (type: string)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/insert_into5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert_into5.q.out b/ql/src/test/results/clientpositive/insert_into5.q.out
index ea88b96..b9510b9 100644
--- a/ql/src/test/results/clientpositive/insert_into5.q.out
+++ b/ql/src/test/results/clientpositive/insert_into5.q.out
@@ -46,6 +46,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 10 Data size: 910 Basic stats: COMPLETE Column stats: COMPLETE
+                  TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: 1 (type: int), 'one' (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/insert_into6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert_into6.q.out b/ql/src/test/results/clientpositive/insert_into6.q.out
index 388b200..d93a167 100644
--- a/ql/src/test/results/clientpositive/insert_into6.q.out
+++ b/ql/src/test/results/clientpositive/insert_into6.q.out
@@ -50,6 +50,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 150 Data size: 1500 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: string), _col1 (type: string)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/join_vc.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join_vc.q.out b/ql/src/test/results/clientpositive/join_vc.q.out
index 9c7e110..2e3fab7 100644
--- a/ql/src/test/results/clientpositive/join_vc.q.out
+++ b/ql/src/test/results/clientpositive/join_vc.q.out
@@ -120,6 +120,7 @@ STAGE PLANS:
               key expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: string)
               sort order: +++
               Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: bigint), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/lateral_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/lateral_view.q.out b/ql/src/test/results/clientpositive/lateral_view.q.out
index e0cfb25..4dd5bd9 100644
--- a/ql/src/test/results/clientpositive/lateral_view.q.out
+++ b/ql/src/test/results/clientpositive/lateral_view.q.out
@@ -48,6 +48,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: string), _col2 (type: int)
                       sort order: ++
                       Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col1 (type: string)
               Select Operator
                 expressions: array(1,2,3) (type: array<int>)
@@ -67,6 +68,7 @@ STAGE PLANS:
                         key expressions: _col0 (type: string), _col2 (type: int)
                         sort order: ++
                         Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
                         value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -91,6 +93,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string), _col2 (type: int)
               sort order: ++
               Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/lateral_view_explode2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/lateral_view_explode2.q.out b/ql/src/test/results/clientpositive/lateral_view_explode2.q.out
index 65e268d..e28a93d 100644
--- a/ql/src/test/results/clientpositive/lateral_view_explode2.q.out
+++ b/ql/src/test/results/clientpositive/lateral_view_explode2.q.out
@@ -36,6 +36,7 @@ STAGE PLANS:
                       sort order: ++
                       Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                      TopN Hash Memory Usage: 0.1
               Select Operator
                 expressions: array(1,2,3) (type: array<int>)
                 outputColumnNames: _col0
@@ -56,6 +57,7 @@ STAGE PLANS:
                         sort order: ++
                         Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
                         Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                        TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Group By Operator
           keys: KEY._col0 (type: int), KEY._col1 (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/lateral_view_noalias.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/lateral_view_noalias.q.out b/ql/src/test/results/clientpositive/lateral_view_noalias.q.out
index 7988bd7..473e841 100644
--- a/ql/src/test/results/clientpositive/lateral_view_noalias.q.out
+++ b/ql/src/test/results/clientpositive/lateral_view_noalias.q.out
@@ -172,6 +172,7 @@ STAGE PLANS:
                       Reduce Output Operator
                         sort order: 
                         Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                        TopN Hash Memory Usage: 0.1
                         value expressions: _col0 (type: string), _col1 (type: int)
               Select Operator
                 expressions: map('key1':100,'key2':200) (type: map<string,int>)
@@ -193,6 +194,7 @@ STAGE PLANS:
                         Reduce Output Operator
                           sort order: 
                           Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.1
                           value expressions: _col0 (type: string), _col1 (type: int)
       Reduce Operator Tree:
         Select Operator
@@ -269,6 +271,7 @@ STAGE PLANS:
                       Reduce Output Operator
                         sort order: 
                         Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                        TopN Hash Memory Usage: 0.1
                         value expressions: _col0 (type: string), _col1 (type: int)
               Select Operator
                 expressions: map('key1':100,'key2':200) (type: map<string,int>)
@@ -290,6 +293,7 @@ STAGE PLANS:
                         Reduce Output Operator
                           sort order: 
                           Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                          TopN Hash Memory Usage: 0.1
                           value expressions: _col0 (type: string), _col1 (type: int)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/lateral_view_onview.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/lateral_view_onview.q.out b/ql/src/test/results/clientpositive/lateral_view_onview.q.out
index 8f576a5..cfa957f 100644
--- a/ql/src/test/results/clientpositive/lateral_view_onview.q.out
+++ b/ql/src/test/results/clientpositive/lateral_view_onview.q.out
@@ -61,6 +61,7 @@ STAGE PLANS:
                       key expressions: _col0 (type: string), _col4 (type: int)
                       sort order: ++
                       Statistics: Num rows: 1000 Data size: 17000 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
                       value expressions: _col1 (type: array<int>), _col2 (type: int), _col3 (type: char(1))
                 Select Operator
                   expressions: array(1,2,3) (type: array<int>)
@@ -76,6 +77,7 @@ STAGE PLANS:
                         key expressions: _col0 (type: string), _col4 (type: int)
                         sort order: ++
                         Statistics: Num rows: 1000 Data size: 17000 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
                         value expressions: _col1 (type: array<int>), _col2 (type: int), _col3 (type: char(1))
       Reduce Operator Tree:
         Select Operator
@@ -100,6 +102,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string), _col4 (type: int)
               sort order: ++
               Statistics: Num rows: 1 Data size: 17 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col1 (type: array<int>), _col2 (type: int), _col3 (type: char(1))
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/load_dyn_part14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/load_dyn_part14.q.out b/ql/src/test/results/clientpositive/load_dyn_part14.q.out
index 57a1a93..b35cfaf 100644
--- a/ql/src/test/results/clientpositive/load_dyn_part14.q.out
+++ b/ql/src/test/results/clientpositive/load_dyn_part14.q.out
@@ -81,6 +81,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 2 Data size: 172 Basic stats: COMPLETE Column stats: COMPLETE
+                  TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Limit
           Number of rows: 2
@@ -201,6 +202,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 2 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE
+                  TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Limit
           Number of rows: 2
@@ -230,6 +232,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 2 Data size: 342 Basic stats: COMPLETE Column stats: COMPLETE
+                  TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Limit
           Number of rows: 2

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/merge4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/merge4.q.out b/ql/src/test/results/clientpositive/merge4.q.out
index 031376f..08e4455 100644
--- a/ql/src/test/results/clientpositive/merge4.q.out
+++ b/ql/src/test/results/clientpositive/merge4.q.out
@@ -2790,6 +2790,7 @@ STAGE PLANS:
                   Reduce Output Operator
                     sort order: 
                     Statistics: Num rows: 1 Data size: 259 Basic stats: COMPLETE Column stats: COMPLETE
+                    TopN Hash Memory Usage: 0.1
                     value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
           TableScan
             alias: src
@@ -2806,6 +2807,7 @@ STAGE PLANS:
                   Reduce Output Operator
                     sort order: 
                     Statistics: Num rows: 1 Data size: 259 Basic stats: COMPLETE Column stats: COMPLETE
+                    TopN Hash Memory Usage: 0.1
                     value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/nonreserved_keywords_insert_into1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/nonreserved_keywords_insert_into1.q.out b/ql/src/test/results/clientpositive/nonreserved_keywords_insert_into1.q.out
index e70673e..d01461b 100644
--- a/ql/src/test/results/clientpositive/nonreserved_keywords_insert_into1.q.out
+++ b/ql/src/test/results/clientpositive/nonreserved_keywords_insert_into1.q.out
@@ -36,6 +36,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: string), _col1 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -120,6 +121,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: string), _col1 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -213,6 +215,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: string), _col1 (type: string)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/orc_createas1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_createas1.q.out b/ql/src/test/results/clientpositive/orc_createas1.q.out
index f5a23db..752f162 100644
--- a/ql/src/test/results/clientpositive/orc_createas1.q.out
+++ b/ql/src/test/results/clientpositive/orc_createas1.q.out
@@ -167,6 +167,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: string)
                 sort order: +
                 Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
                 value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out b/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out
index 6a528dd..2642a80 100644
--- a/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out
@@ -778,6 +778,7 @@ STAGE PLANS:
                   key expressions: _col3 (type: string)
                   sort order: -
                   Statistics: Num rows: 4 Data size: 1186 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double)
       Reduce Operator Tree:
         Select Operator
@@ -846,6 +847,7 @@ STAGE PLANS:
                   key expressions: _col3 (type: string)
                   sort order: -
                   Statistics: Num rows: 4 Data size: 1186 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double)
       Reduce Operator Tree:
         Select Operator
@@ -980,6 +982,7 @@ STAGE PLANS:
                   key expressions: _col3 (type: string)
                   sort order: -
                   Statistics: Num rows: 1 Data size: 296 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double)
       Reduce Operator Tree:
         Select Operator
@@ -1004,6 +1007,7 @@ STAGE PLANS:
               key expressions: _col3 (type: string)
               sort order: -
               Statistics: Num rows: 1 Data size: 296 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double)
       Reduce Operator Tree:
         Select Operator
@@ -1077,6 +1081,7 @@ STAGE PLANS:
                   key expressions: _col3 (type: string)
                   sort order: -
                   Statistics: Num rows: 1 Data size: 296 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double)
       Reduce Operator Tree:
         Select Operator
@@ -1101,6 +1106,7 @@ STAGE PLANS:
               key expressions: _col3 (type: string)
               sort order: -
               Statistics: Num rows: 1 Data size: 296 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/order.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/order.q.out b/ql/src/test/results/clientpositive/order.q.out
index 13eee2e..e7cff4e 100644
--- a/ql/src/test/results/clientpositive/order.q.out
+++ b/ql/src/test/results/clientpositive/order.q.out
@@ -23,6 +23,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: string)
                 sort order: +
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
                 value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Select Operator
@@ -89,6 +90,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: string)
                 sort order: -
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
                 value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/order2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/order2.q.out b/ql/src/test/results/clientpositive/order2.q.out
index 3a38b46..72491da 100644
--- a/ql/src/test/results/clientpositive/order2.q.out
+++ b/ql/src/test/results/clientpositive/order2.q.out
@@ -27,6 +27,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: string)
                 sort order: +
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
                 value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out b/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out
index 980b65b..fbd3491 100644
--- a/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out
@@ -766,6 +766,7 @@ STAGE PLANS:
                   key expressions: _col3 (type: string)
                   sort order: -
                   Statistics: Num rows: 4 Data size: 44 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double)
       Reduce Operator Tree:
         Select Operator
@@ -834,6 +835,7 @@ STAGE PLANS:
                   key expressions: _col3 (type: string)
                   sort order: -
                   Statistics: Num rows: 4 Data size: 44 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double)
       Reduce Operator Tree:
         Select Operator
@@ -1013,6 +1015,7 @@ STAGE PLANS:
                   key expressions: _col3 (type: string)
                   sort order: -
                   Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double)
       Reduce Operator Tree:
         Select Operator
@@ -1037,6 +1040,7 @@ STAGE PLANS:
               key expressions: _col3 (type: string)
               sort order: -
               Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double)
       Reduce Operator Tree:
         Select Operator
@@ -1110,6 +1114,7 @@ STAGE PLANS:
                   key expressions: _col3 (type: string)
                   sort order: -
                   Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double)
       Reduce Operator Tree:
         Select Operator
@@ -1134,6 +1139,7 @@ STAGE PLANS:
               key expressions: _col3 (type: string)
               sort order: -
               Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double)
       Reduce Operator Tree:
         Select Operator
@@ -1219,6 +1225,7 @@ STAGE PLANS:
                   key expressions: _col0 (type: float)
                   sort order: -
                   Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: int), _col2 (type: bigint)
       Reduce Operator Tree:
         Select Operator
@@ -1243,6 +1250,7 @@ STAGE PLANS:
               key expressions: _col0 (type: float)
               sort order: -
               Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col1 (type: int), _col2 (type: bigint)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/pcr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/pcr.q.out b/ql/src/test/results/clientpositive/pcr.q.out
index 82dc31e..4e23917 100644
--- a/ql/src/test/results/clientpositive/pcr.q.out
+++ b/ql/src/test/results/clientpositive/pcr.q.out
@@ -5295,6 +5295,8 @@ STAGE PLANS:
                 sort order: +
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 tag: -1
+                TopN: 10
+                TopN Hash Memory Usage: 0.1
                 value expressions: _col1 (type: string)
                 auto parallelism: false
       Path -> Alias:

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/regex_col.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/regex_col.q.out b/ql/src/test/results/clientpositive/regex_col.q.out
index 16f8f88..ddf935b 100644
--- a/ql/src/test/results/clientpositive/regex_col.q.out
+++ b/ql/src/test/results/clientpositive/regex_col.q.out
@@ -368,6 +368,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: string), _col1 (type: string)
                 sort order: ++
                 Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/script_pipe.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/script_pipe.q.out b/ql/src/test/results/clientpositive/script_pipe.q.out
index e19fc06..a5d54af 100644
--- a/ql/src/test/results/clientpositive/script_pipe.q.out
+++ b/ql/src/test/results/clientpositive/script_pipe.q.out
@@ -25,6 +25,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   sort order: 
                   Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: string), _col1 (type: string)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/select_as_omitted.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/select_as_omitted.q.out b/ql/src/test/results/clientpositive/select_as_omitted.q.out
index de9be1f..645d9e2 100644
--- a/ql/src/test/results/clientpositive/select_as_omitted.q.out
+++ b/ql/src/test/results/clientpositive/select_as_omitted.q.out
@@ -31,6 +31,7 @@ STAGE PLANS:
                 key expressions: _col0 (type: string)
                 sort order: +
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                TopN Hash Memory Usage: 0.1
                 value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/skewjoin_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/skewjoin_noskew.q.out b/ql/src/test/results/clientpositive/skewjoin_noskew.q.out
index 24e07c2..a8e3e8b 100644
--- a/ql/src/test/results/clientpositive/skewjoin_noskew.q.out
+++ b/ql/src/test/results/clientpositive/skewjoin_noskew.q.out
@@ -111,6 +111,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string)
               sort order: +
               Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
               value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/249bcd80/ql/src/test/results/clientpositive/smb_mapjoin_13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_13.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_13.q.out
index 1eaea87..8383af5 100644
--- a/ql/src/test/results/clientpositive/smb_mapjoin_13.q.out
+++ b/ql/src/test/results/clientpositive/smb_mapjoin_13.q.out
@@ -152,6 +152,8 @@ STAGE PLANS:
                     key expressions: _col0 (type: int)
                     sort order: +
                     tag: -1
+                    TopN: 10
+                    TopN Hash Memory Usage: 0.1
                     value expressions: _col1 (type: string), _col2 (type: int), _col3 (type: string)
                     auto parallelism: false
       Path -> Alias:
@@ -373,6 +375,8 @@ STAGE PLANS:
                     sort order: +
                     Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                     tag: -1
+                    TopN: 10
+                    TopN Hash Memory Usage: 0.1
                     value expressions: _col1 (type: string), _col2 (type: int), _col3 (type: string)
                     auto parallelism: false
       Local Work:


[21/27] hive git commit: HIVE-12465: Hive might produce wrong results when (outer) joins are merged (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by om...@apache.org.
HIVE-12465: Hive might produce wrong results when (outer) joins are merged (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b94a2177
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b94a2177
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b94a2177

Branch: refs/heads/master-fixed
Commit: b94a2177b1296705b0e8c9bd00eb27809c79fd56
Parents: eb76634
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Tue Nov 24 17:20:05 2015 +0100
Committer: Owen O'Malley <om...@apache.org>
Committed: Mon Nov 30 11:14:37 2015 -0800

----------------------------------------------------------------------
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |   2 +-
 ql/src/test/queries/clientpositive/mergejoin.q  |  12 +
 .../test/results/clientpositive/mergejoin.q.out | 548 +++++++++++++++++++
 .../results/clientpositive/tez/mergejoin.q.out  | 548 +++++++++++++++++++
 4 files changed, 1109 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/b94a2177/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 1b7873d..0ff6001 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -2265,7 +2265,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
           if (rightCondAl1.size() != 0) {
             QBJoinTree leftTree = joinTree.getJoinSrc();
             List<String> leftTreeLeftSrc = new ArrayList<String>();
-            if (leftTree != null) {
+            if (leftTree != null && leftTree.getNoOuterJoin()) {
               String leftTreeRightSource = leftTree.getRightAliases() != null &&
                   leftTree.getRightAliases().length > 0 ?
                   leftTree.getRightAliases()[0] : null;
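
For context on the one-line change above: it tightens the condition under which the analyzer consults the child join tree while merging joins. The following is a minimal illustrative sketch only, not the full SemanticAnalyzer logic; QBJoinTree, getJoinSrc(), getNoOuterJoin() and getRightAliases() are taken from the diff, everything else is paraphrased.

    // Sketch of the HIVE-12465 guard (illustrative, simplified).
    QBJoinTree leftTree = joinTree.getJoinSrc();
    List<String> leftTreeLeftSrc = new ArrayList<String>();
    // Before the fix the child tree was consulted whenever it was non-null;
    // after the fix it is consulted only when it contains no outer joins,
    // since merging into a tree with an (outer) join can, per the commit
    // message, produce wrong results.
    if (leftTree != null && leftTree.getNoOuterJoin()) {
      String leftTreeRightSource = leftTree.getRightAliases() != null
          && leftTree.getRightAliases().length > 0
          ? leftTree.getRightAliases()[0] : null;
      // ... continue resolving the merged join sources as in the original code ...
    }

The new mergejoin.q queries below exercise exactly these mixed inner/outer join shapes with CBO disabled.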

http://git-wip-us.apache.org/repos/asf/hive/blob/b94a2177/ql/src/test/queries/clientpositive/mergejoin.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/mergejoin.q b/ql/src/test/queries/clientpositive/mergejoin.q
index 6cd3929..82e1c93 100644
--- a/ql/src/test/queries/clientpositive/mergejoin.q
+++ b/ql/src/test/queries/clientpositive/mergejoin.q
@@ -132,3 +132,15 @@ select * from
 (select * from tab where tab.key = 0)a
 join
 (select * from tab_part where tab_part.key = 98)b on a.key = b.key full outer join tab_part c on b.key = c.key;
+
+set hive.cbo.enable = false;
+
+select * from
+(select * from tab where tab.key = 0)a
+full outer join
+(select * from tab_part where tab_part.key = 98)b join tab_part c on a.key = b.key and b.key = c.key;
+
+select * from
+(select * from tab where tab.key = 0)a
+join
+(select * from tab_part where tab_part.key = 98)b full outer join tab_part c on a.key = b.key and b.key = c.key;

http://git-wip-us.apache.org/repos/asf/hive/blob/b94a2177/ql/src/test/results/clientpositive/mergejoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/mergejoin.q.out b/ql/src/test/results/clientpositive/mergejoin.q.out
index 65f5ef5..e4a9e5b 100644
--- a/ql/src/test/results/clientpositive/mergejoin.q.out
+++ b/ql/src/test/results/clientpositive/mergejoin.q.out
@@ -3787,3 +3787,551 @@ NULL	NULL	NULL	NULL	NULL	NULL	97	val_97	2008-04-08
 NULL	NULL	NULL	NULL	NULL	NULL	97	val_97	2008-04-08
 NULL	NULL	NULL	NULL	NULL	NULL	98	val_98	2008-04-08
 NULL	NULL	NULL	NULL	NULL	NULL	98	val_98	2008-04-08
+Warning: Shuffle Join JOIN[9][tables = [a, b]] in Stage 'Stage-1:MAPRED' is a cross product
+PREHOOK: query: select * from
+(select * from tab where tab.key = 0)a
+full outer join
+(select * from tab_part where tab_part.key = 98)b join tab_part c on a.key = b.key and b.key = c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select * from
+(select * from tab where tab.key = 0)a
+full outer join
+(select * from tab_part where tab_part.key = 98)b join tab_part c on a.key = b.key and b.key = c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+Warning: Shuffle Join JOIN[9][tables = [a, b]] in Stage 'Stage-1:MAPRED' is a cross product
+PREHOOK: query: select * from
+(select * from tab where tab.key = 0)a
+join
+(select * from tab_part where tab_part.key = 98)b full outer join tab_part c on a.key = b.key and b.key = c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select * from
+(select * from tab where tab.key = 0)a
+join
+(select * from tab_part where tab_part.key = 98)b full outer join tab_part c on a.key = b.key and b.key = c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+NULL	NULL	NULL	NULL	NULL	NULL	0	val_0	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	0	val_0	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	0	val_0	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	10	val_10	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	100	val_100	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	100	val_100	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	103	val_103	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	103	val_103	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	104	val_104	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	104	val_104	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	105	val_105	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	11	val_11	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	111	val_111	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	113	val_113	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	113	val_113	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	114	val_114	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	116	val_116	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	118	val_118	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	118	val_118	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	119	val_119	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	119	val_119	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	119	val_119	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	12	val_12	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	12	val_12	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	120	val_120	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	120	val_120	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	125	val_125	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	125	val_125	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	126	val_126	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	128	val_128	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	128	val_128	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	128	val_128	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	129	val_129	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	129	val_129	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	131	val_131	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	133	val_133	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	134	val_134	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	134	val_134	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	136	val_136	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	137	val_137	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	137	val_137	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	138	val_138	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	138	val_138	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	138	val_138	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	138	val_138	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	143	val_143	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	145	val_145	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	146	val_146	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	146	val_146	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	149	val_149	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	149	val_149	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	15	val_15	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	15	val_15	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	150	val_150	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	152	val_152	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	152	val_152	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	153	val_153	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	155	val_155	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	156	val_156	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	157	val_157	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	158	val_158	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	160	val_160	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	162	val_162	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	163	val_163	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	164	val_164	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	164	val_164	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	165	val_165	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	165	val_165	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	166	val_166	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	167	val_167	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	167	val_167	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	167	val_167	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	168	val_168	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	169	val_169	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	169	val_169	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	169	val_169	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	169	val_169	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	17	val_17	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	170	val_170	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	172	val_172	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	172	val_172	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	174	val_174	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	174	val_174	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	175	val_175	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	175	val_175	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	176	val_176	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	176	val_176	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	177	val_177	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	178	val_178	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	179	val_179	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	179	val_179	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	18	val_18	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	18	val_18	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	180	val_180	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	181	val_181	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	183	val_183	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	186	val_186	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	187	val_187	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	187	val_187	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	187	val_187	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	189	val_189	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	19	val_19	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	190	val_190	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	191	val_191	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	191	val_191	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	192	val_192	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	193	val_193	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	193	val_193	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	193	val_193	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	194	val_194	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	195	val_195	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	195	val_195	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	196	val_196	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	197	val_197	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	197	val_197	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	199	val_199	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	199	val_199	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	199	val_199	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	2	val_2	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	20	val_20	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	200	val_200	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	200	val_200	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	201	val_201	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	202	val_202	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	203	val_203	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	203	val_203	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	205	val_205	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	205	val_205	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	207	val_207	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	207	val_207	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	208	val_208	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	208	val_208	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	208	val_208	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	209	val_209	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	209	val_209	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	213	val_213	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	213	val_213	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	214	val_214	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	216	val_216	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	216	val_216	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	217	val_217	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	217	val_217	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	218	val_218	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	219	val_219	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	219	val_219	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	221	val_221	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	221	val_221	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	222	val_222	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	223	val_223	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	223	val_223	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	224	val_224	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	224	val_224	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	226	val_226	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	228	val_228	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	229	val_229	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	229	val_229	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	230	val_230	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	230	val_230	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	230	val_230	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	230	val_230	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	230	val_230	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	233	val_233	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	233	val_233	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	235	val_235	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	237	val_237	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	237	val_237	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	238	val_238	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	238	val_238	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	239	val_239	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	239	val_239	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	24	val_24	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	24	val_24	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	241	val_241	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	242	val_242	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	242	val_242	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	244	val_244	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	247	val_247	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	248	val_248	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	249	val_249	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	252	val_252	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	255	val_255	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	255	val_255	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	256	val_256	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	256	val_256	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	257	val_257	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	258	val_258	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	26	val_26	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	26	val_26	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	260	val_260	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	262	val_262	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	263	val_263	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	265	val_265	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	265	val_265	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	266	val_266	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	27	val_27	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	272	val_272	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	272	val_272	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	273	val_273	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	273	val_273	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	273	val_273	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	274	val_274	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	275	val_275	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	277	val_277	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	277	val_277	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	277	val_277	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	277	val_277	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	278	val_278	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	278	val_278	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	28	val_28	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	280	val_280	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	280	val_280	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	281	val_281	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	281	val_281	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	282	val_282	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	282	val_282	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	283	val_283	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	284	val_284	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	285	val_285	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	286	val_286	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	287	val_287	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	288	val_288	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	288	val_288	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	289	val_289	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	291	val_291	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	292	val_292	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	296	val_296	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	298	val_298	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	298	val_298	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	298	val_298	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	30	val_30	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	302	val_302	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	305	val_305	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	306	val_306	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	307	val_307	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	307	val_307	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	308	val_308	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	309	val_309	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	309	val_309	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	310	val_310	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	311	val_311	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	311	val_311	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	311	val_311	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	315	val_315	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	316	val_316	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	316	val_316	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	316	val_316	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	317	val_317	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	317	val_317	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	318	val_318	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	318	val_318	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	318	val_318	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	321	val_321	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	321	val_321	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	322	val_322	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	322	val_322	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	323	val_323	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	325	val_325	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	325	val_325	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	327	val_327	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	327	val_327	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	327	val_327	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	33	val_33	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	331	val_331	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	331	val_331	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	332	val_332	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	333	val_333	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	333	val_333	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	335	val_335	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	336	val_336	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	338	val_338	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	339	val_339	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	34	val_34	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	341	val_341	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	342	val_342	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	342	val_342	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	344	val_344	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	344	val_344	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	345	val_345	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	348	val_348	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	348	val_348	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	348	val_348	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	348	val_348	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	348	val_348	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	35	val_35	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	35	val_35	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	35	val_35	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	351	val_351	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	353	val_353	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	353	val_353	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	356	val_356	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	360	val_360	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	362	val_362	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	364	val_364	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	365	val_365	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	366	val_366	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	367	val_367	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	367	val_367	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	368	val_368	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	369	val_369	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	369	val_369	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	369	val_369	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	37	val_37	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	37	val_37	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	373	val_373	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	374	val_374	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	375	val_375	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	377	val_377	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	378	val_378	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	379	val_379	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	382	val_382	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	382	val_382	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	384	val_384	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	384	val_384	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	384	val_384	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	386	val_386	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	389	val_389	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	392	val_392	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	393	val_393	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	394	val_394	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	395	val_395	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	395	val_395	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	396	val_396	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	396	val_396	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	396	val_396	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	397	val_397	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	397	val_397	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	399	val_399	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	399	val_399	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	4	val_4	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	400	val_400	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	401	val_401	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	401	val_401	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	401	val_401	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	401	val_401	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	401	val_401	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	402	val_402	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	403	val_403	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	403	val_403	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	403	val_403	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	404	val_404	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	404	val_404	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	406	val_406	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	406	val_406	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	406	val_406	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	406	val_406	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	407	val_407	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	409	val_409	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	409	val_409	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	409	val_409	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	41	val_41	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	411	val_411	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	413	val_413	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	413	val_413	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	414	val_414	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	414	val_414	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	417	val_417	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	417	val_417	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	417	val_417	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	418	val_418	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	419	val_419	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	42	val_42	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	42	val_42	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	421	val_421	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	424	val_424	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	424	val_424	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	427	val_427	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	429	val_429	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	429	val_429	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	43	val_43	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	430	val_430	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	430	val_430	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	430	val_430	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	431	val_431	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	431	val_431	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	431	val_431	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	432	val_432	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	435	val_435	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	436	val_436	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	437	val_437	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	438	val_438	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	438	val_438	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	438	val_438	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	439	val_439	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	439	val_439	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	44	val_44	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	443	val_443	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	444	val_444	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	446	val_446	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	448	val_448	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	449	val_449	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	452	val_452	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	453	val_453	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	454	val_454	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	454	val_454	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	454	val_454	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	455	val_455	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	457	val_457	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	458	val_458	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	458	val_458	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	459	val_459	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	459	val_459	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	460	val_460	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	462	val_462	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	462	val_462	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	463	val_463	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	463	val_463	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	466	val_466	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	466	val_466	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	466	val_466	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	467	val_467	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	468	val_468	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	468	val_468	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	468	val_468	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	468	val_468	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	469	val_469	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	469	val_469	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	469	val_469	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	469	val_469	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	469	val_469	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	47	val_47	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	470	val_470	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	472	val_472	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	475	val_475	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	477	val_477	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	478	val_478	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	478	val_478	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	479	val_479	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	480	val_480	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	480	val_480	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	480	val_480	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	481	val_481	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	482	val_482	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	483	val_483	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	484	val_484	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	485	val_485	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	487	val_487	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	489	val_489	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	489	val_489	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	489	val_489	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	489	val_489	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	490	val_490	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	491	val_491	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	492	val_492	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	492	val_492	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	493	val_493	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	494	val_494	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	495	val_495	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	496	val_496	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	497	val_497	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	498	val_498	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	498	val_498	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	498	val_498	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	5	val_5	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	5	val_5	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	5	val_5	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	51	val_51	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	51	val_51	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	53	val_53	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	54	val_54	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	57	val_57	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	58	val_58	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	58	val_58	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	64	val_64	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	65	val_65	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	66	val_66	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	67	val_67	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	67	val_67	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	69	val_69	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	70	val_70	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	70	val_70	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	70	val_70	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	72	val_72	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	72	val_72	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	74	val_74	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	76	val_76	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	76	val_76	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	77	val_77	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	78	val_78	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	8	val_8	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	80	val_80	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	82	val_82	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	83	val_83	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	83	val_83	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	84	val_84	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	84	val_84	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	85	val_85	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	86	val_86	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	87	val_87	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	9	val_9	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	90	val_90	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	90	val_90	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	90	val_90	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	92	val_92	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	95	val_95	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	95	val_95	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	96	val_96	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	97	val_97	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	97	val_97	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	98	val_98	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	98	val_98	2008-04-08

http://git-wip-us.apache.org/repos/asf/hive/blob/b94a2177/ql/src/test/results/clientpositive/tez/mergejoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/mergejoin.q.out b/ql/src/test/results/clientpositive/tez/mergejoin.q.out
index 8745728..34e4da3 100644
--- a/ql/src/test/results/clientpositive/tez/mergejoin.q.out
+++ b/ql/src/test/results/clientpositive/tez/mergejoin.q.out
@@ -3760,3 +3760,551 @@ NULL	NULL	NULL	NULL	NULL	NULL	97	val_97	2008-04-08
 NULL	NULL	NULL	NULL	NULL	NULL	97	val_97	2008-04-08
 NULL	NULL	NULL	NULL	NULL	NULL	98	val_98	2008-04-08
 NULL	NULL	NULL	NULL	NULL	NULL	98	val_98	2008-04-08
+Warning: Shuffle Join MERGEJOIN[25][tables = [a, b]] in Stage 'Reducer 2' is a cross product
+PREHOOK: query: select * from
+(select * from tab where tab.key = 0)a
+full outer join
+(select * from tab_part where tab_part.key = 98)b join tab_part c on a.key = b.key and b.key = c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select * from
+(select * from tab where tab.key = 0)a
+full outer join
+(select * from tab_part where tab_part.key = 98)b join tab_part c on a.key = b.key and b.key = c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+Warning: Shuffle Join MERGEJOIN[17][tables = [a, b]] in Stage 'Reducer 2' is a cross product
+PREHOOK: query: select * from
+(select * from tab where tab.key = 0)a
+join
+(select * from tab_part where tab_part.key = 98)b full outer join tab_part c on a.key = b.key and b.key = c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select * from
+(select * from tab where tab.key = 0)a
+join
+(select * from tab_part where tab_part.key = 98)b full outer join tab_part c on a.key = b.key and b.key = c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+NULL	NULL	NULL	NULL	NULL	NULL	0	val_0	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	0	val_0	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	0	val_0	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	10	val_10	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	100	val_100	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	100	val_100	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	103	val_103	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	103	val_103	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	104	val_104	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	104	val_104	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	105	val_105	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	11	val_11	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	111	val_111	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	113	val_113	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	113	val_113	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	114	val_114	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	116	val_116	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	118	val_118	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	118	val_118	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	119	val_119	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	119	val_119	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	119	val_119	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	12	val_12	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	12	val_12	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	120	val_120	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	120	val_120	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	125	val_125	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	125	val_125	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	126	val_126	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	128	val_128	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	128	val_128	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	128	val_128	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	129	val_129	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	129	val_129	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	131	val_131	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	133	val_133	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	134	val_134	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	134	val_134	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	136	val_136	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	137	val_137	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	137	val_137	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	138	val_138	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	138	val_138	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	138	val_138	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	138	val_138	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	143	val_143	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	145	val_145	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	146	val_146	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	146	val_146	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	149	val_149	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	149	val_149	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	15	val_15	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	15	val_15	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	150	val_150	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	152	val_152	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	152	val_152	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	153	val_153	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	155	val_155	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	156	val_156	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	157	val_157	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	158	val_158	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	160	val_160	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	162	val_162	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	163	val_163	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	164	val_164	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	164	val_164	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	165	val_165	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	165	val_165	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	166	val_166	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	167	val_167	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	167	val_167	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	167	val_167	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	168	val_168	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	169	val_169	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	169	val_169	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	169	val_169	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	169	val_169	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	17	val_17	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	170	val_170	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	172	val_172	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	172	val_172	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	174	val_174	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	174	val_174	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	175	val_175	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	175	val_175	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	176	val_176	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	176	val_176	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	177	val_177	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	178	val_178	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	179	val_179	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	179	val_179	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	18	val_18	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	18	val_18	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	180	val_180	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	181	val_181	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	183	val_183	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	186	val_186	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	187	val_187	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	187	val_187	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	187	val_187	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	189	val_189	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	19	val_19	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	190	val_190	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	191	val_191	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	191	val_191	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	192	val_192	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	193	val_193	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	193	val_193	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	193	val_193	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	194	val_194	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	195	val_195	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	195	val_195	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	196	val_196	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	197	val_197	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	197	val_197	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	199	val_199	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	199	val_199	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	199	val_199	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	2	val_2	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	20	val_20	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	200	val_200	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	200	val_200	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	201	val_201	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	202	val_202	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	203	val_203	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	203	val_203	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	205	val_205	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	205	val_205	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	207	val_207	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	207	val_207	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	208	val_208	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	208	val_208	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	208	val_208	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	209	val_209	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	209	val_209	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	213	val_213	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	213	val_213	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	214	val_214	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	216	val_216	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	216	val_216	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	217	val_217	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	217	val_217	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	218	val_218	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	219	val_219	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	219	val_219	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	221	val_221	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	221	val_221	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	222	val_222	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	223	val_223	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	223	val_223	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	224	val_224	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	224	val_224	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	226	val_226	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	228	val_228	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	229	val_229	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	229	val_229	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	230	val_230	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	230	val_230	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	230	val_230	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	230	val_230	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	230	val_230	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	233	val_233	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	233	val_233	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	235	val_235	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	237	val_237	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	237	val_237	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	238	val_238	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	238	val_238	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	239	val_239	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	239	val_239	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	24	val_24	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	24	val_24	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	241	val_241	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	242	val_242	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	242	val_242	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	244	val_244	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	247	val_247	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	248	val_248	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	249	val_249	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	252	val_252	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	255	val_255	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	255	val_255	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	256	val_256	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	256	val_256	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	257	val_257	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	258	val_258	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	26	val_26	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	26	val_26	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	260	val_260	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	262	val_262	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	263	val_263	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	265	val_265	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	265	val_265	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	266	val_266	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	27	val_27	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	272	val_272	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	272	val_272	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	273	val_273	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	273	val_273	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	273	val_273	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	274	val_274	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	275	val_275	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	277	val_277	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	277	val_277	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	277	val_277	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	277	val_277	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	278	val_278	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	278	val_278	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	28	val_28	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	280	val_280	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	280	val_280	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	281	val_281	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	281	val_281	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	282	val_282	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	282	val_282	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	283	val_283	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	284	val_284	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	285	val_285	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	286	val_286	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	287	val_287	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	288	val_288	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	288	val_288	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	289	val_289	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	291	val_291	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	292	val_292	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	296	val_296	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	298	val_298	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	298	val_298	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	298	val_298	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	30	val_30	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	302	val_302	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	305	val_305	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	306	val_306	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	307	val_307	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	307	val_307	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	308	val_308	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	309	val_309	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	309	val_309	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	310	val_310	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	311	val_311	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	311	val_311	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	311	val_311	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	315	val_315	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	316	val_316	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	316	val_316	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	316	val_316	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	317	val_317	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	317	val_317	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	318	val_318	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	318	val_318	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	318	val_318	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	321	val_321	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	321	val_321	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	322	val_322	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	322	val_322	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	323	val_323	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	325	val_325	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	325	val_325	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	327	val_327	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	327	val_327	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	327	val_327	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	33	val_33	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	331	val_331	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	331	val_331	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	332	val_332	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	333	val_333	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	333	val_333	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	335	val_335	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	336	val_336	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	338	val_338	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	339	val_339	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	34	val_34	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	341	val_341	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	342	val_342	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	342	val_342	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	344	val_344	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	344	val_344	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	345	val_345	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	348	val_348	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	348	val_348	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	348	val_348	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	348	val_348	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	348	val_348	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	35	val_35	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	35	val_35	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	35	val_35	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	351	val_351	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	353	val_353	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	353	val_353	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	356	val_356	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	360	val_360	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	362	val_362	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	364	val_364	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	365	val_365	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	366	val_366	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	367	val_367	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	367	val_367	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	368	val_368	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	369	val_369	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	369	val_369	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	369	val_369	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	37	val_37	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	37	val_37	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	373	val_373	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	374	val_374	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	375	val_375	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	377	val_377	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	378	val_378	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	379	val_379	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	382	val_382	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	382	val_382	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	384	val_384	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	384	val_384	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	384	val_384	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	386	val_386	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	389	val_389	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	392	val_392	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	393	val_393	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	394	val_394	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	395	val_395	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	395	val_395	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	396	val_396	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	396	val_396	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	396	val_396	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	397	val_397	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	397	val_397	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	399	val_399	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	399	val_399	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	4	val_4	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	400	val_400	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	401	val_401	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	401	val_401	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	401	val_401	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	401	val_401	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	401	val_401	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	402	val_402	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	403	val_403	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	403	val_403	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	403	val_403	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	404	val_404	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	404	val_404	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	406	val_406	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	406	val_406	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	406	val_406	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	406	val_406	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	407	val_407	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	409	val_409	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	409	val_409	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	409	val_409	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	41	val_41	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	411	val_411	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	413	val_413	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	413	val_413	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	414	val_414	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	414	val_414	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	417	val_417	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	417	val_417	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	417	val_417	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	418	val_418	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	419	val_419	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	42	val_42	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	42	val_42	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	421	val_421	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	424	val_424	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	424	val_424	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	427	val_427	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	429	val_429	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	429	val_429	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	43	val_43	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	430	val_430	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	430	val_430	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	430	val_430	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	431	val_431	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	431	val_431	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	431	val_431	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	432	val_432	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	435	val_435	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	436	val_436	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	437	val_437	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	438	val_438	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	438	val_438	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	438	val_438	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	439	val_439	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	439	val_439	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	44	val_44	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	443	val_443	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	444	val_444	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	446	val_446	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	448	val_448	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	449	val_449	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	452	val_452	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	453	val_453	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	454	val_454	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	454	val_454	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	454	val_454	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	455	val_455	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	457	val_457	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	458	val_458	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	458	val_458	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	459	val_459	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	459	val_459	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	460	val_460	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	462	val_462	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	462	val_462	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	463	val_463	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	463	val_463	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	466	val_466	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	466	val_466	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	466	val_466	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	467	val_467	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	468	val_468	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	468	val_468	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	468	val_468	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	468	val_468	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	469	val_469	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	469	val_469	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	469	val_469	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	469	val_469	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	469	val_469	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	47	val_47	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	470	val_470	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	472	val_472	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	475	val_475	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	477	val_477	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	478	val_478	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	478	val_478	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	479	val_479	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	480	val_480	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	480	val_480	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	480	val_480	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	481	val_481	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	482	val_482	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	483	val_483	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	484	val_484	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	485	val_485	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	487	val_487	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	489	val_489	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	489	val_489	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	489	val_489	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	489	val_489	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	490	val_490	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	491	val_491	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	492	val_492	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	492	val_492	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	493	val_493	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	494	val_494	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	495	val_495	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	496	val_496	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	497	val_497	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	498	val_498	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	498	val_498	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	498	val_498	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	5	val_5	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	5	val_5	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	5	val_5	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	51	val_51	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	51	val_51	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	53	val_53	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	54	val_54	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	57	val_57	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	58	val_58	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	58	val_58	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	64	val_64	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	65	val_65	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	66	val_66	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	67	val_67	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	67	val_67	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	69	val_69	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	70	val_70	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	70	val_70	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	70	val_70	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	72	val_72	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	72	val_72	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	74	val_74	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	76	val_76	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	76	val_76	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	77	val_77	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	78	val_78	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	8	val_8	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	80	val_80	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	82	val_82	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	83	val_83	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	83	val_83	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	84	val_84	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	84	val_84	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	85	val_85	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	86	val_86	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	87	val_87	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	9	val_9	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	90	val_90	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	90	val_90	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	90	val_90	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	92	val_92	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	95	val_95	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	95	val_95	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	96	val_96	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	97	val_97	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	97	val_97	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	98	val_98	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	98	val_98	2008-04-08


[05/27] hive git commit: HIVE-12331 : Remove hive.enforce.bucketing & hive.enforce.sorting configs (Ashutosh Chauhan via Jason Dere)

Posted by om...@apache.org.
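For context, a minimal HiveQL sketch of the settings this commit removes (not part of the patch itself; the table names are borrowed from the tests in the diff below). Before HIVE-12331, bucketed and sorted writes had to be enabled per session, and with the two configs gone that enforcement is assumed to be always on, which is why golden files such as alter_numbuckets_partitioned_table.q.out are deleted or regenerated in this commit:

    set hive.enforce.bucketing=true;   -- config removed by this commit
    set hive.enforce.sorting=true;     -- config removed by this commit
    -- a bucketed insert that previously depended on the settings above
    INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;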
http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table.q.out b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table.q.out
deleted file mode 100644
index b1dfd7c..0000000
--- a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table.q.out
+++ /dev/null
@@ -1,553 +0,0 @@
-PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20,0.20S)
-create table tst1(key string, value string) partitioned by (ds string) clustered by (key) into 10 buckets
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20,0.20S)
-create table tst1(key string, value string) partitioned by (ds string) clustered by (key) into 10 buckets
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tst1
-PREHOOK: query: alter table tst1 clustered by (key) into 8 buckets
-PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@tst1
-PREHOOK: Output: default@tst1
-POSTHOOK: query: alter table tst1 clustered by (key) into 8 buckets
-POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@tst1
-POSTHOOK: Output: default@tst1
-PREHOOK: query: describe formatted tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: describe formatted tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	8                   	 
-Bucket Columns:     	[key]               	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: insert overwrite table tst1 partition (ds='1') select key, value from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@tst1@ds=1
-POSTHOOK: query: insert overwrite table tst1 partition (ds='1') select key, value from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tst1@ds=1
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: describe formatted tst1 partition (ds = '1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: describe formatted tst1 partition (ds = '1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1]                 	 
-Database:           	default             	 
-Table:              	tst1                	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	8                   	 
-Bucket Columns:     	[key]               	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- Test changing bucket number
-
-alter table tst1 clustered by (key) into 12 buckets
-PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@tst1
-PREHOOK: Output: default@tst1
-POSTHOOK: query: -- Test changing bucket number
-
-alter table tst1 clustered by (key) into 12 buckets
-POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@tst1
-POSTHOOK: Output: default@tst1
-PREHOOK: query: insert overwrite table tst1 partition (ds='1') select key, value from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@tst1@ds=1
-POSTHOOK: query: insert overwrite table tst1 partition (ds='1') select key, value from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tst1@ds=1
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: describe formatted tst1 partition (ds = '1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: describe formatted tst1 partition (ds = '1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1]                 	 
-Database:           	default             	 
-Table:              	tst1                	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	12                  	 
-Bucket Columns:     	[key]               	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: describe formatted tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: describe formatted tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	12                  	 
-Bucket Columns:     	[key]               	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- Test changing bucket number of (table/partition)
-
-alter table tst1 into 4 buckets
-PREHOOK: type: ALTERTABLE_BUCKETNUM
-PREHOOK: Input: default@tst1
-POSTHOOK: query: -- Test changing bucket number of (table/partition)
-
-alter table tst1 into 4 buckets
-POSTHOOK: type: ALTERTABLE_BUCKETNUM
-POSTHOOK: Input: default@tst1
-POSTHOOK: Output: default@tst1
-PREHOOK: query: describe formatted tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: describe formatted tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	4                   	 
-Bucket Columns:     	[key]               	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: describe formatted tst1 partition (ds = '1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: describe formatted tst1 partition (ds = '1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1]                 	 
-Database:           	default             	 
-Table:              	tst1                	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	12                  	 
-Bucket Columns:     	[key]               	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: alter table tst1 partition (ds = '1') into 6 buckets
-PREHOOK: type: ALTERPARTITION_BUCKETNUM
-PREHOOK: Input: default@tst1
-POSTHOOK: query: alter table tst1 partition (ds = '1') into 6 buckets
-POSTHOOK: type: ALTERPARTITION_BUCKETNUM
-POSTHOOK: Input: default@tst1
-POSTHOOK: Input: default@tst1@ds=1
-POSTHOOK: Output: default@tst1@ds=1
-PREHOOK: query: describe formatted tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: describe formatted tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	4                   	 
-Bucket Columns:     	[key]               	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: describe formatted tst1 partition (ds = '1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: describe formatted tst1 partition (ds = '1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1]                 	 
-Database:           	default             	 
-Table:              	tst1                	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	false               
-#### A masked pattern was here ####
-	numFiles            	1                   
-	numRows             	-1                  
-	rawDataSize         	-1                  
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	6                   	 
-Bucket Columns:     	[key]               	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- Test adding sort order
-
-alter table tst1 clustered by (key) sorted by (key asc) into 12 buckets
-PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@tst1
-PREHOOK: Output: default@tst1
-POSTHOOK: query: -- Test adding sort order
-
-alter table tst1 clustered by (key) sorted by (key asc) into 12 buckets
-POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@tst1
-POSTHOOK: Output: default@tst1
-PREHOOK: query: describe formatted tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: describe formatted tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	12                  	 
-Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:1)]	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- Test changing sort order
-
-alter table tst1 clustered by (key) sorted by (value desc) into 12 buckets
-PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@tst1
-PREHOOK: Output: default@tst1
-POSTHOOK: query: -- Test changing sort order
-
-alter table tst1 clustered by (key) sorted by (value desc) into 12 buckets
-POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@tst1
-POSTHOOK: Output: default@tst1
-PREHOOK: query: describe formatted tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: describe formatted tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	12                  	 
-Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:value, order:0)]	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- Test removing test order
-
-alter table tst1 clustered by (value) into 12 buckets
-PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@tst1
-PREHOOK: Output: default@tst1
-POSTHOOK: query: -- Test removing test order
-
-alter table tst1 clustered by (value) into 12 buckets
-POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@tst1
-POSTHOOK: Output: default@tst1
-PREHOOK: query: describe formatted tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: describe formatted tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	12                  	 
-Bucket Columns:     	[value]             	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- Test removing buckets
-
-alter table tst1 not clustered
-PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@tst1
-PREHOOK: Output: default@tst1
-POSTHOOK: query: -- Test removing buckets
-
-alter table tst1 not clustered
-POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@tst1
-POSTHOOK: Output: default@tst1
-PREHOOK: query: describe formatted tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: describe formatted tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2.q.out b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2.q.out
deleted file mode 100644
index e5f8e7f..0000000
--- a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2.q.out
+++ /dev/null
@@ -1,851 +0,0 @@
-PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20,0.20S)
--- Tests that when overwriting a partition in a table after altering the bucketing/sorting metadata
--- the partition metadata is updated as well.
-
-CREATE TABLE tst1(key STRING, value STRING) PARTITIONED BY (ds STRING)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20,0.20S)
--- Tests that when overwriting a partition in a table after altering the bucketing/sorting metadata
--- the partition metadata is updated as well.
-
-CREATE TABLE tst1(key STRING, value STRING) PARTITIONED BY (ds STRING)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tst1
-PREHOOK: query: DESCRIBE FORMATTED tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@tst1@ds=1
-POSTHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tst1@ds=1
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1]                 	 
-Database:           	default             	 
-Table:              	tst1                	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- Test an unbucketed partition gets converted to bucketed
-ALTER TABLE tst1 CLUSTERED BY (key) INTO 8 BUCKETS
-PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@tst1
-PREHOOK: Output: default@tst1
-POSTHOOK: query: -- Test an unbucketed partition gets converted to bucketed
-ALTER TABLE tst1 CLUSTERED BY (key) INTO 8 BUCKETS
-POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@tst1
-POSTHOOK: Output: default@tst1
-PREHOOK: query: DESCRIBE FORMATTED tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	8                   	 
-Bucket Columns:     	[key]               	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@tst1@ds=1
-POSTHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tst1@ds=1
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1]                 	 
-Database:           	default             	 
-Table:              	tst1                	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	8                   	 
-Bucket Columns:     	[key]               	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- Test an unsorted partition gets converted to sorted
-ALTER TABLE tst1 CLUSTERED BY (key) SORTED BY (key DESC) INTO 8 BUCKETS
-PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@tst1
-PREHOOK: Output: default@tst1
-POSTHOOK: query: -- Test an unsorted partition gets converted to sorted
-ALTER TABLE tst1 CLUSTERED BY (key) SORTED BY (key DESC) INTO 8 BUCKETS
-POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@tst1
-POSTHOOK: Output: default@tst1
-PREHOOK: query: DESCRIBE FORMATTED tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	8                   	 
-Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:0)]	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@tst1@ds=1
-POSTHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tst1@ds=1
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1]                 	 
-Database:           	default             	 
-Table:              	tst1                	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	8                   	 
-Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:0)]	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- Test changing the bucket columns
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (key DESC) INTO 8 BUCKETS
-PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@tst1
-PREHOOK: Output: default@tst1
-POSTHOOK: query: -- Test changing the bucket columns
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (key DESC) INTO 8 BUCKETS
-POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@tst1
-POSTHOOK: Output: default@tst1
-PREHOOK: query: DESCRIBE FORMATTED tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	8                   	 
-Bucket Columns:     	[value]             	 
-Sort Columns:       	[Order(col:key, order:0)]	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@tst1@ds=1
-POSTHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tst1@ds=1
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1]                 	 
-Database:           	default             	 
-Table:              	tst1                	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	8                   	 
-Bucket Columns:     	[value]             	 
-Sort Columns:       	[Order(col:key, order:0)]	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- Test changing the number of buckets
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (key DESC) INTO 4 BUCKETS
-PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@tst1
-PREHOOK: Output: default@tst1
-POSTHOOK: query: -- Test changing the number of buckets
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (key DESC) INTO 4 BUCKETS
-POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@tst1
-POSTHOOK: Output: default@tst1
-PREHOOK: query: DESCRIBE FORMATTED tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	4                   	 
-Bucket Columns:     	[value]             	 
-Sort Columns:       	[Order(col:key, order:0)]	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@tst1@ds=1
-POSTHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tst1@ds=1
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1]                 	 
-Database:           	default             	 
-Table:              	tst1                	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	4                   	 
-Bucket Columns:     	[value]             	 
-Sort Columns:       	[Order(col:key, order:0)]	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- Test changing the sort columns
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (value DESC) INTO 4 BUCKETS
-PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@tst1
-PREHOOK: Output: default@tst1
-POSTHOOK: query: -- Test changing the sort columns
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (value DESC) INTO 4 BUCKETS
-POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@tst1
-POSTHOOK: Output: default@tst1
-PREHOOK: query: DESCRIBE FORMATTED tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	4                   	 
-Bucket Columns:     	[value]             	 
-Sort Columns:       	[Order(col:value, order:0)]	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@tst1@ds=1
-POSTHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tst1@ds=1
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1]                 	 
-Database:           	default             	 
-Table:              	tst1                	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	4                   	 
-Bucket Columns:     	[value]             	 
-Sort Columns:       	[Order(col:value, order:0)]	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- Test changing the sort order
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (value ASC) INTO 4 BUCKETS
-PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@tst1
-PREHOOK: Output: default@tst1
-POSTHOOK: query: -- Test changing the sort order
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (value ASC) INTO 4 BUCKETS
-POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@tst1
-POSTHOOK: Output: default@tst1
-PREHOOK: query: DESCRIBE FORMATTED tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	4                   	 
-Bucket Columns:     	[value]             	 
-Sort Columns:       	[Order(col:value, order:1)]	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@tst1@ds=1
-POSTHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tst1@ds=1
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1]                 	 
-Database:           	default             	 
-Table:              	tst1                	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	4                   	 
-Bucket Columns:     	[value]             	 
-Sort Columns:       	[Order(col:value, order:1)]	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- Test a sorted partition gets converted to unsorted
-ALTER TABLE tst1 CLUSTERED BY (value) INTO 4 BUCKETS
-PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@tst1
-PREHOOK: Output: default@tst1
-POSTHOOK: query: -- Test a sorted partition gets converted to unsorted
-ALTER TABLE tst1 CLUSTERED BY (value) INTO 4 BUCKETS
-POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@tst1
-POSTHOOK: Output: default@tst1
-PREHOOK: query: DESCRIBE FORMATTED tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	4                   	 
-Bucket Columns:     	[value]             	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@tst1@ds=1
-POSTHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tst1@ds=1
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1]                 	 
-Database:           	default             	 
-Table:              	tst1                	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	4                   	 
-Bucket Columns:     	[value]             	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- Test a bucketed partition gets converted to unbucketed
-ALTER TABLE tst1 NOT CLUSTERED
-PREHOOK: type: ALTERTABLE_CLUSTER_SORT
-PREHOOK: Input: default@tst1
-PREHOOK: Output: default@tst1
-POSTHOOK: query: -- Test a bucketed partition gets converted to unbucketed
-ALTER TABLE tst1 NOT CLUSTERED
-POSTHOOK: type: ALTERTABLE_CLUSTER_SORT
-POSTHOOK: Input: default@tst1
-POSTHOOK: Output: default@tst1
-PREHOOK: query: DESCRIBE FORMATTED tst1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@tst1@ds=1
-POSTHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@tst1@ds=1
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tst1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@tst1
-POSTHOOK: query: DESCRIBE FORMATTED tst1 PARTITION (ds = '1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@tst1
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[1]                 	 
-Database:           	default             	 
-Table:              	tst1                	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out
index f919f10..29a4c4b 100644
--- a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out
+++ b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out
@@ -1,13 +1,11 @@
-PREHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
--- Tests that when overwriting a partition in a table after altering the bucketing/sorting metadata
+PREHOOK: query: -- Tests that when overwriting a partition in a table after altering the bucketing/sorting metadata
 -- the partition metadata is updated as well.
 
 CREATE TABLE tst1(key STRING, value STRING) PARTITIONED BY (ds STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@tst1
-POSTHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
--- Tests that when overwriting a partition in a table after altering the bucketing/sorting metadata
+POSTHOOK: query: -- Tests that when overwriting a partition in a table after altering the bucketing/sorting metadata
 -- the partition metadata is updated as well.
 
 CREATE TABLE tst1(key STRING, value STRING) PARTITIONED BY (ds STRING)

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/results/clientpositive/cp_sel.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cp_sel.q.out b/ql/src/test/results/clientpositive/cp_sel.q.out
index a2c9fe0..a55b28d 100644
--- a/ql/src/test/results/clientpositive/cp_sel.q.out
+++ b/ql/src/test/results/clientpositive/cp_sel.q.out
@@ -75,13 +75,8 @@ insert overwrite table testpartbucket partition(ds,hr) select key,value,'hello'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
-  Stage-4
-  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+  Stage-0 depends on stages: Stage-1
   Stage-2 depends on stages: Stage-0
-  Stage-3
-  Stage-5
-  Stage-6 depends on stages: Stage-5
 
 STAGE PLANS:
   Stage: Stage-1
@@ -91,26 +86,28 @@ STAGE PLANS:
             alias: srcpart
             Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: key (type: string), value (type: string), 'hello' (type: string), 'world' (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
               Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
+              Reduce Output Operator
+                key expressions: _col0 (type: string)
+                sort order: +
+                Map-reduce partition columns: _col0 (type: string)
                 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.testpartbucket
-
-  Stage: Stage-7
-    Conditional Operator
-
-  Stage: Stage-4
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
+                value expressions: _col1 (type: string)
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string), 'hello' (type: string), 'world' (type: string)
+          outputColumnNames: _col0, _col1, _col2, _col3
+          Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                name: default.testpartbucket
 
   Stage: Stage-0
     Move Operator
@@ -128,36 +125,6 @@ STAGE PLANS:
   Stage: Stage-2
     Stats-Aggr Operator
 
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.testpartbucket
-
-  Stage: Stage-5
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.testpartbucket
-
-  Stage: Stage-6
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
 PREHOOK: query: insert overwrite table testpartbucket partition(ds,hr) select key,value,'hello' as ds, 'world' as hr from srcpart where hr=11
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
@@ -182,9 +149,9 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@testpartbucket
 POSTHOOK: Input: default@testpartbucket@ds=hello/hr=world
 #### A masked pattern was here ####
-238	val_238	hello	world
-86	val_86	hello	world
-311	val_311	hello	world
+0	val_0	hello	world
+0	val_0	hello	world
+0	val_0	hello	world
 PREHOOK: query: drop table testpartbucket
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@testpartbucket

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/results/clientpositive/index_auto_update.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_auto_update.q.out b/ql/src/test/results/clientpositive/index_auto_update.q.out
index 11af3f5..c85c80f 100644
--- a/ql/src/test/results/clientpositive/index_auto_update.q.out
+++ b/ql/src/test/results/clientpositive/index_auto_update.q.out
@@ -118,7 +118,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: string)
                   sort order: ++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                  Map-reduce partition columns: _col0 (type: string)
                   value expressions: _col2 (type: array<bigint>)
       Reduce Operator Tree:
         Group By Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/results/clientpositive/insert_into_with_schema2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert_into_with_schema2.q.out b/ql/src/test/results/clientpositive/insert_into_with_schema2.q.out
index 32e6e92..5d44d27 100644
--- a/ql/src/test/results/clientpositive/insert_into_with_schema2.q.out
+++ b/ql/src/test/results/clientpositive/insert_into_with_schema2.q.out
@@ -1,8 +1,16 @@
-PREHOOK: query: create table studenttab10k (age2 int)
+PREHOOK: query: -- SORT_QUERY_RESULTS;
+
+
+
+create table studenttab10k (age2 int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@studenttab10k
-POSTHOOK: query: create table studenttab10k (age2 int)
+POSTHOOK: query: -- SORT_QUERY_RESULTS;
+
+
+
+create table studenttab10k (age2 int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@studenttab10k

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/results/clientpositive/orc_analyze.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_analyze.q.out b/ql/src/test/results/clientpositive/orc_analyze.q.out
index bc46852..1156feb 100644
--- a/ql/src/test/results/clientpositive/orc_analyze.q.out
+++ b/ql/src/test/results/clientpositive/orc_analyze.q.out
@@ -904,10 +904,10 @@ Table:              	orc_create_people
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	4                   
 	numRows             	50                  
-	rawDataSize         	21950               
-	totalSize           	2102                
+	rawDataSize         	21975               
+	totalSize           	5263                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -947,10 +947,10 @@ Table:              	orc_create_people
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	4                   
 	numRows             	50                  
-	rawDataSize         	22050               
-	totalSize           	2118                
+	rawDataSize         	22043               
+	totalSize           	5336                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -1002,10 +1002,10 @@ Table:              	orc_create_people
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	4                   
 	numRows             	50                  
-	rawDataSize         	21950               
-	totalSize           	2102                
+	rawDataSize         	21975               
+	totalSize           	5263                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -1045,10 +1045,10 @@ Table:              	orc_create_people
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	4                   
 	numRows             	50                  
-	rawDataSize         	22050               
-	totalSize           	2118                
+	rawDataSize         	22043               
+	totalSize           	5336                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -1100,10 +1100,10 @@ Table:              	orc_create_people
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	4                   
 	numRows             	50                  
-	rawDataSize         	21950               
-	totalSize           	2102                
+	rawDataSize         	21975               
+	totalSize           	5263                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -1143,10 +1143,10 @@ Table:              	orc_create_people
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	4                   
 	numRows             	50                  
-	rawDataSize         	22050               
-	totalSize           	2118                
+	rawDataSize         	22043               
+	totalSize           	5336                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -1249,10 +1249,10 @@ Table:              	orc_create_people
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	4                   
 	numRows             	50                  
-	rawDataSize         	21950               
-	totalSize           	2102                
+	rawDataSize         	21975               
+	totalSize           	5263                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -1292,10 +1292,10 @@ Table:              	orc_create_people
 #### A masked pattern was here ####
 Partition Parameters:	 	 
 	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
+	numFiles            	4                   
 	numRows             	50                  
-	rawDataSize         	22050               
-	totalSize           	2118                
+	rawDataSize         	22043               
+	totalSize           	5336                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out
index 7b54dbe..e159f5e 100644
--- a/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out
+++ b/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out
@@ -148,35 +148,12 @@ STAGE PLANS:
                 Select Operator
                   expressions: _col0 (type: int), _col7 (type: string)
                   outputColumnNames: _col0, _col1
-                  File Output Operator
-                    compressed: false
-                    GlobalTableId: 1
-#### A masked pattern was here ####
-                    NumFilesPerFileSink: 1
-                    Static Partition Specification: ds=1/
-#### A masked pattern was here ####
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        properties:
-                          bucket_count 16
-                          bucket_field_name key
-                          columns key,value
-                          columns.comments 
-                          columns.types int:string
-#### A masked pattern was here ####
-                          name default.test_table3
-                          partition_columns ds
-                          partition_columns.types string
-                          serialization.ddl struct test_table3 { i32 key, string value}
-                          serialization.format 1
-                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                        name: default.test_table3
-                    TotalFiles: 1
-                    GatherStats: true
-                    MultiFileSpray: false
+                  Reduce Output Operator
+                    sort order: 
+                    Map-reduce partition columns: _col0 (type: int)
+                    tag: -1
+                    value expressions: _col0 (type: int), _col1 (type: string)
+                    auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -230,6 +207,40 @@ STAGE PLANS:
             name: default.test_table1
       Truncated Path -> Alias:
         /test_table1/ds=1 [a]
+      Needs Tagging: false
+      Reduce Operator Tree:
+        Select Operator
+          expressions: VALUE._col0 (type: int), VALUE._col1 (type: string)
+          outputColumnNames: _col0, _col1
+          File Output Operator
+            compressed: false
+            GlobalTableId: 1
+#### A masked pattern was here ####
+            NumFilesPerFileSink: 16
+            Static Partition Specification: ds=1/
+#### A masked pattern was here ####
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                properties:
+                  bucket_count 16
+                  bucket_field_name key
+                  columns key,value
+                  columns.comments 
+                  columns.types int:string
+#### A masked pattern was here ####
+                  name default.test_table3
+                  partition_columns ds
+                  partition_columns.types string
+                  serialization.ddl struct test_table3 { i32 key, string value}
+                  serialization.format 1
+                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                name: default.test_table3
+            TotalFiles: 16
+            GatherStats: true
+            MultiFileSpray: true
 
   Stage: Stage-0
     Move Operator
@@ -2023,7 +2034,7 @@ STAGE PLANS:
       Path -> Partition:
 #### A masked pattern was here ####
           Partition
-            base file name: (ds%3D1)000001_0
+            base file name: 000001_0
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             partition values:
@@ -2069,7 +2080,7 @@ STAGE PLANS:
               name: default.test_table3
             name: default.test_table3
       Truncated Path -> Alias:
-        /test_table3/ds=1/(ds%3D1)000001_0 [test_table3]
+        /test_table3/ds=1/000001_0 [test_table3]
 
   Stage: Stage-0
     Fetch Operator
@@ -2138,66 +2149,18 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@test_table3
 POSTHOOK: Input: default@test_table3@ds=1
 #### A masked pattern was here ####
-17	val_17	1
-33	val_33	1
-65	val_65	1
-97	val_97	1
-97	val_97	1
-97	val_97	1
-97	val_97	1
-113	val_113	1
-113	val_113	1
-113	val_113	1
-113	val_113	1
-129	val_129	1
-129	val_129	1
-129	val_129	1
-129	val_129	1
-145	val_145	1
-177	val_177	1
-193	val_193	1
-193	val_193	1
-193	val_193	1
-193	val_193	1
-193	val_193	1
-193	val_193	1
-193	val_193	1
-193	val_193	1
-193	val_193	1
-209	val_209	1
-209	val_209	1
-209	val_209	1
-209	val_209	1
-241	val_241	1
-257	val_257	1
-273	val_273	1
-273	val_273	1
-273	val_273	1
-273	val_273	1
-273	val_273	1
-273	val_273	1
-273	val_273	1
-273	val_273	1
-273	val_273	1
-289	val_289	1
-305	val_305	1
-321	val_321	1
-321	val_321	1
-321	val_321	1
-321	val_321	1
-353	val_353	1
-353	val_353	1
-353	val_353	1
-353	val_353	1
-369	val_369	1
-369	val_369	1
-369	val_369	1
-369	val_369	1
-369	val_369	1
-369	val_369	1
-369	val_369	1
-369	val_369	1
-369	val_369	1
+497	val_497	1
+481	val_481	1
+449	val_449	1
+417	val_417	1
+417	val_417	1
+417	val_417	1
+417	val_417	1
+417	val_417	1
+417	val_417	1
+417	val_417	1
+417	val_417	1
+417	val_417	1
 401	val_401	1
 401	val_401	1
 401	val_401	1
@@ -2223,18 +2186,66 @@ POSTHOOK: Input: default@test_table3@ds=1
 401	val_401	1
 401	val_401	1
 401	val_401	1
-417	val_417	1
-417	val_417	1
-417	val_417	1
-417	val_417	1
-417	val_417	1
-417	val_417	1
-417	val_417	1
-417	val_417	1
-417	val_417	1
-449	val_449	1
-481	val_481	1
-497	val_497	1
+369	val_369	1
+369	val_369	1
+369	val_369	1
+369	val_369	1
+369	val_369	1
+369	val_369	1
+369	val_369	1
+369	val_369	1
+369	val_369	1
+353	val_353	1
+353	val_353	1
+353	val_353	1
+353	val_353	1
+321	val_321	1
+321	val_321	1
+321	val_321	1
+321	val_321	1
+305	val_305	1
+289	val_289	1
+273	val_273	1
+273	val_273	1
+273	val_273	1
+273	val_273	1
+273	val_273	1
+273	val_273	1
+273	val_273	1
+273	val_273	1
+273	val_273	1
+257	val_257	1
+241	val_241	1
+209	val_209	1
+209	val_209	1
+209	val_209	1
+209	val_209	1
+193	val_193	1
+193	val_193	1
+193	val_193	1
+193	val_193	1
+193	val_193	1
+193	val_193	1
+193	val_193	1
+193	val_193	1
+193	val_193	1
+177	val_177	1
+145	val_145	1
+129	val_129	1
+129	val_129	1
+129	val_129	1
+129	val_129	1
+113	val_113	1
+113	val_113	1
+113	val_113	1
+113	val_113	1
+97	val_97	1
+97	val_97	1
+97	val_97	1
+97	val_97	1
+65	val_65	1
+33	val_33	1
+17	val_17	1
 PREHOOK: query: -- Join data from a sampled bucket to verify the data is bucketed
 SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1'
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out
index 4d912ca..af885b9 100644
--- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out
+++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out
@@ -127,6 +127,8 @@ STAGE DEPENDENCIES:
 STAGE PLANS:
   Stage: Stage-1
     Spark
+      Edges:
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -153,36 +155,13 @@ STAGE PLANS:
                         expressions: _col0 (type: int), _col7 (type: string)
                         outputColumnNames: _col0, _col1
                         Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-                        File Output Operator
-                          compressed: false
-                          GlobalTableId: 1
-#### A masked pattern was here ####
-                          NumFilesPerFileSink: 1
-                          Static Partition Specification: ds=1/
+                        Reduce Output Operator
+                          sort order: 
+                          Map-reduce partition columns: _col0 (type: int)
                           Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                          table:
-                              input format: org.apache.hadoop.mapred.TextInputFormat
-                              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                              properties:
-                                bucket_count 16
-                                bucket_field_name key
-                                columns key,value
-                                columns.comments 
-                                columns.types int:string
-#### A masked pattern was here ####
-                                name default.test_table3
-                                partition_columns ds
-                                partition_columns.types string
-                                serialization.ddl struct test_table3 { i32 key, string value}
-                                serialization.format 1
-                                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                              name: default.test_table3
-                          TotalFiles: 1
-                          GatherStats: true
-                          MultiFileSpray: false
+                          tag: -1
+                          value expressions: _col0 (type: int), _col1 (type: string)
+                          auto parallelism: false
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -236,6 +215,43 @@ STAGE PLANS:
                   name: default.test_table1
             Truncated Path -> Alias:
               /test_table1/ds=1 [a]
+        Reducer 2 
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: int), VALUE._col1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  GlobalTableId: 1
+#### A masked pattern was here ####
+                  NumFilesPerFileSink: 16
+                  Static Partition Specification: ds=1/
+                  Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      properties:
+                        bucket_count 16
+                        bucket_field_name key
+                        columns key,value
+                        columns.comments 
+                        columns.types int:string
+#### A masked pattern was here ####
+                        name default.test_table3
+                        partition_columns ds
+                        partition_columns.types string
+                        serialization.ddl struct test_table3 { i32 key, string value}
+                        serialization.format 1
+                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.test_table3
+                  TotalFiles: 16
+                  GatherStats: true
+                  MultiFileSpray: true
 
   Stage: Stage-0
     Move Operator


[08/27] hive git commit: HIVE-12469 : Bump Commons-Collections dependency from 3.2.1 to 3.2.2 to address vulnerability (Ashutosh Chauhan via Sergio Pena, Reuben Kuhnert)

Posted by om...@apache.org.
HIVE-12469 : Bump Commons-Collections dependency from 3.2.1 to 3.2.2 to address vulnerability (Ashutosh Chauhan via Sergio Pena, Reuben Kuhnert)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/68e1c0bb
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/68e1c0bb
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/68e1c0bb

Branch: refs/heads/master-fixed
Commit: 68e1c0bb38ec8922be64b13f86cf155a3c5e50f6
Parents: 1132055
Author: Ashutosh Chauhan <ha...@apache.org>
Authored: Thu Nov 19 11:06:37 2015 -0800
Committer: Owen O'Malley <om...@apache.org>
Committed: Mon Nov 30 11:14:35 2015 -0800

----------------------------------------------------------------------
 accumulo-handler/pom.xml | 8 +++++++-
 ant/pom.xml              | 6 ++++++
 common/pom.xml           | 6 +++++-
 hplsql/pom.xml           | 5 -----
 pom.xml                  | 8 +++++++-
 5 files changed, 25 insertions(+), 8 deletions(-)
----------------------------------------------------------------------
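Read together, the pom changes below follow one pattern: the root pom pins the patched commons-collections 3.2.2 once (via the new commons-collections.version property), the hplsql module drops its hard-coded 3.2.1 dependency, and modules whose third-party dependencies (accumulo-core, velocity, hadoop-common) still drag in the old jar add an explicit exclusion. A minimal sketch of the two halves of that pattern follows; the dependencyManagement wrapper and the omitted <project> boilerplate are assumptions for illustration, not quoted from the commit.

  <!-- Root pom (sketch): declare the patched version once. -->
  <properties>
    <commons-collections.version>3.2.2</commons-collections.version>
  </properties>
  <dependencyManagement>
    <dependencies>
      <dependency>
        <groupId>commons-collections</groupId>
        <artifactId>commons-collections</artifactId>
        <version>${commons-collections.version}</version>
      </dependency>
    </dependencies>
  </dependencyManagement>

  <!-- Module pom (sketch): stop a third-party dependency from pulling in the old 3.2.1 jar. -->
  <dependency>
    <groupId>org.apache.accumulo</groupId>
    <artifactId>accumulo-core</artifactId>
    <exclusions>
      <exclusion>
        <groupId>commons-collections</groupId>
        <artifactId>commons-collections</artifactId>
      </exclusion>
    </exclusions>
  </dependency>

With the exclusions in place, every module resolves commons-collections through the single managed entry, so the vulnerable 3.2.1 jar no longer reaches the classpath.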


http://git-wip-us.apache.org/repos/asf/hive/blob/68e1c0bb/accumulo-handler/pom.xml
----------------------------------------------------------------------
diff --git a/accumulo-handler/pom.xml b/accumulo-handler/pom.xml
index 759c718..9d5185b 100644
--- a/accumulo-handler/pom.xml
+++ b/accumulo-handler/pom.xml
@@ -39,7 +39,13 @@
     <dependency>
       <groupId>org.apache.accumulo</groupId>
       <artifactId>accumulo-core</artifactId>
-    </dependency>
+            <exclusions>
+             <exclusion>
+            <groupId>commons-collections</groupId>
+            <artifactId>commons-collections</artifactId>
+          </exclusion>
+           </exclusions>
+   </dependency>
     <dependency>
       <groupId>org.apache.accumulo</groupId>
       <artifactId>accumulo-fate</artifactId>

http://git-wip-us.apache.org/repos/asf/hive/blob/68e1c0bb/ant/pom.xml
----------------------------------------------------------------------
diff --git a/ant/pom.xml b/ant/pom.xml
index a1f7921..9706572 100644
--- a/ant/pom.xml
+++ b/ant/pom.xml
@@ -53,6 +53,12 @@
       <groupId>org.apache.velocity</groupId>
       <artifactId>velocity</artifactId>
       <version>${velocity.version}</version>
+           <exclusions>
+             <exclusion>
+            <groupId>commons-collections</groupId>
+            <artifactId>commons-collections</artifactId>
+          </exclusion>
+           </exclusions>
     </dependency>
   </dependencies>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/68e1c0bb/common/pom.xml
----------------------------------------------------------------------
diff --git a/common/pom.xml b/common/pom.xml
index cd14581..ee74282 100644
--- a/common/pom.xml
+++ b/common/pom.xml
@@ -91,7 +91,11 @@
       <version>${hadoop.version}</version>
       <optional>true</optional>
           <exclusions>
-            <exclusion>
+             <exclusion>
+            <groupId>commons-collections</groupId>
+            <artifactId>commons-collections</artifactId>
+          </exclusion>
+           <exclusion>
             <groupId>org.slf4j</groupId>
             <artifactId>slf4j-log4j12</artifactId>
           </exclusion>

http://git-wip-us.apache.org/repos/asf/hive/blob/68e1c0bb/hplsql/pom.xml
----------------------------------------------------------------------
diff --git a/hplsql/pom.xml b/hplsql/pom.xml
index 0aa647b..6329002 100644
--- a/hplsql/pom.xml
+++ b/hplsql/pom.xml
@@ -39,11 +39,6 @@
        <version>${guava.version}</version>
     </dependency>
     <dependency>
-        <groupId>commons-collections</groupId>
-        <artifactId>commons-collections</artifactId>
-        <version>3.2.1</version>
-    </dependency>
-    <dependency>
       <groupId>commons-cli</groupId>
       <artifactId>commons-cli</artifactId>
       <version>${commons-cli.version}</version>

http://git-wip-us.apache.org/repos/asf/hive/blob/68e1c0bb/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index c38c10f..34bdbf6 100644
--- a/pom.xml
+++ b/pom.xml
@@ -111,6 +111,7 @@
     <datanucleus-rdbms.version>3.2.9</datanucleus-rdbms.version>
     <commons-cli.version>1.2</commons-cli.version>
     <commons-codec.version>1.4</commons-codec.version>
+    <commons-collections.version>3.2.2</commons-collections.version>
     <commons-compress.version>1.9</commons-compress.version>
     <commons-exec.version>1.1</commons-exec.version>
     <commons-httpclient.version>3.0.1</commons-httpclient.version>
@@ -303,7 +304,12 @@
         <artifactId>commons-codec</artifactId>
         <version>${commons-codec.version}</version>
       </dependency>
-      <dependency>
+       <dependency>
+        <groupId>commons-collections</groupId>
+        <artifactId>commons-collections</artifactId>
+        <version>${commons-collections.version}</version>
+      </dependency>
+     <dependency>
         <groupId>commons-httpclient</groupId>
         <artifactId>commons-httpclient</artifactId>
         <version>${commons-httpclient.version}</version>


[07/27] hive git commit: HIVE-12331 : Remove hive.enforce.bucketing & hive.enforce.sorting configs (Ashutosh Chauhan via Jason Dere)

Posted by om...@apache.org.
HIVE-12331 : Remove hive.enforce.bucketing & hive.enforce.sorting configs (Ashutosh Chauhan via Jason Dere)
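Most of the diffstat below is mechanical fallout from that removal: each affected .q test simply loses its "set hive.enforce.bucketing" / "set hive.enforce.sorting" lines, because bucketing and sorting are now always enforced for tables declared CLUSTERED BY / SORTED BY. A hedged before/after sketch of such a test script follows; the table name and query are illustrative rather than taken from any one .q file, and the behavior when someone still sets the removed keys is only inferred from the ErrorMsg.java entry in the diffstat, not quoted from it.

  -- Before HIVE-12331: tests had to switch enforcement on explicitly.
  set hive.enforce.bucketing=true;
  set hive.enforce.sorting=true;
  CREATE TABLE demo_bucketed (key INT, value STRING)
    CLUSTERED BY (key) SORTED BY (key ASC) INTO 4 BUCKETS;
  INSERT OVERWRITE TABLE demo_bucketed SELECT key, value FROM src;

  -- After HIVE-12331: the two set lines are gone; the same INSERT is
  -- automatically bucketed and sorted to match the table definition.
  CREATE TABLE demo_bucketed (key INT, value STRING)
    CLUSTERED BY (key) SORTED BY (key ASC) INTO 4 BUCKETS;
  INSERT OVERWRITE TABLE demo_bucketed SELECT key, value FROM src;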


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a53d2af5
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a53d2af5
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a53d2af5

Branch: refs/heads/master-fixed
Commit: a53d2af54487aae3d38932409ff4f4f4011ecd90
Parents: 1918735
Author: Ashutosh Chauhan <ha...@apache.org>
Authored: Tue Nov 24 17:08:35 2015 -0800
Committer: Owen O'Malley <om...@apache.org>
Committed: Mon Nov 30 11:14:34 2015 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   8 +-
 .../org/apache/hadoop/hive/ql/ErrorMsg.java     |   1 +
 .../apache/hadoop/hive/ql/exec/Utilities.java   |   3 +-
 .../optimizer/SortedDynPartitionOptimizer.java  |  86 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  54 +-
 .../apache/hadoop/hive/ql/TestTxnCommands.java  |  14 +-
 .../apache/hadoop/hive/ql/TestTxnCommands2.java |   6 +-
 .../hive/ql/lockmgr/TestDbTxnManager2.java      |   5 +-
 .../queries/clientnegative/acid_overwrite.q     |   2 +-
 .../queries/clientnegative/archive_corrupt.q    |   2 +-
 .../authorization_delete_nodeletepriv.q         |   2 +-
 .../authorization_update_noupdatepriv.q         |   2 +-
 .../clientnegative/delete_non_acid_table.q      |   2 +-
 .../clientnegative/delete_not_bucketed.q        |   2 +-
 .../test/queries/clientnegative/delete_sorted.q |   2 +-
 .../test/queries/clientnegative/insert_sorted.q |   2 +-
 .../clientnegative/insert_values_sorted.q       |   2 +-
 .../queries/clientnegative/merge_negative_3.q   |   4 +-
 .../queries/clientnegative/smb_bucketmapjoin.q  |   4 +-
 .../queries/clientnegative/smb_mapjoin_14.q     |   4 +-
 .../sortmerge_mapjoin_mismatch_1.q              |   4 +-
 .../queries/clientnegative/update_bucket_col.q  |   2 +-
 .../clientnegative/update_no_such_table.q       |   2 +-
 .../clientnegative/update_non_acid_table.q      |   2 +-
 .../clientnegative/update_not_bucketed.q        |   2 +-
 .../clientnegative/update_partition_col.q       |   2 +-
 .../test/queries/clientnegative/update_sorted.q |   2 +-
 ql/src/test/queries/clientpositive/acid_join.q  |   2 +-
 .../queries/clientpositive/acid_vectorization.q |   2 +-
 .../acid_vectorization_partition.q              |   2 +-
 .../clientpositive/acid_vectorization_project.q |   2 +-
 .../alter_numbuckets_partitioned_table.q        |  59 --
 .../alter_numbuckets_partitioned_table2.q       |  85 --
 .../alter_numbuckets_partitioned_table2_h23.q   |   5 +-
 .../alter_numbuckets_partitioned_table_h23.q    |   2 +-
 .../clientpositive/archive_excludeHadoop20.q    |   2 +-
 .../test/queries/clientpositive/archive_multi.q |   2 +-
 .../clientpositive/authorization_delete.q       |   2 +-
 .../authorization_delete_own_table.q            |   2 +-
 .../clientpositive/authorization_update.q       |   2 +-
 .../authorization_update_own_table.q            |   2 +-
 .../clientpositive/auto_smb_mapjoin_14.q        |   4 +-
 .../clientpositive/auto_sortmerge_join_10.q     |   4 +-
 .../clientpositive/auto_sortmerge_join_13.q     |   4 +-
 .../clientpositive/auto_sortmerge_join_14.q     |   4 +-
 .../clientpositive/auto_sortmerge_join_15.q     |   4 +-
 .../clientpositive/auto_sortmerge_join_16.q     |   4 +-
 .../clientpositive/auto_sortmerge_join_6.q      |   4 +-
 .../clientpositive/auto_sortmerge_join_9.q      |   4 +-
 ql/src/test/queries/clientpositive/bucket1.q    |   2 +-
 ql/src/test/queries/clientpositive/bucket2.q    |   2 +-
 ql/src/test/queries/clientpositive/bucket3.q    |   2 +-
 ql/src/test/queries/clientpositive/bucket4.q    |   4 +-
 ql/src/test/queries/clientpositive/bucket5.q    |   4 +-
 ql/src/test/queries/clientpositive/bucket6.q    |   4 +-
 .../test/queries/clientpositive/bucket_many.q   |   2 +-
 .../queries/clientpositive/bucket_map_join_1.q  |   4 +-
 .../queries/clientpositive/bucket_map_join_2.q  |   4 +-
 .../clientpositive/bucket_map_join_spark4.q     |   8 +-
 .../clientpositive/bucket_map_join_tez1.q       |   4 +-
 .../clientpositive/bucket_map_join_tez2.q       |   4 +-
 .../clientpositive/bucket_num_reducers.q        |   2 +-
 .../clientpositive/bucket_num_reducers2.q       |   2 +-
 .../queries/clientpositive/bucketmapjoin13.q    |   4 +-
 .../queries/clientpositive/bucketmapjoin6.q     |   4 +-
 .../bucketsortoptimize_insert_1.q               |   4 +-
 .../bucketsortoptimize_insert_2.q               |   4 +-
 .../bucketsortoptimize_insert_3.q               |   4 +-
 .../bucketsortoptimize_insert_4.q               |   4 +-
 .../bucketsortoptimize_insert_5.q               |   4 +-
 .../bucketsortoptimize_insert_6.q               |   4 +-
 .../bucketsortoptimize_insert_7.q               |   4 +-
 .../bucketsortoptimize_insert_8.q               |   4 +-
 .../queries/clientpositive/cbo_rp_auto_join1.q  |   4 +-
 ql/src/test/queries/clientpositive/combine3.q   |   2 +-
 .../clientpositive/delete_all_non_partitioned.q |   2 +-
 .../clientpositive/delete_all_partitioned.q     |   2 +-
 .../queries/clientpositive/delete_orig_table.q  |   2 +-
 .../queries/clientpositive/delete_tmp_table.q   |   2 +-
 .../clientpositive/delete_where_no_match.q      |   2 +-
 .../delete_where_non_partitioned.q              |   2 +-
 .../clientpositive/delete_where_partitioned.q   |   2 +-
 .../clientpositive/delete_whole_partition.q     |   2 +-
 .../disable_merge_for_bucketing.q               |   2 +-
 .../clientpositive/dynpart_sort_opt_bucketing.q |   8 +-
 .../dynpart_sort_opt_vectorization.q            |   8 +-
 .../clientpositive/dynpart_sort_optimization.q  |   8 +-
 .../clientpositive/dynpart_sort_optimization2.q |   4 +-
 .../dynpart_sort_optimization_acid.q            |   2 +-
 .../encryption_insert_partition_dynamic.q       |   2 +-
 .../encryption_insert_partition_static.q        |   2 +-
 .../test/queries/clientpositive/enforce_order.q |   2 +-
 .../test/queries/clientpositive/explainuser_1.q |   6 +-
 .../test/queries/clientpositive/explainuser_2.q |   4 +-
 .../test/queries/clientpositive/explainuser_3.q |   6 +-
 .../queries/clientpositive/groupby_sort_1.q     |   4 +-
 .../queries/clientpositive/groupby_sort_10.q    |   4 +-
 .../queries/clientpositive/groupby_sort_11.q    |   4 +-
 .../queries/clientpositive/groupby_sort_1_23.q  |   4 +-
 .../queries/clientpositive/groupby_sort_2.q     |   4 +-
 .../queries/clientpositive/groupby_sort_3.q     |   4 +-
 .../queries/clientpositive/groupby_sort_4.q     |   4 +-
 .../queries/clientpositive/groupby_sort_5.q     |   4 +-
 .../queries/clientpositive/groupby_sort_6.q     |   4 +-
 .../queries/clientpositive/groupby_sort_7.q     |   4 +-
 .../queries/clientpositive/groupby_sort_8.q     |   4 +-
 .../queries/clientpositive/groupby_sort_9.q     |   4 +-
 .../clientpositive/groupby_sort_skew_1.q        |   4 +-
 .../clientpositive/groupby_sort_skew_1_23.q     |   4 +-
 .../clientpositive/groupby_sort_test_1.q        |   4 +-
 .../infer_bucket_sort_bucketed_table.q          |   4 +-
 .../infer_bucket_sort_map_operators.q           |   4 +-
 .../insert_acid_dynamic_partition.q             |   2 +-
 .../clientpositive/insert_acid_not_bucketed.q   |   2 +-
 .../clientpositive/insert_into_with_schema2.q   |   2 +-
 .../clientpositive/insert_nonacid_from_acid.q   |   2 +-
 .../queries/clientpositive/insert_orig_table.q  |   2 +-
 .../clientpositive/insert_update_delete.q       |   2 +-
 .../insert_values_acid_not_bucketed.q           |   2 +-
 .../insert_values_dynamic_partitioned.q         |   2 +-
 .../insert_values_non_partitioned.q             |   2 +-
 .../clientpositive/insert_values_orig_table.q   |   2 +-
 .../clientpositive/insert_values_partitioned.q  |   2 +-
 .../clientpositive/insert_values_tmp_table.q    |   2 +-
 .../clientpositive/insertoverwrite_bucket.q     |   4 +-
 .../test/queries/clientpositive/join_nullsafe.q |   4 +-
 .../queries/clientpositive/load_dyn_part2.q     |   2 +-
 ql/src/test/queries/clientpositive/mergejoin.q  |   4 +-
 .../queries/clientpositive/orc_empty_files.q    |   2 +-
 .../partition_wise_fileformat14.q               |   4 +-
 .../test/queries/clientpositive/quotedid_smb.q  |   4 +-
 .../queries/clientpositive/reduce_deduplicate.q |   2 +-
 ql/src/test/queries/clientpositive/sample10.q   |   2 +-
 .../test/queries/clientpositive/smb_mapjoin9.q  |   4 +-
 .../queries/clientpositive/smb_mapjoin_11.q     |   8 +-
 .../queries/clientpositive/smb_mapjoin_12.q     |   8 +-
 .../queries/clientpositive/smb_mapjoin_13.q     |   4 +-
 .../queries/clientpositive/smb_mapjoin_14.q     |   4 +-
 .../queries/clientpositive/smb_mapjoin_15.q     |   4 +-
 .../queries/clientpositive/smb_mapjoin_16.q     |   4 +-
 .../queries/clientpositive/smb_mapjoin_17.q     |   4 +-
 .../queries/clientpositive/smb_mapjoin_18.q     |   4 +-
 .../queries/clientpositive/smb_mapjoin_19.q     |   4 +-
 .../queries/clientpositive/smb_mapjoin_20.q     |   4 +-
 .../queries/clientpositive/smb_mapjoin_21.q     |   4 +-
 .../queries/clientpositive/smb_mapjoin_22.q     |   4 +-
 .../queries/clientpositive/smb_mapjoin_25.q     |   4 +-
 .../test/queries/clientpositive/smb_mapjoin_6.q |   4 +-
 .../test/queries/clientpositive/smb_mapjoin_7.q |   4 +-
 .../test/queries/clientpositive/smb_mapjoin_8.q |   4 +-
 .../clientpositive/sort_merge_join_desc_1.q     |   2 +-
 .../clientpositive/sort_merge_join_desc_2.q     |   2 +-
 .../clientpositive/sort_merge_join_desc_3.q     |   2 +-
 .../clientpositive/sort_merge_join_desc_4.q     |   2 +-
 .../clientpositive/sort_merge_join_desc_5.q     |   4 +-
 .../clientpositive/sort_merge_join_desc_6.q     |   4 +-
 .../clientpositive/sort_merge_join_desc_7.q     |   4 +-
 .../clientpositive/sort_merge_join_desc_8.q     |   2 +-
 ql/src/test/queries/clientpositive/stats10.q    |   2 +-
 .../clientpositive/tez_bmj_schema_evolution.q   |   4 +-
 ql/src/test/queries/clientpositive/tez_fsstat.q |   4 +-
 ql/src/test/queries/clientpositive/tez_smb_1.q  |   4 +-
 .../test/queries/clientpositive/tez_smb_empty.q |   4 +-
 .../test/queries/clientpositive/tez_smb_main.q  |   4 +-
 .../queries/clientpositive/transform_acid.q     |   2 +-
 .../clientpositive/truncate_column_buckets.q    |   2 +-
 .../update_after_multiple_inserts.q             |   2 +-
 .../clientpositive/update_all_non_partitioned.q |   2 +-
 .../clientpositive/update_all_partitioned.q     |   2 +-
 .../queries/clientpositive/update_all_types.q   |   2 +-
 .../queries/clientpositive/update_orig_table.q  |   2 +-
 .../queries/clientpositive/update_tmp_table.q   |   2 +-
 .../queries/clientpositive/update_two_cols.q    |   2 +-
 .../clientpositive/update_where_no_match.q      |   2 +-
 .../update_where_non_partitioned.q              |   2 +-
 .../clientpositive/update_where_partitioned.q   |   2 +-
 .../clientpositive/vector_auto_smb_mapjoin_14.q |   4 +-
 .../test/queries/clientpositive/vector_bucket.q |   2 +-
 .../alter_numbuckets_partitioned_table.q.out    | 553 ------------
 .../alter_numbuckets_partitioned_table2.q.out   | 851 -------------------
 ...lter_numbuckets_partitioned_table2_h23.q.out |   6 +-
 ql/src/test/results/clientpositive/cp_sel.q.out |  81 +-
 .../clientpositive/index_auto_update.q.out      |   2 +-
 .../insert_into_with_schema2.q.out              |  12 +-
 .../results/clientpositive/orc_analyze.q.out    |  48 +-
 .../results/clientpositive/smb_mapjoin_11.q.out | 217 ++---
 .../clientpositive/spark/smb_mapjoin_11.q.out   |  74 +-
 .../clientpositive/spark/smb_mapjoin_12.q.out   | 154 ++--
 .../results/clientpositive/spark/stats9.q.out   |   2 +-
 ql/src/test/results/clientpositive/stats9.q.out |   2 +-
 .../clientpositive/tez/orc_analyze.q.out        |  48 +-
 191 files changed, 690 insertions(+), 2220 deletions(-)
----------------------------------------------------------------------
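
For readers skimming the large diff below: the user-visible effect of this change is that Hive now always enforces bucketing and sorting when inserting into a table that declares them, so the "set hive.enforce.bucketing=true;" / "set hive.enforce.sorting=true;" lines stripped from the .q tests are simply no longer needed. A minimal HiveQL sketch of the new default behaviour (table and column names are hypothetical, not taken from this commit):

  -- bucketing is declared on the table; no enforce configs are required any more
  create table bucketed_dst (key int, value string)
    clustered by (key) into 4 buckets stored as orc;

  -- the planner now always adds the reduce phase that sprays rows into 4 buckets
  insert overwrite table bucketed_dst select key, value from src;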


http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index fffedd9..2bd850d 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1198,12 +1198,8 @@ public class HiveConf extends Configuration {
         "The log level to use for tasks executing as part of the DAG.\n" +
         "Used only if hive.tez.java.opts is used to configure Java options."),
 
-    HIVEENFORCEBUCKETING("hive.enforce.bucketing", false,
-        "Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced."),
-    HIVEENFORCESORTING("hive.enforce.sorting", false,
-        "Whether sorting is enforced. If true, while inserting into the table, sorting is enforced."),
     HIVEOPTIMIZEBUCKETINGSORTING("hive.optimize.bucketingsorting", true,
-        "If hive.enforce.bucketing or hive.enforce.sorting is true, don't create a reducer for enforcing \n" +
+        "Don't create a reducer for enforcing \n" +
         "bucketing/sorting for queries of the form: \n" +
         "insert overwrite table T2 select * from T1;\n" +
         "where T1 and T2 are bucketed/sorted by the same keys into the same number of buckets."),
@@ -3082,9 +3078,7 @@ public class HiveConf extends Configuration {
     ConfVars.DROPIGNORESNONEXISTENT.varname,
     ConfVars.HIVECOUNTERGROUP.varname,
     ConfVars.HIVEDEFAULTMANAGEDFILEFORMAT.varname,
-    ConfVars.HIVEENFORCEBUCKETING.varname,
     ConfVars.HIVEENFORCEBUCKETMAPJOIN.varname,
-    ConfVars.HIVEENFORCESORTING.varname,
     ConfVars.HIVEENFORCESORTMERGEBUCKETMAPJOIN.varname,
     ConfVars.HIVEEXPREVALUATIONCACHE.varname,
     ConfVars.HIVEHASHTABLELOADFACTOR.varname,
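
The reworded hive.optimize.bucketingsorting description above keeps its original meaning; as a rough sketch of the query shape it refers to (hypothetical table names), source and destination share the same bucketing/sorting spec, so the copy does not need the extra enforcing reducer:

  create table t1 (key int, value string)
    clustered by (key) sorted by (key) into 4 buckets stored as orc;
  create table t2 (key int, value string)
    clustered by (key) sorted by (key) into 4 buckets stored as orc;

  -- rows coming out of t1 are already bucketed and sorted the way t2 expects,
  -- so the optimization lets this insert skip the enforcing reduce stage
  insert overwrite table t2 select * from t1;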

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index 892587a..8a47605 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -423,6 +423,7 @@ public enum ErrorMsg {
   IMPORT_INTO_STRICT_REPL_TABLE(10303,"Non-repl import disallowed against table that is a destination of replication."),
   CTAS_LOCATION_NONEMPTY(10304, "CREATE-TABLE-AS-SELECT cannot create table with location to a non-empty directory."),
   CTAS_CREATES_VOID_TYPE(10305, "CREATE-TABLE-AS-SELECT creates a VOID type, please use CAST to specify the type, near field: "),
+  TBL_SORTED_NOT_BUCKETED(10306, "Destination table {0} found to be sorted but not bucketed.", true),
   //========================== 20000 range starts here ========================//
   SCRIPT_INIT_ERROR(20000, "Unable to initialize custom script."),
   SCRIPT_IO_ERROR(20001, "An error occurred while reading or writing to your custom script. "

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 8b8cf6d..4eb46ff 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -2159,8 +2159,7 @@ public final class Utilities {
       FileStatus[] items = fs.listStatus(path);
       taskIDToFile = removeTempOrDuplicateFiles(items, fs);
       if(taskIDToFile != null && taskIDToFile.size() > 0 && conf != null && conf.getTable() != null
-          && (conf.getTable().getNumBuckets() > taskIDToFile.size())
-          && (HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVEENFORCEBUCKETING))) {
+          && (conf.getTable().getNumBuckets() > taskIDToFile.size())) {
           // get the missing buckets and generate empty buckets for non-dynamic partition
         String taskID1 = taskIDToFile.keySet().iterator().next();
         Path bucketPath = taskIDToFile.values().iterator().next().getPath();

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
index e2a0eae..c3553a5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
@@ -29,8 +29,6 @@ import java.util.Stack;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.common.ObjectPair;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
@@ -76,8 +74,7 @@ import com.google.common.collect.Maps;
  * When dynamic partitioning (with or without bucketing and sorting) is enabled, this optimization
  * sorts the records on partition, bucket and sort columns respectively before inserting records
  * into the destination table. This enables reducers to keep only one record writer all the time
- * thereby reducing the the memory pressure on the reducers. This optimization will force a reducer
- * even when hive.enforce.bucketing and hive.enforce.sorting is set to false.
+ * thereby reducing the memory pressure on the reducers.
  */
 public class SortedDynPartitionOptimizer implements Transform {
 
@@ -270,58 +267,53 @@ public class SortedDynPartitionOptimizer implements Transform {
     // Remove RS and SEL introduced by enforce bucketing/sorting config
     // Convert PARENT -> RS -> SEL -> FS to PARENT -> FS
     private boolean removeRSInsertedByEnforceBucketing(FileSinkOperator fsOp) {
-      HiveConf hconf = parseCtx.getConf();
-      boolean enforceBucketing = HiveConf.getBoolVar(hconf, ConfVars.HIVEENFORCEBUCKETING);
-      boolean enforceSorting = HiveConf.getBoolVar(hconf, ConfVars.HIVEENFORCESORTING);
-      if (enforceBucketing || enforceSorting) {
-        Set<ReduceSinkOperator> reduceSinks = OperatorUtils.findOperatorsUpstream(fsOp,
-            ReduceSinkOperator.class);
-        Operator<? extends OperatorDesc> rsToRemove = null;
-        List<ReduceSinkOperator> rsOps = parseCtx
-            .getReduceSinkOperatorsAddedByEnforceBucketingSorting();
-        boolean found = false;
-
-        // iterate through all RS and locate the one introduce by enforce bucketing
-        for (ReduceSinkOperator reduceSink : reduceSinks) {
-          for (ReduceSinkOperator rsOp : rsOps) {
-            if (reduceSink.equals(rsOp)) {
-              rsToRemove = reduceSink;
-              found = true;
-              break;
-            }
-          }
 
-          if (found) {
+      Set<ReduceSinkOperator> reduceSinks = OperatorUtils.findOperatorsUpstream(fsOp,
+          ReduceSinkOperator.class);
+      Operator<? extends OperatorDesc> rsToRemove = null;
+      List<ReduceSinkOperator> rsOps = parseCtx
+          .getReduceSinkOperatorsAddedByEnforceBucketingSorting();
+      boolean found = false;
+
+      // iterate through all RS and locate the one introduced by enforce bucketing
+      for (ReduceSinkOperator reduceSink : reduceSinks) {
+        for (ReduceSinkOperator rsOp : rsOps) {
+          if (reduceSink.equals(rsOp)) {
+            rsToRemove = reduceSink;
+            found = true;
             break;
           }
         }
 
-        // iF RS is found remove it and its child (EX) and connect its parent
-        // and grand child
         if (found) {
-          Operator<? extends OperatorDesc> rsParent = rsToRemove.getParentOperators().get(0);
-          Operator<? extends OperatorDesc> rsChild = rsToRemove.getChildOperators().get(0);
-          Operator<? extends OperatorDesc> rsGrandChild = rsChild.getChildOperators().get(0);
-
-          if (rsChild instanceof SelectOperator) {
-            // if schema size cannot be matched, then it could be because of constant folding
-            // converting partition column expression to constant expression. The constant
-            // expression will then get pruned by column pruner since it will not reference to
-            // any columns.
-            if (rsParent.getSchema().getSignature().size() !=
-                rsChild.getSchema().getSignature().size()) {
-              return false;
-            }
-            rsParent.getChildOperators().clear();
-            rsParent.getChildOperators().add(rsGrandChild);
-            rsGrandChild.getParentOperators().clear();
-            rsGrandChild.getParentOperators().add(rsParent);
-            LOG.info("Removed " + rsToRemove.getOperatorId() + " and " + rsChild.getOperatorId()
-                + " as it was introduced by enforce bucketing/sorting.");
-          }
+          break;
         }
       }
 
+      // If RS is found remove it and its child (EX) and connect its parent
+      // and grand child
+      if (found) {
+        Operator<? extends OperatorDesc> rsParent = rsToRemove.getParentOperators().get(0);
+        Operator<? extends OperatorDesc> rsChild = rsToRemove.getChildOperators().get(0);
+        Operator<? extends OperatorDesc> rsGrandChild = rsChild.getChildOperators().get(0);
+
+        if (rsChild instanceof SelectOperator) {
+          // if schema size cannot be matched, then it could be because of constant folding
+          // converting partition column expression to constant expression. The constant
+          // expression will then get pruned by column pruner since it will not reference to
+          // any columns.
+          if (rsParent.getSchema().getSignature().size() !=
+              rsChild.getSchema().getSignature().size()) {
+            return false;
+          }
+          rsParent.getChildOperators().clear();
+          rsParent.getChildOperators().add(rsGrandChild);
+          rsGrandChild.getParentOperators().clear();
+          rsGrandChild.getParentOperators().add(rsParent);
+          LOG.info("Removed " + rsToRemove.getOperatorId() + " and " + rsChild.getOperatorId()
+              + " as it was introduced by enforce bucketing/sorting.");
+        }
+      }
       return true;
     }
 

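The class comment above summarises what the sorted dynamic partition optimization does; as a hedged illustration (hypothetical table and column names, not from this commit), the kind of statement it rewrites looks like:

  set hive.exec.dynamic.partition.mode=nonstrict;

  create table dyn_dst (key int, value string)
    partitioned by (ds string)
    clustered by (key) sorted by (value) into 2 buckets stored as orc;

  -- the optimizer sorts records on partition, bucket and sort columns before the
  -- file sink, so each reducer keeps only one record writer open at a time
  insert overwrite table dyn_dst partition (ds)
  select key, value, ds from src_part;
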
http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 69bb9d7..1b7873d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -6054,7 +6054,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     // spray the data into multiple buckets. That way, we can support a very large
     // number of buckets without needing a very large number of reducers.
     boolean enforceBucketing = false;
-    boolean enforceSorting = false;
     ArrayList<ExprNodeDesc> partnCols = new ArrayList<ExprNodeDesc>();
     ArrayList<ExprNodeDesc> sortCols = new ArrayList<ExprNodeDesc>();
     ArrayList<Integer> sortOrders = new ArrayList<Integer>();
@@ -6062,8 +6061,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     int numFiles = 1;
     int totalFiles = 1;
 
-    if ((dest_tab.getNumBuckets() > 0) &&
-        (conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCEBUCKETING))) {
+    if (dest_tab.getNumBuckets() > 0) {
       enforceBucketing = true;
       if (updating() || deleting()) {
         partnCols = getPartitionColsFromBucketColsForUpdateDelete(input, true);
@@ -6073,24 +6071,27 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     }
 
     if ((dest_tab.getSortCols() != null) &&
-        (dest_tab.getSortCols().size() > 0) &&
-        (conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCESORTING))) {
-      enforceSorting = true;
+        (dest_tab.getSortCols().size() > 0)) {
       sortCols = getSortCols(dest, qb, dest_tab, table_desc, input, true);
       sortOrders = getSortOrders(dest, qb, dest_tab, input);
-      if (!enforceBucketing) {
-        partnCols = sortCols;
+      if (!enforceBucketing && !dest_tab.isIndexTable()) {
+        throw new SemanticException(ErrorMsg.TBL_SORTED_NOT_BUCKETED.getErrorCodedMsg(dest_tab.getCompleteName()));
+      } else {
+        if (!enforceBucketing) {
+          partnCols = sortCols;
+        }
       }
+      enforceBucketing = true;
     }
 
-    if (enforceBucketing || enforceSorting) {
+    if (enforceBucketing) {
       int maxReducers = conf.getIntVar(HiveConf.ConfVars.MAXREDUCERS);
       if (conf.getIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS) > 0) {
         maxReducers = conf.getIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS);
       }
       int numBuckets = dest_tab.getNumBuckets();
       if (numBuckets > maxReducers) {
-        LOG.debug("XXXXXX numBuckets is " + numBuckets + " and maxReducers is " + maxReducers);
+        LOG.debug("numBuckets is {}", numBuckets, " and maxReducers is {}", maxReducers);
         multiFileSpray = true;
         totalFiles = numBuckets;
         if (totalFiles % maxReducers == 0) {
@@ -6123,11 +6124,9 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   private void genPartnCols(String dest, Operator input, QB qb,
       TableDesc table_desc, Table dest_tab, SortBucketRSCtx ctx) throws SemanticException {
     boolean enforceBucketing = false;
-    boolean enforceSorting = false;
     ArrayList<ExprNodeDesc> partnColsNoConvert = new ArrayList<ExprNodeDesc>();
 
-    if ((dest_tab.getNumBuckets() > 0) &&
-        (conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCEBUCKETING))) {
+    if ((dest_tab.getNumBuckets() > 0)) {
       enforceBucketing = true;
       if (updating() || deleting()) {
         partnColsNoConvert = getPartitionColsFromBucketColsForUpdateDelete(input, false);
@@ -6138,15 +6137,19 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     }
 
     if ((dest_tab.getSortCols() != null) &&
-        (dest_tab.getSortCols().size() > 0) &&
-        (conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCESORTING))) {
-      enforceSorting = true;
-      if (!enforceBucketing) {
-        partnColsNoConvert = getSortCols(dest, qb, dest_tab, table_desc, input, false);
+        (dest_tab.getSortCols().size() > 0)) {
+      if (!enforceBucketing && !dest_tab.isIndexTable()) {
+        throw new SemanticException(ErrorMsg.TBL_SORTED_NOT_BUCKETED.getErrorCodedMsg(dest_tab.getCompleteName()));
       }
+      else {
+        if(!enforceBucketing) {
+          partnColsNoConvert = getSortCols(dest, qb, dest_tab, table_desc, input, false);
+        }
+      }
+      enforceBucketing = true;
     }
 
-    if (enforceBucketing || enforceSorting) {
+    if (enforceBucketing) {
       ctx.setPartnCols(partnColsNoConvert);
     }
   }
@@ -6234,8 +6237,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         if (dpCtx.getSPPath() != null) {
           dest_path = new Path(dest_tab.getPath(), dpCtx.getSPPath());
         }
-        if ((dest_tab.getNumBuckets() > 0) &&
-            (conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCEBUCKETING))) {
+        if ((dest_tab.getNumBuckets() > 0)) {
           dpCtx.setNumBuckets(dest_tab.getNumBuckets());
         }
       }
@@ -6542,12 +6544,10 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     RowSchema fsRS = new RowSchema(vecCol);
 
     // The output files of a FileSink can be merged if they are either not being written to a table
-    // or are being written to a table which is either not bucketed or enforce bucketing is not set
-    // and table the table is either not sorted or enforce sorting is not set
-    boolean canBeMerged = (dest_tab == null || !((dest_tab.getNumBuckets() > 0 &&
-        conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCEBUCKETING)) ||
-        (dest_tab.getSortCols() != null && dest_tab.getSortCols().size() > 0 &&
-        conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCESORTING))));
+    // or are being written to a table which is not bucketed
+    // and the table is not sorted
+    boolean canBeMerged = (dest_tab == null || !((dest_tab.getNumBuckets() > 0) ||
+        (dest_tab.getSortCols() != null && dest_tab.getSortCols().size() > 0)));
 
     // If this table is working with ACID semantics, turn off merging
     canBeMerged &= !destTableIsAcid;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
index e13e6eb..db8b7d6 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
@@ -28,9 +28,9 @@ import java.util.List;
 import java.util.concurrent.TimeUnit;
 
 /**
- * The LockManager is not ready, but for no-concurrency straight-line path we can 
+ * The LockManager is not ready, but for no-concurrency straight-line path we can
  * test AC=true, and AC=false with commit/rollback/exception and test resulting data.
- * 
+ *
  * Can also test, calling commit in AC=true mode, etc, toggling AC...
  */
 public class TestTxnCommands {
@@ -50,7 +50,7 @@ public class TestTxnCommands {
     ACIDTBL2("acidTbl2"),
     NONACIDORCTBL("nonAcidOrcTbl"),
     NONACIDORCTBL2("nonAcidOrcTbl2");
-    
+
     private final String name;
     @Override
     public String toString() {
@@ -70,7 +70,6 @@ public class TestTxnCommands {
     hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
     hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR);
     TxnDbUtil.setConfValues(hiveConf);
-    hiveConf.setBoolVar(HiveConf.ConfVars.HIVEENFORCEBUCKETING, true);
     TxnDbUtil.prepDb();
     File f = new File(TEST_WAREHOUSE_DIR);
     if (f.exists()) {
@@ -107,7 +106,7 @@ public class TestTxnCommands {
       FileUtils.deleteDirectory(new File(TEST_DATA_DIR));
     }
   }
-  @Test 
+  @Test
   public void testInsertOverwrite() throws Exception {
     runStatementOnDriver("insert overwrite table " + Table.NONACIDORCTBL + " select a,b from " + Table.NONACIDORCTBL2);
     runStatementOnDriver("create table " + Table.NONACIDORCTBL2 + "3(a int, b int) clustered by (a) into " + BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES ('transactional'='false')");
@@ -211,7 +210,7 @@ public class TestTxnCommands {
     rs0 = runStatementOnDriver("select a,b from " + Table.ACIDTBL + " order by a,b");
     Assert.assertEquals("Can't see my own write", 1, rs0.size());
   }
-  @Test 
+  @Test
   public void testReadMyOwnInsert() throws Exception {
     runStatementOnDriver("set autocommit false");
     runStatementOnDriver("START TRANSACTION");
@@ -431,6 +430,7 @@ public class TestTxnCommands {
     return rs;
   }
   private static final class RowComp implements Comparator<int[]> {
+    @Override
     public int compare(int[] row1, int[] row2) {
       assert row1 != null && row2 != null && row1.length == row2.length;
       for(int i = 0; i < row1.length; i++) {
@@ -462,7 +462,7 @@ public class TestTxnCommands {
     sb.setLength(sb.length() - 1);//remove trailing comma
     return sb.toString();
   }
-  
+
   private List<String> runStatementOnDriver(String stmt) throws Exception {
     CommandProcessorResponse cpr = d.run(stmt);
     if(cpr.getResponseCode() != 0) {

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
index 5aa2500..8616eb0 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
@@ -62,7 +62,7 @@ public class TestTxnCommands2 {
     ACIDTBLPART("acidTblPart"),
     NONACIDORCTBL("nonAcidOrcTbl"),
     NONACIDPART("nonAcidPart");
-    
+
     private final String name;
     @Override
     public String toString() {
@@ -82,7 +82,6 @@ public class TestTxnCommands2 {
     hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
     hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR);
     TxnDbUtil.setConfValues(hiveConf);
-    hiveConf.setBoolVar(HiveConf.ConfVars.HIVEENFORCEBUCKETING, true);
     TxnDbUtil.prepDb();
     File f = new File(TEST_WAREHOUSE_DIR);
     if (f.exists()) {
@@ -330,6 +329,7 @@ public class TestTxnCommands2 {
     return rs;
   }
   private static final class RowComp implements Comparator<int[]> {
+    @Override
     public int compare(int[] row1, int[] row2) {
       assert row1 != null && row2 != null && row1.length == row2.length;
       for(int i = 0; i < row1.length; i++) {
@@ -361,7 +361,7 @@ public class TestTxnCommands2 {
     sb.setLength(sb.length() - 1);//remove trailing comma
     return sb.toString();
   }
-  
+
   private List<String> runStatementOnDriver(String stmt) throws Exception {
     CommandProcessorResponse cpr = d.run(stmt);
     if(cpr.getResponseCode() != 0) {

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
index 44ad8b0..c6a7fcb 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
@@ -51,7 +51,6 @@ public class TestDbTxnManager2 {
   public static void setUpClass() throws Exception {
     TxnDbUtil.setConfValues(conf);
     conf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
-    conf.setBoolVar(HiveConf.ConfVars.HIVEENFORCEBUCKETING, true);
   }
   @Before
   public void setUp() throws Exception {
@@ -211,8 +210,8 @@ public class TestDbTxnManager2 {
     Assert.assertEquals("Unexpected number of locks found", 0, locks.size());
     checkCmdOnDriver(cpr);
   }
-  
-  
+
+
   private void checkLock(LockType type, LockState state, String db, String table, String partition, ShowLocksResponseElement l) {
     Assert.assertEquals(l.toString(),l.getType(), type);
     Assert.assertEquals(l.toString(),l.getState(), state);

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientnegative/acid_overwrite.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/acid_overwrite.q b/ql/src/test/queries/clientnegative/acid_overwrite.q
index 2e57a3c..9ccf31e 100644
--- a/ql/src/test/queries/clientnegative/acid_overwrite.q
+++ b/ql/src/test/queries/clientnegative/acid_overwrite.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientnegative/archive_corrupt.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_corrupt.q b/ql/src/test/queries/clientnegative/archive_corrupt.q
index 130b37b..ed49688 100644
--- a/ql/src/test/queries/clientnegative/archive_corrupt.q
+++ b/ql/src/test/queries/clientnegative/archive_corrupt.q
@@ -1,7 +1,7 @@
 USE default;
 
 set hive.archive.enabled = true;
-set hive.enforce.bucketing = true;
+
 
 drop table tstsrcpart;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientnegative/authorization_delete_nodeletepriv.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/authorization_delete_nodeletepriv.q b/ql/src/test/queries/clientnegative/authorization_delete_nodeletepriv.q
index f2de306..28c256e 100644
--- a/ql/src/test/queries/clientnegative/authorization_delete_nodeletepriv.q
+++ b/ql/src/test/queries/clientnegative/authorization_delete_nodeletepriv.q
@@ -5,7 +5,7 @@ set hive.security.authorization.enabled=true;
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 
 -- check update without update priv

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientnegative/authorization_update_noupdatepriv.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/authorization_update_noupdatepriv.q b/ql/src/test/queries/clientnegative/authorization_update_noupdatepriv.q
index c00c0eb..674ad1e 100644
--- a/ql/src/test/queries/clientnegative/authorization_update_noupdatepriv.q
+++ b/ql/src/test/queries/clientnegative/authorization_update_noupdatepriv.q
@@ -5,7 +5,7 @@ set hive.security.authorization.enabled=true;
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 
 -- check update without update priv

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientnegative/delete_non_acid_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/delete_non_acid_table.q b/ql/src/test/queries/clientnegative/delete_non_acid_table.q
index 6ae82ff..ec3d803 100644
--- a/ql/src/test/queries/clientnegative/delete_non_acid_table.q
+++ b/ql/src/test/queries/clientnegative/delete_non_acid_table.q
@@ -1,7 +1,7 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-set hive.enforce.bucketing=true;
+
 
 create table not_an_acid_table2(a int, b varchar(128));
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientnegative/delete_not_bucketed.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/delete_not_bucketed.q b/ql/src/test/queries/clientnegative/delete_not_bucketed.q
index 80dffea..d575a8f 100644
--- a/ql/src/test/queries/clientnegative/delete_not_bucketed.q
+++ b/ql/src/test/queries/clientnegative/delete_not_bucketed.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_notbucketed(a int, b varchar(128)) stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientnegative/delete_sorted.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/delete_sorted.q b/ql/src/test/queries/clientnegative/delete_sorted.q
index fd8d579..9f82c1f 100644
--- a/ql/src/test/queries/clientnegative/delete_sorted.q
+++ b/ql/src/test/queries/clientnegative/delete_sorted.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_insertsort(a int, b varchar(128)) partitioned by (ds string) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientnegative/insert_sorted.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/insert_sorted.q b/ql/src/test/queries/clientnegative/insert_sorted.q
index 18c942a..cd1a69c 100644
--- a/ql/src/test/queries/clientnegative/insert_sorted.q
+++ b/ql/src/test/queries/clientnegative/insert_sorted.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientnegative/insert_values_sorted.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/insert_values_sorted.q b/ql/src/test/queries/clientnegative/insert_values_sorted.q
index 260e2fb..ee26402 100644
--- a/ql/src/test/queries/clientnegative/insert_values_sorted.q
+++ b/ql/src/test/queries/clientnegative/insert_values_sorted.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientnegative/merge_negative_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/merge_negative_3.q b/ql/src/test/queries/clientnegative/merge_negative_3.q
index 6bc645e..f5eb231 100644
--- a/ql/src/test/queries/clientnegative/merge_negative_3.q
+++ b/ql/src/test/queries/clientnegative/merge_negative_3.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 
 create table srcpart2 (key int, value string) partitioned by (ds string) clustered by (key) sorted by (key) into 2 buckets stored as RCFILE;
 insert overwrite table srcpart2 partition (ds='2011') select * from src;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientnegative/smb_bucketmapjoin.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/smb_bucketmapjoin.q b/ql/src/test/queries/clientnegative/smb_bucketmapjoin.q
index 880323c..c252d86 100644
--- a/ql/src/test/queries/clientnegative/smb_bucketmapjoin.q
+++ b/ql/src/test/queries/clientnegative/smb_bucketmapjoin.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+
+
 set hive.exec.reducers.max = 1;
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientnegative/smb_mapjoin_14.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/smb_mapjoin_14.q b/ql/src/test/queries/clientnegative/smb_mapjoin_14.q
index 54bfba0..4c93542 100644
--- a/ql/src/test/queries/clientnegative/smb_mapjoin_14.q
+++ b/ql/src/test/queries/clientnegative/smb_mapjoin_14.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+
+
 set hive.exec.reducers.max = 1;
 
 CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientnegative/sortmerge_mapjoin_mismatch_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/sortmerge_mapjoin_mismatch_1.q b/ql/src/test/queries/clientnegative/sortmerge_mapjoin_mismatch_1.q
index 7d11f45..8fbbd96 100644
--- a/ql/src/test/queries/clientnegative/sortmerge_mapjoin_mismatch_1.q
+++ b/ql/src/test/queries/clientnegative/sortmerge_mapjoin_mismatch_1.q
@@ -3,8 +3,8 @@ INTO 1 BUCKETS STORED AS RCFILE;
 create table table_desc(key int, value string) CLUSTERED BY (key) SORTED BY (key desc) 
 INTO 1 BUCKETS STORED AS RCFILE;
 
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+
+
 
 insert overwrite table table_asc select key, value from src; 
 insert overwrite table table_desc select key, value from src;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientnegative/update_bucket_col.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/update_bucket_col.q b/ql/src/test/queries/clientnegative/update_bucket_col.q
index 515e024..c471a4c 100644
--- a/ql/src/test/queries/clientnegative/update_bucket_col.q
+++ b/ql/src/test/queries/clientnegative/update_bucket_col.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table foo(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientnegative/update_no_such_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/update_no_such_table.q b/ql/src/test/queries/clientnegative/update_no_such_table.q
index 07239cf..dffbab4 100644
--- a/ql/src/test/queries/clientnegative/update_no_such_table.q
+++ b/ql/src/test/queries/clientnegative/update_no_such_table.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 
 update no_such_table set b = 'fred';

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientnegative/update_non_acid_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/update_non_acid_table.q b/ql/src/test/queries/clientnegative/update_non_acid_table.q
index dd0b01e..da46141 100644
--- a/ql/src/test/queries/clientnegative/update_non_acid_table.q
+++ b/ql/src/test/queries/clientnegative/update_non_acid_table.q
@@ -1,7 +1,7 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-set hive.enforce.bucketing=true;
+
 
 create table not_an_acid_table(a int, b varchar(128));
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientnegative/update_not_bucketed.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/update_not_bucketed.q b/ql/src/test/queries/clientnegative/update_not_bucketed.q
index 8512fa7..d7d0da4 100644
--- a/ql/src/test/queries/clientnegative/update_not_bucketed.q
+++ b/ql/src/test/queries/clientnegative/update_not_bucketed.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_notbucketed(a int, b varchar(128)) partitioned by (ds string) stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientnegative/update_partition_col.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/update_partition_col.q b/ql/src/test/queries/clientnegative/update_partition_col.q
index e9c60cc..78d381e 100644
--- a/ql/src/test/queries/clientnegative/update_partition_col.q
+++ b/ql/src/test/queries/clientnegative/update_partition_col.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table foo(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientnegative/update_sorted.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/update_sorted.q b/ql/src/test/queries/clientnegative/update_sorted.q
index 917c3b5..f9e5db5 100644
--- a/ql/src/test/queries/clientnegative/update_sorted.q
+++ b/ql/src/test/queries/clientnegative/update_sorted.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/acid_join.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/acid_join.q b/ql/src/test/queries/clientpositive/acid_join.q
index 2e6aeae..dca4d7d 100644
--- a/ql/src/test/queries/clientpositive/acid_join.q
+++ b/ql/src/test/queries/clientpositive/acid_join.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 -- This test checks that a join with tables with two different buckets send the right bucket info to each table.
 create table acidjoin1(name varchar(50), age int) clustered by (age) into 2 buckets stored as orc TBLPROPERTIES ("transactional"="true"); 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/acid_vectorization.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/acid_vectorization.q b/ql/src/test/queries/clientpositive/acid_vectorization.q
index 4b11412..514d3fa 100644
--- a/ql/src/test/queries/clientpositive/acid_vectorization.q
+++ b/ql/src/test/queries/clientpositive/acid_vectorization.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.vectorized.execution.enabled=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/acid_vectorization_partition.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/acid_vectorization_partition.q b/ql/src/test/queries/clientpositive/acid_vectorization_partition.q
index 00449bb..8dd1e09 100644
--- a/ql/src/test/queries/clientpositive/acid_vectorization_partition.q
+++ b/ql/src/test/queries/clientpositive/acid_vectorization_partition.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 set hive.exec.dynamic.partition.mode=nonstrict;
 
 CREATE TABLE acid_vectorized_part(a INT, b STRING) partitioned by (ds string) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true');

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/acid_vectorization_project.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/acid_vectorization_project.q b/ql/src/test/queries/clientpositive/acid_vectorization_project.q
index a44b57a..2a5f59a 100644
--- a/ql/src/test/queries/clientpositive/acid_vectorization_project.q
+++ b/ql/src/test/queries/clientpositive/acid_vectorization_project.q
@@ -1,6 +1,6 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 set hive.exec.dynamic.partition.mode=nonstrict;
 
 CREATE TABLE acid_vectorized(a INT, b STRING, c float) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true');

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table.q b/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table.q
deleted file mode 100644
index 627fcc1..0000000
--- a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table.q
+++ /dev/null
@@ -1,59 +0,0 @@
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
-create table tst1(key string, value string) partitioned by (ds string) clustered by (key) into 10 buckets;
-
-alter table tst1 clustered by (key) into 8 buckets;
-
-describe formatted tst1;
-
-set hive.enforce.bucketing=true;
-insert overwrite table tst1 partition (ds='1') select key, value from src;
-
-describe formatted tst1 partition (ds = '1');
-
--- Test changing bucket number
-
-alter table tst1 clustered by (key) into 12 buckets;
-
-insert overwrite table tst1 partition (ds='1') select key, value from src;
-
-describe formatted tst1 partition (ds = '1');
-
-describe formatted tst1;
-
--- Test changing bucket number of (table/partition)
-
-alter table tst1 into 4 buckets;
-
-describe formatted tst1;
-
-describe formatted tst1 partition (ds = '1');
-
-alter table tst1 partition (ds = '1') into 6 buckets;
-
-describe formatted tst1;
-
-describe formatted tst1 partition (ds = '1');
-
--- Test adding sort order
-
-alter table tst1 clustered by (key) sorted by (key asc) into 12 buckets;
-
-describe formatted tst1;
-
--- Test changing sort order
-
-alter table tst1 clustered by (key) sorted by (value desc) into 12 buckets;
-
-describe formatted tst1;
-
--- Test removing test order
-
-alter table tst1 clustered by (value) into 12 buckets;
-
-describe formatted tst1;
-
--- Test removing buckets
-
-alter table tst1 not clustered;
-
-describe formatted tst1;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2.q b/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2.q
deleted file mode 100644
index 2f26de8..0000000
--- a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2.q
+++ /dev/null
@@ -1,85 +0,0 @@
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
--- Tests that when overwriting a partition in a table after altering the bucketing/sorting metadata
--- the partition metadata is updated as well.
-
-CREATE TABLE tst1(key STRING, value STRING) PARTITIONED BY (ds STRING);
-
-DESCRIBE FORMATTED tst1;
-
-SET hive.enforce.bucketing=true;
-SET hive.enforce.sorting=true;
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
-
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
-
--- Test an unbucketed partition gets converted to bucketed
-ALTER TABLE tst1 CLUSTERED BY (key) INTO 8 BUCKETS;
-
-DESCRIBE FORMATTED tst1;
-
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
-
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
-
--- Test an unsorted partition gets converted to sorted
-ALTER TABLE tst1 CLUSTERED BY (key) SORTED BY (key DESC) INTO 8 BUCKETS;
-
-DESCRIBE FORMATTED tst1;
-
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
-
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
-
--- Test changing the bucket columns
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (key DESC) INTO 8 BUCKETS;
-
-DESCRIBE FORMATTED tst1;
-
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
-
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
-
--- Test changing the number of buckets
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (key DESC) INTO 4 BUCKETS;
-
-DESCRIBE FORMATTED tst1;
-
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
-
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
-
--- Test changing the sort columns
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (value DESC) INTO 4 BUCKETS;
-
-DESCRIBE FORMATTED tst1;
-
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
-
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
-
--- Test changing the sort order
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (value ASC) INTO 4 BUCKETS;
-
-DESCRIBE FORMATTED tst1;
-
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
-
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
-
--- Test a sorted partition gets converted to unsorted
-ALTER TABLE tst1 CLUSTERED BY (value) INTO 4 BUCKETS;
-
-DESCRIBE FORMATTED tst1;
-
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
-
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
-
--- Test a bucketed partition gets converted to unbucketed
-ALTER TABLE tst1 NOT CLUSTERED;
-
-DESCRIBE FORMATTED tst1;
-
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
-
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2_h23.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2_h23.q b/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2_h23.q
index 2c2e184..15a88bb 100644
--- a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2_h23.q
+++ b/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2_h23.q
@@ -1,4 +1,3 @@
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
 -- Tests that when overwriting a partition in a table after altering the bucketing/sorting metadata
 -- the partition metadata is updated as well.
 
@@ -6,8 +5,8 @@ CREATE TABLE tst1(key STRING, value STRING) PARTITIONED BY (ds STRING);
 
 DESCRIBE FORMATTED tst1;
 
-SET hive.enforce.bucketing=true;
-SET hive.enforce.sorting=true;
+
+
 INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
 
 DESCRIBE FORMATTED tst1 PARTITION (ds = '1');

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q b/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q
index 439f351..7d523d9 100644
--- a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q
+++ b/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q
@@ -5,7 +5,7 @@ alter table tst1 clustered by (key) into 8 buckets;
 
 describe formatted tst1;
 
-set hive.enforce.bucketing=true;
+
 insert overwrite table tst1 partition (ds='1') select key, value from src;
 
 describe formatted tst1 partition (ds = '1');

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/archive_excludeHadoop20.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/archive_excludeHadoop20.q b/ql/src/test/queries/clientpositive/archive_excludeHadoop20.q
index 316276a..b046f97 100644
--- a/ql/src/test/queries/clientpositive/archive_excludeHadoop20.q
+++ b/ql/src/test/queries/clientpositive/archive_excludeHadoop20.q
@@ -1,5 +1,5 @@
 set hive.archive.enabled = true;
-set hive.enforce.bucketing = true;
+;
 set hive.exec.submitviachild=true;
 set hive.exec.submit.local.task.via.child=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/archive_multi.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/archive_multi.q b/ql/src/test/queries/clientpositive/archive_multi.q
index 1004aca..0259a3e 100644
--- a/ql/src/test/queries/clientpositive/archive_multi.q
+++ b/ql/src/test/queries/clientpositive/archive_multi.q
@@ -1,5 +1,5 @@
 set hive.archive.enabled = true;
-set hive.enforce.bucketing = true;
+;
 
 create database ac_test;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/authorization_delete.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/authorization_delete.q b/ql/src/test/queries/clientpositive/authorization_delete.q
index d96e6ab..fe1a9ac 100644
--- a/ql/src/test/queries/clientpositive/authorization_delete.q
+++ b/ql/src/test/queries/clientpositive/authorization_delete.q
@@ -4,7 +4,7 @@ set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.Sessi
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 set user.name=user1;
 -- current user has been set (comment line before the set cmd is resulting in parse error!!)

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/authorization_delete_own_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/authorization_delete_own_table.q b/ql/src/test/queries/clientpositive/authorization_delete_own_table.q
index 7abdc12..34dfa6a 100644
--- a/ql/src/test/queries/clientpositive/authorization_delete_own_table.q
+++ b/ql/src/test/queries/clientpositive/authorization_delete_own_table.q
@@ -5,7 +5,7 @@ set hive.security.authorization.enabled=true;
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 
 set user.name=user1;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/authorization_update.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/authorization_update.q b/ql/src/test/queries/clientpositive/authorization_update.q
index da1054e..5e57904 100644
--- a/ql/src/test/queries/clientpositive/authorization_update.q
+++ b/ql/src/test/queries/clientpositive/authorization_update.q
@@ -4,7 +4,7 @@ set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.Sessi
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 set user.name=user1;
 -- current user has been set (comment line before the set cmd is resulting in parse error!!)

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/authorization_update_own_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/authorization_update_own_table.q b/ql/src/test/queries/clientpositive/authorization_update_own_table.q
index ace1ce2..e3292d2 100644
--- a/ql/src/test/queries/clientpositive/authorization_update_own_table.q
+++ b/ql/src/test/queries/clientpositive/authorization_update_own_table.q
@@ -5,7 +5,7 @@ set hive.security.authorization.enabled=true;
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.enforce.bucketing=true;
+
 
 
 set user.name=user1;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/auto_smb_mapjoin_14.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_smb_mapjoin_14.q b/ql/src/test/queries/clientpositive/auto_smb_mapjoin_14.q
index 699777e..4dca15b 100644
--- a/ql/src/test/queries/clientpositive/auto_smb_mapjoin_14.q
+++ b/ql/src/test/queries/clientpositive/auto_smb_mapjoin_14.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/auto_sortmerge_join_10.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_10.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_10.q
index c07dd23..77b2282 100644
--- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_10.q
+++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_10.q
@@ -1,6 +1,6 @@
 set hive.explain.user=false;
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q
index f35fec1..1c868dc 100644
--- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q
+++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q
@@ -1,6 +1,6 @@
 set hive.explain.user=false;
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/auto_sortmerge_join_14.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_14.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_14.q
index eabeff0..3fa1463 100644
--- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_14.q
+++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_14.q
@@ -1,6 +1,6 @@
 set hive.explain.user=false;
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/auto_sortmerge_join_15.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_15.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_15.q
index a553d93..64b3e5f 100644
--- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_15.q
+++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_15.q
@@ -1,6 +1,6 @@
 set hive.explain.user=false;
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/auto_sortmerge_join_16.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_16.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_16.q
index cb244cf..83b67f8 100644
--- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_16.q
+++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_16.q
@@ -1,8 +1,8 @@
 set hive.auto.convert.join=true;
 
 set hive.exec.dynamic.partition.mode=nonstrict;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 
 set hive.auto.convert.sortmerge.join=true;
 set hive.optimize.bucketmapjoin = true;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/auto_sortmerge_join_6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_6.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_6.q
index 0ddf378..33fe283 100644
--- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_6.q
+++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_6.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 set hive.explain.user=false;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/auto_sortmerge_join_9.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_9.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_9.q
index 9eb85d3..917aec9 100644
--- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_9.q
+++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_9.q
@@ -1,6 +1,6 @@
 set hive.explain.user=false;
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/bucket1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket1.q b/ql/src/test/queries/clientpositive/bucket1.q
index 0154b4e..6a59465 100644
--- a/ql/src/test/queries/clientpositive/bucket1.q
+++ b/ql/src/test/queries/clientpositive/bucket1.q
@@ -1,4 +1,4 @@
-set hive.enforce.bucketing = true;
+;
 set hive.exec.reducers.max = 200;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/bucket2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket2.q b/ql/src/test/queries/clientpositive/bucket2.q
index ecd7e53..4e63859 100644
--- a/ql/src/test/queries/clientpositive/bucket2.q
+++ b/ql/src/test/queries/clientpositive/bucket2.q
@@ -1,5 +1,5 @@
 set hive.explain.user=false;
-set hive.enforce.bucketing = true;
+;
 set hive.exec.reducers.max = 1;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/bucket3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket3.q b/ql/src/test/queries/clientpositive/bucket3.q
index 7b7a9c3..b11e4da 100644
--- a/ql/src/test/queries/clientpositive/bucket3.q
+++ b/ql/src/test/queries/clientpositive/bucket3.q
@@ -1,5 +1,5 @@
 set hive.explain.user=false;
-set hive.enforce.bucketing = true;
+;
 set hive.exec.reducers.max = 1;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/bucket4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket4.q b/ql/src/test/queries/clientpositive/bucket4.q
index 1b49c7a..7cd962d 100644
--- a/ql/src/test/queries/clientpositive/bucket4.q
+++ b/ql/src/test/queries/clientpositive/bucket4.q
@@ -1,7 +1,7 @@
 set hive.explain.user=false;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 CREATE TABLE bucket4_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/bucket5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket5.q b/ql/src/test/queries/clientpositive/bucket5.q
index 877f8a5..0b3bcc5 100644
--- a/ql/src/test/queries/clientpositive/bucket5.q
+++ b/ql/src/test/queries/clientpositive/bucket5.q
@@ -1,6 +1,6 @@
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles = true;
 set hive.merge.mapredfiles = true;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/bucket6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket6.q b/ql/src/test/queries/clientpositive/bucket6.q
index fb55787..a12f6bd 100644
--- a/ql/src/test/queries/clientpositive/bucket6.q
+++ b/ql/src/test/queries/clientpositive/bucket6.q
@@ -1,7 +1,7 @@
 CREATE TABLE src_bucket(key STRING, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
 
-set hive.enforce.sorting = true;
-set hive.enforce.bucketing = true;
+
+;
 
 explain
 insert into table src_bucket select key,value from srcpart;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/bucket_many.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket_many.q b/ql/src/test/queries/clientpositive/bucket_many.q
index 1f0b795..8a64ff1 100644
--- a/ql/src/test/queries/clientpositive/bucket_many.q
+++ b/ql/src/test/queries/clientpositive/bucket_many.q
@@ -1,4 +1,4 @@
-set hive.enforce.bucketing = true;
+;
 set mapred.reduce.tasks = 16;
 
 create table bucket_many(key int, value string) clustered by (key) into 256 buckets;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/bucket_map_join_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket_map_join_1.q b/ql/src/test/queries/clientpositive/bucket_map_join_1.q
index 6bdb09e..deae460 100644
--- a/ql/src/test/queries/clientpositive/bucket_map_join_1.q
+++ b/ql/src/test/queries/clientpositive/bucket_map_join_1.q
@@ -1,8 +1,8 @@
 drop table table1;
 drop table table2;
 
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 
 create table table1(key string, value string) clustered by (key, value)
 sorted by (key, value) into 1 BUCKETS stored as textfile;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/bucket_map_join_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket_map_join_2.q b/ql/src/test/queries/clientpositive/bucket_map_join_2.q
index 07f6d15..f416706 100644
--- a/ql/src/test/queries/clientpositive/bucket_map_join_2.q
+++ b/ql/src/test/queries/clientpositive/bucket_map_join_2.q
@@ -1,8 +1,8 @@
 drop table table1;
 drop table table2;
 
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 
 create table table1(key string, value string) clustered by (key, value)
 sorted by (key desc, value desc) into 1 BUCKETS stored as textfile;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/bucket_map_join_spark4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket_map_join_spark4.q b/ql/src/test/queries/clientpositive/bucket_map_join_spark4.q
index 1ca20e4..4b75685 100644
--- a/ql/src/test/queries/clientpositive/bucket_map_join_spark4.q
+++ b/ql/src/test/queries/clientpositive/bucket_map_join_spark4.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 -- SORT_QUERY_RESULTS
@@ -17,8 +17,8 @@ select * from src where key < 10;
 insert overwrite table tbl3
 select * from src where key < 10;
 
-set hive.enforce.bucketing = false;
-set hive.enforce.sorting = false;
+;
+
 set hive.exec.reducers.max = 100;
 
 set hive.auto.convert.join=true;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q b/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q
index 8546e78..40dad17 100644
--- a/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q
+++ b/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q
@@ -15,8 +15,8 @@ load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_m
 load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
 load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
 
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting = true;
+
+
 set hive.optimize.bucketingsorting=false;
 insert overwrite table tab_part partition (ds='2008-04-08')
 select key,value from srcbucket_mapjoin_part;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/bucket_map_join_tez2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket_map_join_tez2.q b/ql/src/test/queries/clientpositive/bucket_map_join_tez2.q
index 2f968bd..1e7db5e 100644
--- a/ql/src/test/queries/clientpositive/bucket_map_join_tez2.q
+++ b/ql/src/test/queries/clientpositive/bucket_map_join_tez2.q
@@ -15,8 +15,8 @@ load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_m
 load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
 load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
 
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting = true;
+
+
 set hive.optimize.bucketingsorting=false;
 insert overwrite table tab_part partition (ds='2008-04-08')
 select key,value from srcbucket_mapjoin_part;

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/bucket_num_reducers.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket_num_reducers.q b/ql/src/test/queries/clientpositive/bucket_num_reducers.q
index 37ae6cc..06f334e 100644
--- a/ql/src/test/queries/clientpositive/bucket_num_reducers.q
+++ b/ql/src/test/queries/clientpositive/bucket_num_reducers.q
@@ -1,4 +1,4 @@
-set hive.enforce.bucketing = true;
+;
 set hive.exec.mode.local.auto=false;
 set mapred.reduce.tasks = 10;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/bucket_num_reducers2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket_num_reducers2.q b/ql/src/test/queries/clientpositive/bucket_num_reducers2.q
index 8c64d60..48e5f01 100644
--- a/ql/src/test/queries/clientpositive/bucket_num_reducers2.q
+++ b/ql/src/test/queries/clientpositive/bucket_num_reducers2.q
@@ -1,4 +1,4 @@
-set hive.enforce.bucketing = true;
+;
 set hive.exec.mode.local.auto=false;
 set hive.exec.reducers.max = 2;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/bucketmapjoin13.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucketmapjoin13.q b/ql/src/test/queries/clientpositive/bucketmapjoin13.q
index f01c43e..fd2f22a 100644
--- a/ql/src/test/queries/clientpositive/bucketmapjoin13.q
+++ b/ql/src/test/queries/clientpositive/bucketmapjoin13.q
@@ -1,5 +1,5 @@
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max=1;
 
 CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING) 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/bucketmapjoin6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucketmapjoin6.q b/ql/src/test/queries/clientpositive/bucketmapjoin6.q
index a0ef371..9da0619 100644
--- a/ql/src/test/queries/clientpositive/bucketmapjoin6.q
+++ b/ql/src/test/queries/clientpositive/bucketmapjoin6.q
@@ -7,8 +7,8 @@ create table tmp1 (a string, b string) clustered by (a) sorted by (a) into 10 bu
 create table tmp2 (a string, b string) clustered by (a) sorted by (a) into 10 buckets;
 
 
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max=1;
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_1.q b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_1.q
index 8cc308f..8f8d625 100644
--- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_1.q
+++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_1.q
@@ -1,7 +1,7 @@
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_2.q b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_2.q
index 9ecd2c4..a66378c 100644
--- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_2.q
+++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_2.q
@@ -2,8 +2,8 @@ set hive.auto.convert.join=true;
 set hive.auto.convert.sortmerge.join=true;
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_3.q b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_3.q
index 91e97de..6027707 100644
--- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_3.q
+++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_3.q
@@ -1,7 +1,7 @@
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_4.q b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_4.q
index 623b22b..0f1e8c6 100644
--- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_4.q
+++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_4.q
@@ -2,8 +2,8 @@ set hive.auto.convert.join=true;
 set hive.auto.convert.sortmerge.join=true;
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_5.q b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_5.q
index 205a450..6f4becd 100644
--- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_5.q
+++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_5.q
@@ -2,8 +2,8 @@ set hive.auto.convert.join=true;
 set hive.auto.convert.sortmerge.join=true;
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_6.q b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_6.q
index a4e84f8..a609422 100644
--- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_6.q
+++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_6.q
@@ -2,8 +2,8 @@ set hive.auto.convert.join=true;
 set hive.auto.convert.sortmerge.join=true;
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_7.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_7.q b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_7.q
index f597884..b8370c6 100644
--- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_7.q
+++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_7.q
@@ -2,8 +2,8 @@ set hive.auto.convert.join=true;
 set hive.auto.convert.sortmerge.join=true;
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_8.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_8.q b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_8.q
index 95a9a64..b34f8d1 100644
--- a/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_8.q
+++ b/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_8.q
@@ -2,8 +2,8 @@ set hive.auto.convert.join=true;
 set hive.auto.convert.sortmerge.join=true;
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
-set hive.enforce.bucketing=true;
-set hive.enforce.sorting=true;
+
+
 set hive.exec.reducers.max = 1;
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false; 

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q b/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q
index 096ae10..b906db2 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_auto_join1.q
@@ -1,7 +1,7 @@
 set hive.cbo.returnpath.hiveop=true;
 set hive.stats.fetch.column.stats=true;
-set hive.enforce.bucketing = true;
-set hive.enforce.sorting = true;
+;
+
 set hive.exec.reducers.max = 1;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/a53d2af5/ql/src/test/queries/clientpositive/combine3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/combine3.q b/ql/src/test/queries/clientpositive/combine3.q
index c9afc91..9e5809b 100644
--- a/ql/src/test/queries/clientpositive/combine3.q
+++ b/ql/src/test/queries/clientpositive/combine3.q
@@ -20,7 +20,7 @@ desc extended combine_3_srcpart_seq_rc partition(ds="2010-08-03", hr="001");
 
 select key, value, ds, hr from combine_3_srcpart_seq_rc where ds="2010-08-03" order by key, hr limit 30;
 
-set hive.enforce.bucketing = true;
+;
 set hive.exec.reducers.max = 1;
 
 drop table bucket3_1;


[25/27] hive git commit: HIVE-12338: Add webui to HiveServer2 (Jimmy, reviewed by Mohit, Szehon, Lefty)

Posted by om...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/eb1b80d9/service/src/resources/hive-webapps/static/css/hive.css
----------------------------------------------------------------------
diff --git a/service/src/resources/hive-webapps/static/css/hive.css b/service/src/resources/hive-webapps/static/css/hive.css
new file mode 100644
index 0000000..b8c9f54
--- /dev/null
+++ b/service/src/resources/hive-webapps/static/css/hive.css
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* General styling */
+body { padding-top: 60px; }
+.logo img { float: right; }
+.inner_header { margin-bottom: 1em; }
+section { margin-bottom: 3em; }
+

http://git-wip-us.apache.org/repos/asf/hive/blob/eb1b80d9/service/src/resources/hive-webapps/static/fonts/glyphicons-halflings-regular.eot
----------------------------------------------------------------------
diff --git a/service/src/resources/hive-webapps/static/fonts/glyphicons-halflings-regular.eot b/service/src/resources/hive-webapps/static/fonts/glyphicons-halflings-regular.eot
new file mode 100755
index 0000000..87eaa43
Binary files /dev/null and b/service/src/resources/hive-webapps/static/fonts/glyphicons-halflings-regular.eot differ

http://git-wip-us.apache.org/repos/asf/hive/blob/eb1b80d9/service/src/resources/hive-webapps/static/fonts/glyphicons-halflings-regular.svg
----------------------------------------------------------------------
diff --git a/service/src/resources/hive-webapps/static/fonts/glyphicons-halflings-regular.svg b/service/src/resources/hive-webapps/static/fonts/glyphicons-halflings-regular.svg
new file mode 100755
index 0000000..5fee068
--- /dev/null
+++ b/service/src/resources/hive-webapps/static/fonts/glyphicons-halflings-regular.svg
@@ -0,0 +1,228 @@
+<?xml version="1.0" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" >
+<svg xmlns="http://www.w3.org/2000/svg">
+<metadata></metadata>
+<defs>
+<font id="glyphicons_halflingsregular" horiz-adv-x="1200" >
+<font-face units-per-em="1200" ascent="960" descent="-240" />
+<missing-glyph horiz-adv-x="500" />
[SVG <glyph> path definitions for the Glyphicons Halflings webfont omitted]
+<glyph unicode="&#xe090;" d="M600 1196q162 0 299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299t80 299t217 217t299 80zM248 385l568 567q-100 62 -216 62q-171 0 -292.5 -121.5t-121.5 -292.5q0 -115 62 -215zM955 809l-564 -564q97 -59 209 -59q171 0 292.5 121.5 t121.5 292.5q0 112 -59 209z" />
+<glyph unicode="&#xe091;" d="M1200 400h-600v-301l-600 448l600 453v-300h600v-300z" />
+<glyph unicode="&#xe092;" d="M600 400h-600v300h600v300l600 -453l-600 -448v301z" />
+<glyph unicode="&#xe093;" d="M1098 600h-298v-600h-300v600h-296l450 600z" />
+<glyph unicode="&#xe094;" d="M998 600l-449 -600l-445 600h296v600h300v-600h298z" />
+<glyph unicode="&#xe095;" d="M600 199v301q-95 -2 -183 -20t-170 -52t-147 -92.5t-100 -135.5q6 132 41 238.5t103.5 193t184 138t271.5 59.5v271l600 -453z" />
+<glyph unicode="&#xe096;" d="M1200 1200h-400l129 -129l-294 -294l142 -142l294 294l129 -129v400zM565 423l-294 -294l129 -129h-400v400l129 -129l294 294z" />
+<glyph unicode="&#xe097;" d="M871 730l129 -130h-400v400l129 -129l295 295l142 -141zM200 600h400v-400l-129 130l-295 -295l-142 141l295 295z" />
+<glyph unicode="&#xe101;" d="M600 1177q118 0 224.5 -45.5t184 -123t123 -184t45.5 -224.5t-45.5 -224.5t-123 -184t-184 -123t-224.5 -45.5t-224.5 45.5t-184 123t-123 184t-45.5 224.5t45.5 224.5t123 184t184 123t224.5 45.5zM686 549l58 302q4 20 -8 34.5t-33 14.5h-207q-20 0 -32 -14.5t-8 -34.5 l58 -302q4 -20 21.5 -34.5t37.5 -14.5h54q20 0 37.5 14.5t21.5 34.5zM700 400h-200v-100h200v100z" />
+<glyph unicode="&#xe102;" d="M1200 900h-111v6t-1 15t-3 18l-34 172q-11 39 -41.5 63t-69.5 24q-32 0 -61 -17l-239 -144q-22 -13 -40 -35q-19 24 -40 36l-238 144q-33 18 -62 18q-39 0 -69.5 -23t-40.5 -61l-35 -177q-2 -8 -3 -18t-1 -15v-6h-111v-100h100v-200h400v300h200v-300h400v200h100v100z M731 900l202 197q5 -12 12 -32.5t23 -64t25 -72t7 -28.5h-269zM481 900h-281q-3 0 14 48t35 96l18 47zM100 0h400v400h-400v-400zM700 400h400v-400h-400v400z" />
+<glyph unicode="&#xe103;" d="M0 121l216 193q-9 53 -13 83t-5.5 94t9 113t38.5 114t74 124q47 60 99.5 102.5t103 68t127.5 48t145.5 37.5t184.5 43.5t220 58.5q0 -189 -22 -343t-59 -258t-89 -181.5t-108.5 -120t-122 -68t-125.5 -30t-121.5 -1.5t-107.5 12.5t-87.5 17t-56.5 7.5l-99 -55l-201 -202 v143zM692 611q70 38 118.5 69.5t102 79t99 111.5t86.5 148q22 50 24 60t-6 19q-7 5 -17 5t-26.5 -14.5t-33.5 -39.5q-35 -51 -113.5 -108.5t-139.5 -89.5l-61 -32q-369 -197 -458 -401q-48 -111 -28.5 -117.5t86.5 76.5q55 66 367 234z" />
+<glyph unicode="&#xe105;" d="M1261 600l-26 -40q-6 -10 -20 -30t-49 -63.5t-74.5 -85.5t-97 -90t-116.5 -83.5t-132.5 -59t-145.5 -23.5t-145.5 23.5t-132.5 59t-116.5 83.5t-97 90t-74.5 85.5t-49 63.5t-20 30l-26 40l26 40q6 10 20 30t49 63.5t74.5 85.5t97 90t116.5 83.5t132.5 59t145.5 23.5 t145.5 -23.5t132.5 -59t116.5 -83.5t97 -90t74.5 -85.5t49 -63.5t20 -30zM600 240q64 0 123.5 20t100.5 45.5t85.5 71.5t66.5 75.5t58 81.5t47 66q-1 1 -28.5 37.5t-42 55t-43.5 53t-57.5 63.5t-58.5 54q49 -74 49 -163q0 -124 -88 -212t-212 -88t-212 88t-88 212 q0 85 46 158q-102 -87 -226 -258q7 -10 40.5 -58t56 -78.5t68 -77.5t87.5 -75t103 -49.5t125 -21.5zM484 762l-107 -106q49 -124 154 -191l105 105q-37 24 -75 72t-57 84z" />
+<glyph unicode="&#xe106;" d="M906 1200l-314 -1200h-148l37 143q-82 21 -165 71.5t-140 102t-109.5 112t-72 88.5t-29.5 43l-26 40l26 40q6 10 20 30t49 63.5t74.5 85.5t97 90t116.5 83.5t132.5 59t145.5 23.5q61 0 121 -17l37 142h148zM1261 600l-26 -40q-7 -12 -25.5 -38t-63.5 -79.5t-95.5 -102.5 t-124 -100t-146.5 -79l38 145q22 15 44.5 34t46 44t40.5 44t41 50.5t33.5 43.5t33 44t24.5 34q-97 127 -140 175l39 146q67 -54 131.5 -125.5t87.5 -103.5t36 -52zM513 264l37 141q-107 18 -178.5 101.5t-71.5 193.5q0 85 46 158q-102 -87 -226 -258q210 -282 393 -336z M484 762l-107 -106q49 -124 154 -191l47 47l23 87q-30 28 -59 69t-44 68z" />
+<glyph unicode="&#xe107;" d="M-47 0h1294q37 0 50.5 35.5t-7.5 67.5l-642 1056q-20 33 -48 36t-48 -29l-642 -1066q-21 -32 -7.5 -66t50.5 -34zM700 200v100h-200v-100h-345l445 723l445 -723h-345zM700 700h-200v-100l100 -300l100 300v100z" />
+<glyph unicode="&#xe108;" d="M800 711l363 -325q15 -14 26 -38.5t11 -44.5v-41q0 -20 -12 -26.5t-29 5.5l-359 249v-263q100 -91 100 -113v-64q0 -21 -13 -29t-32 1l-94 78h-222l-94 -78q-19 -9 -32 -1t-13 29v64q0 22 100 113v263l-359 -249q-17 -12 -29 -5.5t-12 26.5v41q0 20 11 44.5t26 38.5 l363 325v339q0 62 44 106t106 44t106 -44t44 -106v-339z" />
+<glyph unicode="&#xe110;" d="M941 800l-600 -600h-341v200h259l600 600h241v198l300 -295l-300 -300v197h-159zM381 678l141 142l-181 180h-341v-200h259zM1100 598l300 -295l-300 -300v197h-241l-181 181l141 142l122 -123h159v198z" />
+<glyph unicode="&#xe111;" d="M100 1100h1000q41 0 70.5 -29.5t29.5 -70.5v-600q0 -41 -29.5 -70.5t-70.5 -29.5h-596l-304 -300v300h-100q-41 0 -70.5 29.5t-29.5 70.5v600q0 41 29.5 70.5t70.5 29.5z" />
+<glyph unicode="&#xe112;" d="M400 900h-300v300h300v-300zM1100 900h-300v300h300v-300zM1100 800v-200q0 -42 -3 -83t-15 -104t-31.5 -116t-58 -109.5t-89 -96.5t-129 -65.5t-174.5 -25.5t-174.5 25.5t-129 65.5t-89 96.5t-58 109.5t-31.5 116t-15 104t-3 83v200h300v-250q0 -113 6 -145 q17 -92 102 -117q39 -11 92 -11q37 0 66.5 5.5t50 15.5t36 24t24 31.5t14 37.5t7 42t2.5 45t0 47v25v250h300z" />
+<glyph unicode="&#xe113;" d="M902 184l226 227l-578 579l-580 -579l227 -227l352 353z" />
+<glyph unicode="&#xe114;" d="M650 218l578 579l-226 227l-353 -353l-352 353l-227 -227z" />
+<glyph unicode="&#xe115;" d="M1198 400v600h-796l215 -200h381v-400h-198l299 -283l299 283h-200zM-198 700l299 283l300 -283h-203v-400h385l215 -200h-800v600h-196z" />
+<glyph unicode="&#xe116;" d="M1050 1200h94q20 0 35 -14.5t15 -35.5t-15 -35.5t-35 -14.5h-54l-201 -961q-2 -4 -6 -10.5t-19 -17.5t-33 -11h-31v-50q0 -20 -14.5 -35t-35.5 -15t-35.5 15t-14.5 35v50h-300v-50q0 -20 -14.5 -35t-35.5 -15t-35.5 15t-14.5 35v50h-50q-21 0 -35.5 15t-14.5 35 q0 21 14.5 35.5t35.5 14.5h535l48 200h-633q-32 0 -54.5 21t-27.5 43l-100 475q-5 24 10 42q14 19 39 19h896l38 162q5 17 18.5 27.5t30.5 10.5z" />
+<glyph unicode="&#xe117;" d="M1200 1000v-100h-1200v100h200q0 41 29.5 70.5t70.5 29.5h300q41 0 70.5 -29.5t29.5 -70.5h500zM0 800h1200v-800h-1200v800z" />
+<glyph unicode="&#xe118;" d="M201 800l-200 -400v600h200q0 41 29.5 70.5t70.5 29.5h300q41 0 70.5 -29.5t29.5 -70.5h500v-200h-1000zM1501 700l-300 -700h-1200l300 700h1200z" />
+<glyph unicode="&#xe119;" d="M302 300h198v600h-198l298 300l298 -300h-198v-600h198l-298 -300z" />
+<glyph unicode="&#xe120;" d="M900 303v197h-600v-197l-300 297l300 298v-198h600v198l300 -298z" />
+<glyph unicode="&#xe121;" d="M31 400l172 739q5 22 23 41.5t38 19.5h672q19 0 37.5 -22.5t23.5 -45.5l172 -732h-1138zM100 300h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5v100q0 41 29.5 70.5t70.5 29.5zM900 200h-100v-100h100v100z M1100 200h-100v-100h100v100z" />
+<glyph unicode="&#xe122;" d="M1100 200v850q0 21 14.5 35.5t35.5 14.5q20 0 35 -14.5t15 -35.5v-850q0 -20 -15 -35t-35 -15q-21 0 -35.5 15t-14.5 35zM325 800l675 250v-850l-675 200h-38l47 -276q2 -12 -3 -17.5t-11 -6t-21 -0.5h-8h-83q-20 0 -34.5 14t-18.5 35q-56 337 -56 351v250v5 q0 13 0.5 18.5t2.5 13t8 10.5t15 3h200zM-101 600v50q0 24 25 49t50 38l25 13v-250l-11 5.5t-24 14t-30 21.5t-24 27.5t-11 31.5z" />
+<glyph unicode="&#xe124;" d="M445 1180l-45 -233l-224 78l78 -225l-233 -44l179 -156l-179 -155l233 -45l-78 -224l224 78l45 -233l155 179l155 -179l45 233l224 -78l-78 224l234 45l-180 155l180 156l-234 44l78 225l-224 -78l-45 233l-155 -180z" />
+<glyph unicode="&#xe125;" d="M700 1200h-50q-27 0 -51 -20t-38 -48l-96 -198l-145 -196q-20 -26 -20 -63v-400q0 -75 100 -75h61q123 -100 139 -100h250q46 0 83 57l238 344q29 31 29 74v100q0 44 -30.5 84.5t-69.5 40.5h-328q28 118 28 125v150q0 44 -30.5 84.5t-69.5 40.5zM700 925l-50 -225h450 v-125l-250 -375h-214l-136 100h-100v375l150 212l100 213h50v-175zM0 800v-600h200v600h-200z" />
+<glyph unicode="&#xe126;" d="M700 0h-50q-27 0 -51 20t-38 48l-96 198l-145 196q-20 26 -20 63v400q0 75 100 75h61q123 100 139 100h250q46 0 83 -57l238 -344q29 -31 29 -74v-100q0 -44 -30.5 -84.5t-69.5 -40.5h-328q28 -118 28 -125v-150q0 -44 -30.5 -84.5t-69.5 -40.5zM200 400h-200v600h200 v-600zM700 275l-50 225h450v125l-250 375h-214l-136 -100h-100v-375l150 -212l100 -213h50v175z" />
+<glyph unicode="&#xe127;" d="M364 873l362 230q14 6 25 6q17 0 29 -12l109 -112q14 -14 14 -34q0 -18 -11 -32l-85 -121h302q85 0 138.5 -38t53.5 -110t-54.5 -111t-138.5 -39h-107l-130 -339q-7 -22 -20.5 -41.5t-28.5 -19.5h-341q-7 0 -90 81t-83 94v525q0 17 14 35.5t28 28.5zM408 792v-503 l100 -89h293l131 339q6 21 19.5 41t28.5 20h203q16 0 25 15t9 36q0 20 -9 34.5t-25 14.5h-457h-6.5h-7.5t-6.5 0.5t-6 1t-5 1.5t-5.5 2.5t-4 4t-4 5.5q-5 12 -5 20q0 14 10 27l147 183l-86 83zM208 200h-200v600h200v-600z" />
+<glyph unicode="&#xe128;" d="M475 1104l365 -230q7 -4 16.5 -10.5t26 -26t16.5 -36.5v-526q0 -13 -85.5 -93.5t-93.5 -80.5h-342q-15 0 -28.5 20t-19.5 41l-131 339h-106q-84 0 -139 39t-55 111t54 110t139 37h302l-85 121q-11 16 -11 32q0 21 14 34l109 113q13 12 29 12q11 0 25 -6zM370 946 l145 -184q10 -11 10 -26q0 -11 -5 -20q-1 -3 -3.5 -5.5l-4 -4t-5 -2.5t-5.5 -1.5t-6.5 -1t-6.5 -0.5h-7.5h-6.5h-476v-100h222q15 0 28.5 -20.5t19.5 -40.5l131 -339h293l106 89v502l-342 237zM1199 201h-200v600h200v-600z" />
+<glyph unicode="&#xe129;" d="M1100 473v342q0 15 -20 28.5t-41 19.5l-339 131v106q0 84 -39 139t-111 55t-110 -53.5t-38 -138.5v-302l-121 84q-15 12 -33.5 11.5t-32.5 -13.5l-112 -110q-22 -22 -6 -53l230 -363q4 -6 10.5 -15.5t26 -25t36.5 -15.5h525q13 0 94 83t81 90zM911 400h-503l-236 339 l83 86l183 -146q22 -18 47 -5q3 1 5.5 3.5l4 4t2.5 5t1.5 5.5t1 6.5t0.5 6v7.5v7v456q0 22 25 31t50 -0.5t25 -30.5v-202q0 -16 20 -29.5t41 -19.5l339 -130v-294zM1000 200v-200h-600v200h600z" />
+<glyph unicode="&#xe130;" d="M305 1104v200h600v-200h-600zM605 310l339 131q20 6 40.5 19.5t20.5 28.5v342q0 7 -81 90t-94 83h-525q-17 0 -35.5 -14t-28.5 -28l-10 -15l-230 -362q-15 -31 7 -53l112 -110q13 -13 32 -13.5t34 10.5l121 85l-1 -302q0 -84 38.5 -138t110.5 -54t111 55t39 139v106z M905 804v-294l-340 -130q-20 -6 -40 -20t-20 -29v-202q0 -22 -25 -31t-50 0t-25 31v456v14.5t-1.5 11.5t-5 12t-9.5 7q-24 13 -46 -5l-184 -146l-83 86l237 339h503z" />
+<glyph unicode="&#xe131;" d="M603 1195q162 0 299.5 -80t217.5 -218t80 -300t-80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5zM598 701h-298v-201h300l-2 -194l402 294l-402 298v-197z" />
+<glyph unicode="&#xe132;" d="M597 1195q122 0 232.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-218 -217.5t-300 -80t-299.5 80t-217.5 217.5t-80 299.5q0 122 47.5 232.5t127.5 190.5t190.5 127.5t231.5 47.5zM200 600l400 -294v194h302v201h-300v197z" />
+<glyph unicode="&#xe133;" d="M603 1195q121 0 231.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5zM300 600h200v-300h200v300h200l-300 400z" />
+<glyph unicode="&#xe134;" d="M603 1195q121 0 231.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5zM500 900v-300h-200l300 -400l300 400h-200v300h-200z" />
+<glyph unicode="&#xe135;" d="M603 1195q121 0 231.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5zM627 1101q-15 -12 -36.5 -21t-34.5 -12t-44 -8t-39 -6 q-15 -3 -45.5 0.5t-45.5 -2.5q-21 -7 -52 -26.5t-34 -34.5q-3 -11 6.5 -22.5t8.5 -18.5q-3 -34 -27.5 -90.5t-29.5 -79.5q-8 -33 5.5 -92.5t7.5 -87.5q0 -9 17 -44t16 -60q12 0 23 -5.5t23 -15t20 -13.5q24 -12 108 -42q22 -8 53 -31.5t59.5 -38.5t57.5 -11q8 -18 -15 -55 t-20 -57q42 -71 87 -80q0 -6 -3 -15.5t-3.5 -14.5t4.5 -17q102 -2 221 112q30 29 47 47t34.5 49t20.5 62q-14 9 -37 9.5t-36 7.5q-14 7 -49 15t-52 19q-9 0 -39.5 -0.5t-46.5 -1.5t-39 -6.5t-39 -16.5q-50 -35 -66 -12q-4 2 -3.5 25.5t0.5 25.5q-6 13 -26.5 17t-24.5 7 q2 22 -2 41t-16.5 28t-38.5 -20q-23 -25 -42 4q-19 28 -8 58q6 16 22 22q6 -1 26 -1.5t33.5 -4t19.5 -13.5q12 -19 32 -37.5t34 -27.5l14 -8q0 3 9.5 39.5t5.5 57.5q-4 23 14.5 44.5t22.5 31.5q5 14 10 35t8.5 31t15.5 22.5t34 21.5q-6 18 1
 0 37q8 0 23.5 -1.5t24.5 -1.5 t20.5 4.5t20.5 15.5q-10 23 -30.5 42.5t-38 30t-49 26.5t-43.5 23q11 41 1 44q31 -13 58.5 -14.5t39.5 3.5l11 4q6 36 -17 53.5t-64 28.5t-56 23q-19 -3 -37 0zM613 994q0 -18 8 -42.5t16.5 -44t9.5 -23.5q-9 2 -31 5t-36 5t-32 8t-30 14q3 12 16 30t16 25q10 -10 18.5 -10 t14 6t14.5 14.5t16 12.5z" />
+<glyph unicode="&#xe137;" horiz-adv-x="1220" d="M100 1196h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5v100q0 41 29.5 70.5t70.5 29.5zM1100 1096h-200v-100h200v100zM100 796h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000 q-41 0 -70.5 29.5t-29.5 70.5v100q0 41 29.5 70.5t70.5 29.5zM1100 696h-500v-100h500v100zM100 396h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5v100q0 41 29.5 70.5t70.5 29.5zM1100 296h-300v-100h300v100z " />
+<glyph unicode="&#xe138;" d="M1100 1200v-100h-1000v100h1000zM150 1000h900l-350 -500v-300l-200 -200v500z" />
+<glyph unicode="&#xe140;" d="M329 729l142 142l-200 200l129 129h-400v-400l129 129zM1200 1200v-400l-129 129l-200 -200l-142 142l200 200l-129 129h400zM271 129l129 -129h-400v400l129 -129l200 200l142 -142zM1071 271l129 129v-400h-400l129 129l-200 200l142 142z" />
+<glyph unicode="&#xe141;" d="M596 1192q162 0 299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299t80 299t217 217t299 80zM596 1010q-171 0 -292.5 -121.5t-121.5 -292.5q0 -172 121.5 -293t292.5 -121t292.5 121t121.5 293q0 171 -121.5 292.5t-292.5 121.5zM455 905 q22 0 38 -16t16 -39t-16 -39t-38 -16q-23 0 -39 16.5t-16 38.5t16 38.5t39 16.5zM708 821l1 1q-9 14 -9 28q0 22 16 38.5t39 16.5q22 0 38 -16t16 -39t-16 -39t-38 -16q-14 0 -29 10l-55 -145q17 -22 17 -51q0 -36 -25.5 -61.5t-61.5 -25.5t-61.5 25.5t-25.5 61.5 q0 32 20.5 56.5t51.5 29.5zM855 709q23 0 38.5 -15.5t15.5 -38.5t-16 -39t-38 -16q-23 0 -39 16t-16 39q0 22 16 38t39 16zM345 709q23 0 39 -16t16 -38q0 -23 -16 -39t-39 -16q-22 0 -38 16t-16 39t15.5 38.5t38.5 15.5z" />
+<glyph unicode="&#xe143;" d="M649 54l-16 22q-90 125 -293 323q-71 70 -104.5 105.5t-77 89.5t-61 99t-17.5 91q0 131 98.5 229.5t230.5 98.5q143 0 241 -129q103 129 246 129q129 0 226 -98.5t97 -229.5q0 -46 -17.5 -91t-61 -99t-77 -89.5t-104.5 -105.5q-203 -198 -293 -323zM844 524l12 12 q64 62 97.5 97t64.5 79t31 72q0 71 -48 119t-105 48q-74 0 -132 -82l-118 -171l-114 174q-51 79 -123 79q-60 0 -109.5 -49t-49.5 -118q0 -27 30.5 -70t61.5 -75.5t95 -94.5l22 -22q93 -90 190 -201q82 92 195 203z" />
+<glyph unicode="&#xe144;" d="M476 406l19 -17l105 105l-212 212l389 389l247 -247l-95 -96l18 -18q46 -46 77 -99l29 29q35 35 62.5 88t27.5 96q0 93 -66 159l-141 141q-66 66 -159 66q-95 0 -159 -66l-283 -283q-66 -64 -66 -159q0 -93 66 -159zM123 193l141 -141q66 -66 159 -66q95 0 159 66 l283 283q66 66 66 159t-66 159l-141 141q-12 12 -19 17l-105 -105l212 -212l-389 -389l-247 248l95 95l-18 18q-46 45 -75 101l-55 -55q-66 -66 -66 -159q0 -94 66 -160z" />
+<glyph unicode="&#xe145;" d="M200 100v953q0 21 30 46t81 48t129 38t163 15t162 -15t127 -38t79 -48t29 -46v-953q0 -41 -29.5 -70.5t-70.5 -29.5h-600q-41 0 -70.5 29.5t-29.5 70.5zM900 1000h-600v-700h600v700zM600 46q43 0 73.5 30.5t30.5 73.5t-30.5 73.5t-73.5 30.5t-73.5 -30.5t-30.5 -73.5 t30.5 -73.5t73.5 -30.5z" />
+<glyph unicode="&#xe148;" d="M700 1029v-307l64 -14q34 -7 64 -16.5t70 -31.5t67.5 -52t47.5 -80.5t20 -112.5q0 -139 -89 -224t-244 -96v-77h-100v78q-152 17 -237 104q-40 40 -52.5 93.5t-15.5 139.5h139q5 -77 48.5 -126.5t117.5 -64.5v335l-27 7q-46 14 -79 26.5t-72 36t-62.5 52t-40 72.5 t-16.5 99q0 92 44 159.5t109 101t144 40.5v78h100v-79q38 -4 72.5 -13.5t75.5 -31.5t71 -53.5t51.5 -84t24.5 -118.5h-159q-8 72 -35 109.5t-101 50.5zM600 755v274q-61 -8 -97.5 -37.5t-36.5 -102.5q0 -29 8 -51t16.5 -34t29.5 -22.5t31 -13.5t38 -10q7 -2 11 -3zM700 548 v-311q170 18 170 151q0 64 -44 99.5t-126 60.5z" />
+<glyph unicode="&#xe149;" d="M866 300l50 -147q-41 -25 -80.5 -36.5t-59 -13t-61.5 -1.5q-23 0 -128 33t-155 29q-39 -4 -82 -17t-66 -25l-24 -11l-55 145l16.5 11t15.5 10t13.5 9.5t14.5 12t14.5 14t17.5 18.5q48 55 54 126.5t-30 142.5h-221v100h166q-24 49 -44 104q-10 26 -14.5 55.5t-3 72.5 t25 90t68.5 87q97 88 263 88q129 0 230 -89t101 -208h-153q0 52 -34 89.5t-74 51.5t-76 14q-37 0 -79 -14.5t-62 -35.5q-41 -44 -41 -101q0 -11 2.5 -24.5t5.5 -24t9.5 -26.5t10.5 -25t14 -27.5t14 -25.5t15.5 -27t13.5 -24h242v-100h-197q8 -50 -2.5 -115t-31.5 -94 q-41 -59 -99 -113q35 11 84 18t70 7q32 1 102 -16t104 -17q76 0 136 30z" />
+<glyph unicode="&#xe150;" d="M300 0l298 300h-198v900h-200v-900h-198zM900 1200l298 -300h-198v-900h-200v900h-198z" />
+<glyph unicode="&#xe151;" d="M400 300h198l-298 -300l-298 300h198v900h200v-900zM1000 1200v-500h-100v100h-100v-100h-100v500h300zM901 1100h-100v-200h100v200zM700 500h300v-200h-99v-100h-100v100h99v100h-200v100zM800 100h200v-100h-300v200h100v-100z" />
+<glyph unicode="&#xe152;" d="M400 300h198l-298 -300l-298 300h198v900h200v-900zM1000 1200v-200h-99v-100h-100v100h99v100h-200v100h300zM800 800h200v-100h-300v200h100v-100zM700 500h300v-500h-100v100h-100v-100h-100v500zM801 200h100v200h-100v-200z" />
+<glyph unicode="&#xe153;" d="M300 0l298 300h-198v900h-200v-900h-198zM900 1100h-100v100h200v-500h-100v400zM1100 500v-500h-100v100h-200v400h300zM1001 400h-100v-200h100v200z" />
+<glyph unicode="&#xe154;" d="M300 0l298 300h-198v900h-200v-900h-198zM1100 1200v-500h-100v100h-200v400h300zM1001 1100h-100v-200h100v200zM900 400h-100v100h200v-500h-100v400z" />
+<glyph unicode="&#xe155;" d="M300 0l298 300h-198v900h-200v-900h-198zM900 1000h-200v200h200v-200zM1000 700h-300v200h300v-200zM1100 400h-400v200h400v-200zM1200 100h-500v200h500v-200z" />
+<glyph unicode="&#xe156;" d="M300 0l298 300h-198v900h-200v-900h-198zM1200 1000h-500v200h500v-200zM1100 700h-400v200h400v-200zM1000 400h-300v200h300v-200zM900 100h-200v200h200v-200z" />
+<glyph unicode="&#xe157;" d="M400 1100h300q162 0 281 -118.5t119 -281.5v-300q0 -165 -118.5 -282.5t-281.5 -117.5h-300q-165 0 -282.5 117.5t-117.5 282.5v300q0 165 117.5 282.5t282.5 117.5zM800 900h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5 t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5z" />
+<glyph unicode="&#xe158;" d="M700 0h-300q-163 0 -281.5 117.5t-118.5 282.5v300q0 163 119 281.5t281 118.5h300q165 0 282.5 -117.5t117.5 -282.5v-300q0 -165 -117.5 -282.5t-282.5 -117.5zM800 900h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5 t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5zM400 800v-500l333 250z" />
+<glyph unicode="&#xe159;" d="M0 400v300q0 163 117.5 281.5t282.5 118.5h300q163 0 281.5 -119t118.5 -281v-300q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-165 0 -282.5 117.5t-117.5 282.5zM900 300v500q0 41 -29.5 70.5t-70.5 29.5h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5 t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5zM800 700h-500l250 -333z" />
+<glyph unicode="&#xe160;" d="M1100 700v-300q0 -162 -118.5 -281t-281.5 -119h-300q-165 0 -282.5 118.5t-117.5 281.5v300q0 165 117.5 282.5t282.5 117.5h300q165 0 282.5 -117.5t117.5 -282.5zM900 300v500q0 41 -29.5 70.5t-70.5 29.5h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5 t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5zM550 733l-250 -333h500z" />
+<glyph unicode="&#xe161;" d="M500 1100h400q165 0 282.5 -117.5t117.5 -282.5v-300q0 -165 -117.5 -282.5t-282.5 -117.5h-400v200h500q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5h-500v200zM700 550l-400 -350v200h-300v300h300v200z" />
+<glyph unicode="&#xe162;" d="M403 2l9 -1q13 0 26 16l538 630q15 19 6 36q-8 18 -32 16h-300q1 4 78 219.5t79 227.5q2 17 -6 27l-8 8h-9q-16 0 -25 -15q-4 -5 -98.5 -111.5t-228 -257t-209.5 -238.5q-17 -19 -7 -40q10 -19 32 -19h302q-155 -438 -160 -458q-5 -21 4 -32z" />
+<glyph unicode="&#xe163;" d="M800 200h-500q-41 0 -70.5 29.5t-29.5 70.5v500q0 41 29.5 70.5t70.5 29.5h500v185q-14 4 -114 7.5t-193 5.5l-93 2q-165 0 -282.5 -117.5t-117.5 -282.5v-300q0 -165 117.5 -282.5t282.5 -117.5h300q47 0 100 15v185zM900 200v200h-300v300h300v200l400 -350z" />
+<glyph unicode="&#xe164;" d="M1200 700l-149 149l-342 -353l-213 213l353 342l-149 149h500v-500zM1022 571l-122 -123v-148q0 -41 -29.5 -70.5t-70.5 -29.5h-500q-41 0 -70.5 29.5t-29.5 70.5v500q0 41 29.5 70.5t70.5 29.5h156l118 122l-74 78h-100q-165 0 -282.5 -117.5t-117.5 -282.5v-300 q0 -165 117.5 -282.5t282.5 -117.5h300q163 0 281.5 117.5t118.5 282.5v98z" />
+<glyph unicode="&#xe165;" d="M600 1196q162 0 299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299t80 299t217 217t299 80zM600 1014q-171 0 -292.5 -121.5t-121.5 -292.5t121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5zM600 794 q80 0 137 -57t57 -137t-57 -137t-137 -57t-137 57t-57 137t57 137t137 57z" />
+<glyph unicode="&#xe166;" d="M700 800v400h-300v-400h-300l445 -500l450 500h-295zM25 300h1048q11 0 19 -7.5t8 -17.5v-275h-1100v275q0 11 7 18t18 7zM1000 200h-100v-50h100v50z" />
+<glyph unicode="&#xe167;" d="M400 700v-300h300v300h295l-445 500l-450 -500h300zM25 300h1048q11 0 19 -7.5t8 -17.5v-275h-1100v275q0 11 7 18t18 7zM1000 200h-100v-50h100v50z" />
+<glyph unicode="&#xe168;" d="M405 400l596 596l-154 155l-442 -442l-150 151l-155 -155zM25 300h1048q11 0 19 -7.5t8 -17.5v-275h-1100v275q0 11 7 18t18 7zM1000 200h-100v-50h100v50z" />
+<glyph unicode="&#xe169;" d="M409 1103l-97 97l-212 -212l97 -98zM650 861l-149 149l-212 -212l149 -149l-238 -248h700v699zM25 300h1048q11 0 19 -7.5t8 -17.5v-275h-1100v275q0 11 7 18t18 7zM1000 200h-100v-50h100v50z" />
+<glyph unicode="&#xe170;" d="M539 950l-149 -149l212 -212l149 148l248 -237v700h-699zM297 709l-97 -97l212 -212l98 97zM25 300h1048q11 0 19 -7.5t8 -17.5v-275h-1100v275q0 11 7 18t18 7zM1000 200h-100v-50h100v50z" />
+<glyph unicode="&#xe171;" d="M1200 1199v-1079l-475 272l-310 -393v416h-392zM1166 1148l-672 -712v-226z" />
+<glyph unicode="&#xe172;" d="M1100 1000v-850q0 -21 -15 -35.5t-35 -14.5h-150v400h-700v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100zM700 1200h-100v-200h100v200z" />
+<glyph unicode="&#xe173;" d="M578 500h-378v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-218l-276 -275l-120 120zM700 1200h-100v-200h100v200zM1300 538l-475 -476l-244 244l123 123l120 -120l353 352z" />
+<glyph unicode="&#xe174;" d="M529 500h-329v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-269l-103 -103l-170 170zM700 1200h-100v-200h100v200zM1167 6l-170 170l-170 -170l-127 127l170 170l-170 170l127 127l170 -170l170 170l127 -128 l-170 -169l170 -170z" />
+<glyph unicode="&#xe175;" d="M700 500h-500v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-300h-400v-200zM700 1000h-100v200h100v-200zM1000 600h-200v-300h-200l300 -300l300 300h-200v300z" />
+<glyph unicode="&#xe176;" d="M602 500h-402v-400h-150q-21 0 -35.5 14.5t-14.5 35.5v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-402l-200 200zM700 1000h-100v200h100v-200zM1000 300h200l-300 300l-300 -300h200v-300h200v300z" />
+<glyph unicode="&#xe177;" d="M1200 900v150q0 21 -14.5 35.5t-35.5 14.5h-1100q-21 0 -35.5 -14.5t-14.5 -35.5v-150h1200zM0 800v-550q0 -21 14.5 -35.5t35.5 -14.5h1100q21 0 35.5 14.5t14.5 35.5v550h-1200zM100 500h400v-200h-400v200z" />
+<glyph unicode="&#xe178;" d="M500 1000h400v198l300 -298l-300 -298v198h-400v200zM100 800v200h100v-200h-100zM400 800h-100v200h100v-200zM700 300h-400v-198l-300 298l300 298v-198h400v-200zM800 500h100v-200h-100v200zM1000 500v-200h100v200h-100z" />
+<glyph unicode="&#xe179;" d="M1200 50v1106q0 31 -18 40.5t-44 -7.5l-276 -117q-25 -16 -43.5 -50.5t-18.5 -65.5v-359q0 -29 10.5 -55.5t25 -43t29 -28.5t25.5 -18l10 -5v-397q0 -21 14.5 -35.5t35.5 -14.5h200q21 0 35.5 14.5t14.5 35.5zM550 1200l50 -100v-400l-100 -203v-447q0 -21 -14.5 -35.5 t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v447l-100 203v400l50 100l50 -100v-300h100v300l50 100l50 -100v-300h100v300z" />
+<glyph unicode="&#xe180;" d="M1100 106v888q0 22 25 34.5t50 13.5l25 2v56h-400v-56q75 0 87.5 -6t12.5 -44v-394h-500v394q0 38 12.5 44t87.5 6v56h-400v-56q4 0 11 -0.5t24 -3t30 -7t24 -15t11 -24.5v-888q0 -22 -25 -34.5t-50 -13.5l-25 -2v-56h400v56q-75 0 -87.5 6t-12.5 44v394h500v-394 q0 -38 -12.5 -44t-87.5 -6v-56h400v56q-4 0 -11 0.5t-24 3t-30 7t-24 15t-11 24.5z" />
+<glyph unicode="&#xe181;" d="M675 1000l-100 100h-375l-100 -100h400l200 -200v-98l295 98h105v200h-425zM500 300v500q0 41 -29.5 70.5t-70.5 29.5h-300q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h300q41 0 70.5 29.5t29.5 70.5zM100 800h300v-200h-300v200zM700 565l400 133 v-163l-400 -133v163zM100 500h300v-200h-300v200zM805 300l295 98v-298h-425l-100 -100h-375l-100 100h400l200 200h105z" />
+<glyph unicode="&#xe182;" d="M179 1169l-162 -162q-1 -11 -0.5 -32.5t16 -90t46.5 -140t104 -177.5t175 -208q103 -103 207.5 -176t180 -103.5t137 -47t92.5 -16.5l31 1l163 162q16 17 13 40.5t-22 37.5l-192 136q-19 14 -45 12t-42 -19l-119 -118q-143 103 -267 227q-126 126 -227 268l118 118 q17 17 20 41.5t-11 44.5l-139 194q-14 19 -36.5 22t-40.5 -14z" />
+<glyph unicode="&#xe183;" d="M1200 712v200q-6 8 -19 20.5t-63 45t-112 57t-171 45t-235 20.5q-92 0 -175 -10.5t-141.5 -27t-108.5 -36.5t-81.5 -40t-53.5 -36.5t-31 -27.5l-9 -10v-200q0 -21 14.5 -33.5t34.5 -8.5l202 33q20 4 34.5 21t14.5 38v146q141 24 300 24t300 -24v-146q0 -21 14.5 -38 t34.5 -21l202 -33q20 -4 34.5 8.5t14.5 33.5zM800 650l365 -303q14 -14 24.5 -39.5t10.5 -45.5v-212q0 -21 -15 -35.5t-35 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5v212q0 20 10.5 45.5t24.5 39.5l365 303v50q0 4 1 10.5t12 22.5t30 28.5t60 23t97 10.5t97 -10t60 -23.5 t30 -27.5t12 -24l1 -10v-50z" />
+<glyph unicode="&#xe184;" d="M175 200h950l-125 150v250l100 100v400h-100v-200h-100v200h-200v-200h-100v200h-200v-200h-100v200h-100v-400l100 -100v-250zM1200 100v-100h-1100v100h1100z" />
+<glyph unicode="&#xe185;" d="M600 1100h100q41 0 70.5 -29.5t29.5 -70.5v-1000h-300v1000q0 41 29.5 70.5t70.5 29.5zM1000 800h100q41 0 70.5 -29.5t29.5 -70.5v-700h-300v700q0 41 29.5 70.5t70.5 29.5zM400 0v400q0 41 -29.5 70.5t-70.5 29.5h-100q-41 0 -70.5 -29.5t-29.5 -70.5v-400h300z" />
+<glyph unicode="&#xe186;" d="M1200 800v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212zM1000 900h-900v-700h900v700zM200 800v-300h200v-100h-200v-100h300v300h-200v100h200v100h-300zM800 800h-200v-500h200v100h100v300h-100 v100zM800 700v-300h-100v300h100z" />
+<glyph unicode="&#xe187;" d="M1200 800v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212zM1000 900h-900v-700h900v700zM400 600h-100v200h-100v-500h100v200h100v-200h100v500h-100v-200zM800 800h-200v-500h200v100h100v300h-100 v100zM800 700v-300h-100v300h100z" />
+<glyph unicode="&#xe188;" d="M1200 800v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212zM1000 900h-900v-700h900v700zM200 800v-500h300v100h-200v300h200v100h-300zM600 800v-500h300v100h-200v300h200v100h-300z" />
+<glyph unicode="&#xe189;" d="M1200 800v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212zM1000 900h-900v-700h900v700zM500 700l-300 -150l300 -150v300zM600 400l300 150l-300 150v-300z" />
+<glyph unicode="&#xe190;" d="M1200 800v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212zM1000 900h-900v-700h900v700zM900 800v-500h-700v500h700zM300 400h130q41 0 68 42t27 107t-28.5 108t-66.5 43h-130v-300zM800 700h-130 q-38 0 -66.5 -43t-28.5 -108t27 -107t68 -42h130v300z" />
+<glyph unicode="&#xe191;" d="M1200 800v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212zM1000 900h-900v-700h900v700zM200 800v-300h200v-100h-200v-100h300v300h-200v100h200v100h-300zM800 300h100v500h-200v-100h100v-400z M601 300h100v100h-100v-100z" />
+<glyph unicode="&#xe192;" d="M1200 800v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212zM1000 900h-900v-700h900v700zM300 700v100h-100v-500h300v400h-200zM800 300h100v500h-200v-100h100v-400zM401 400h-100v200h100v-200z M601 300h100v100h-100v-100z" />
+<glyph unicode="&#xe193;" d="M200 1100h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212v500q0 124 88 212t212 88zM1000 900h-900v-700h900v700zM400 700h-200v100h300v-300h-99v-100h-100v100h99v200zM800 700h-100v100h200v-500h-100v400zM201 400h100v-100 h-100v100zM701 300h-100v100h100v-100z" />
+<glyph unicode="&#xe194;" d="M600 1196q162 0 299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299t80 299t217 217t299 80zM600 1014q-171 0 -292.5 -121.5t-121.5 -292.5t121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5zM800 700h-300 v-200h300v-100h-300l-100 100v200l100 100h300v-100z" />
+<glyph unicode="&#xe195;" d="M596 1196q162 0 299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299t80 299t217 217t299 80zM596 1014q-171 0 -292.5 -121.5t-121.5 -292.5t121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5zM800 700v-100 h-100v100h-200v-100h200v-100h-200v-100h-100v400h300zM800 400h-100v100h100v-100z" />
+<glyph unicode="&#xe197;" d="M800 300h128q120 0 205 86t85 208q0 120 -85 206.5t-205 86.5q-46 0 -90 -14q-44 97 -134.5 156.5t-200.5 59.5q-152 0 -260 -107.5t-108 -260.5q0 -25 2 -37q-66 -14 -108.5 -67.5t-42.5 -122.5q0 -80 56.5 -137t135.5 -57h222v300h400v-300zM700 200h200l-300 -300 l-300 300h200v300h200v-300z" />
+<glyph unicode="&#xe198;" d="M600 714l403 -403q94 26 154.5 104t60.5 178q0 121 -85 207.5t-205 86.5q-46 0 -90 -14q-44 97 -134.5 156.5t-200.5 59.5q-152 0 -260 -107.5t-108 -260.5q0 -25 2 -37q-66 -14 -108.5 -67.5t-42.5 -122.5q0 -80 56.5 -137t135.5 -57h8zM700 -100h-200v300h-200l300 300 l300 -300h-200v-300z" />
+<glyph unicode="&#xe199;" d="M700 200h400l-270 300h170l-270 300h170l-300 333l-300 -333h170l-270 -300h170l-270 -300h400v-155l-75 -45h350l-75 45v155z" />
+<glyph unicode="&#xe200;" d="M700 45v306q46 -30 100 -30q74 0 126.5 52.5t52.5 126.5q0 24 -9 55q50 32 79.5 83t29.5 112q0 90 -61.5 155.5t-150.5 71.5q-26 89 -99.5 145.5t-167.5 56.5q-116 0 -197.5 -81.5t-81.5 -197.5q0 -4 1 -12t1 -11q-14 2 -23 2q-74 0 -126.5 -52.5t-52.5 -126.5 q0 -53 28.5 -97t75.5 -65q-4 -16 -4 -38q0 -74 52.5 -126.5t126.5 -52.5q56 0 100 30v-306l-75 -45h350z" />
+<glyph unicode="&#x1f4bc;" d="M800 1000h300q41 0 70.5 -29.5t29.5 -70.5v-400h-500v100h-200v-100h-500v400q0 41 29.5 70.5t70.5 29.5h300v100q0 41 29.5 70.5t70.5 29.5h200q41 0 70.5 -29.5t29.5 -70.5v-100zM500 1000h200v100h-200v-100zM1200 400v-200q0 -41 -29.5 -70.5t-70.5 -29.5h-1000 q-41 0 -70.5 29.5t-29.5 70.5v200h1200z" />
+<glyph unicode="&#x1f4c5;" d="M1100 900v150q0 21 -14.5 35.5t-35.5 14.5h-150v100h-100v-100h-500v100h-100v-100h-150q-21 0 -35.5 -14.5t-14.5 -35.5v-150h1100zM0 800v-750q0 -20 14.5 -35t35.5 -15h1000q21 0 35.5 15t14.5 35v750h-1100zM100 600h100v-100h-100v100zM300 600h100v-100h-100v100z M500 600h100v-100h-100v100zM700 600h100v-100h-100v100zM900 600h100v-100h-100v100zM100 400h100v-100h-100v100zM300 400h100v-100h-100v100zM500 400h100v-100h-100v100zM700 400h100v-100h-100v100zM900 400h100v-100h-100v100zM100 200h100v-100h-100v100zM300 200 h100v-100h-100v100zM500 200h100v-100h-100v100zM700 200h100v-100h-100v100zM900 200h100v-100h-100v100z" />
+<glyph unicode="&#x1f4cc;" d="M902 1185l283 -282q15 -15 15 -36t-15 -35q-14 -15 -35 -15t-35 15l-36 35l-279 -267v-300l-212 210l-208 -207l-380 -303l303 380l207 208l-210 212h300l267 279l-35 36q-15 14 -15 35t15 35q14 15 35 15t35 -15z" />
+<glyph unicode="&#x1f4ce;" d="M518 119l69 -60l517 511q67 67 95 157t11 183q-16 87 -67 154t-130 103q-69 33 -152 33q-107 0 -197 -55q-40 -24 -111 -95l-512 -512q-68 -68 -81 -163t35 -173q35 -57 94 -89t129 -32q63 0 119 28q33 16 65 40.5t52.5 45.5t59.5 64q40 44 57 61l394 394q35 35 47 84 t-3 96q-27 87 -117 104q-20 2 -29 2q-46 0 -79.5 -17t-67.5 -51l-388 -396l-7 -7l69 -67l377 373q20 22 39 38q23 23 50 23q38 0 53 -36q16 -39 -20 -75l-547 -547q-52 -52 -125 -52q-55 0 -100 33t-54 96q-5 35 2.5 66t31.5 63t42 50t56 54q24 21 44 41l348 348 q52 52 82.5 79.5t84 54t107.5 26.5q25 0 48 -4q95 -17 154 -94.5t51 -175.5q-7 -101 -98 -192l-252 -249l-253 -256z" />
+<glyph unicode="&#x1f4f7;" d="M1200 200v600q0 41 -29.5 70.5t-70.5 29.5h-150q-4 8 -11.5 21.5t-33 48t-53 61t-69 48t-83.5 21.5h-200q-41 0 -82 -20.5t-70 -50t-52 -59t-34 -50.5l-12 -20h-150q-41 0 -70.5 -29.5t-29.5 -70.5v-600q0 -41 29.5 -70.5t70.5 -29.5h1000q41 0 70.5 29.5t29.5 70.5z M1000 700h-100v100h100v-100zM844 500q0 -100 -72 -172t-172 -72t-172 72t-72 172t72 172t172 72t172 -72t72 -172zM706 500q0 44 -31 75t-75 31t-75 -31t-31 -75t31 -75t75 -31t75 31t31 75z" />
+<glyph unicode="&#x1f512;" d="M900 800h100q41 0 70.5 -29.5t29.5 -70.5v-600q0 -41 -29.5 -70.5t-70.5 -29.5h-900q-41 0 -70.5 29.5t-29.5 70.5v600q0 41 29.5 70.5t70.5 29.5h100v200q0 82 59 141t141 59h300q82 0 141 -59t59 -141v-200zM400 800h300v150q0 21 -14.5 35.5t-35.5 14.5h-200 q-21 0 -35.5 -14.5t-14.5 -35.5v-150z" />
+<glyph unicode="&#x1f514;" d="M1062 400h17q20 0 33.5 -14.5t13.5 -35.5q0 -20 -13 -40t-31 -27q-22 -9 -63 -23t-167.5 -37t-251.5 -23t-245.5 20.5t-178.5 41.5l-58 20q-18 7 -31 27.5t-13 40.5q0 21 13.5 35.5t33.5 14.5h17l118 173l63 327q15 77 76 140t144 83l-18 32q-6 19 3 32t29 13h94 q20 0 29 -10.5t3 -29.5l-18 -37q83 -19 144 -82.5t76 -140.5l63 -327zM600 104q-54 0 -103 6q12 -49 40 -79.5t63 -30.5t63 30.5t39 79.5q-48 -6 -102 -6z" />
+<glyph unicode="&#x1f516;" d="M200 0l450 444l450 -443v1150q0 20 -14.5 35t-35.5 15h-800q-21 0 -35.5 -15t-14.5 -35v-1151z" />
+<glyph unicode="&#x1f525;" d="M400 755q2 -12 8 -41.5t8 -43t6 -39.5t3.5 -39.5t-1 -33.5t-6 -31.5t-13.5 -24t-21 -20.5t-31 -12q-38 -10 -67 13t-40.5 61.5t-15 81.5t10.5 75q-52 -46 -83.5 -101t-39 -107t-7.5 -85t5 -63q9 -56 44 -119.5t105 -108.5q31 -21 64 -16t62 23.5t57 49.5t48 61.5t35 60.5 q32 66 39 184.5t-13 157.5q79 -80 122 -164t26 -184q-5 -33 -20.5 -69.5t-37.5 -80.5q-10 -19 -14.5 -29t-12 -26t-9 -23.5t-3 -19t2.5 -15.5t11 -9.5t19.5 -5t30.5 2.5t42 8q57 20 91 34t87.5 44.5t87 64t65.5 88.5t47 122q38 172 -44.5 341.5t-246.5 278.5q22 -44 43 -129 q39 -159 -32 -154q-15 2 -33 9q-79 33 -120.5 100t-44 175.5t48.5 257.5q-13 -8 -34 -23.5t-72.5 -66.5t-88.5 -105.5t-60 -138t-8 -166.5z" />
+<glyph unicode="&#x1f527;" d="M948 778l251 126q13 -175 -151 -267q-123 -70 -253 -23l-596 -596q-15 -16 -36.5 -16t-36.5 16l-111 110q-15 15 -15 36.5t15 37.5l600 599q-33 101 6 201.5t135 154.5q164 92 306 -9l-259 -138z" />
+</font>
+</defs></svg> 
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/eb1b80d9/service/src/resources/hive-webapps/static/fonts/glyphicons-halflings-regular.ttf
----------------------------------------------------------------------
diff --git a/service/src/resources/hive-webapps/static/fonts/glyphicons-halflings-regular.ttf b/service/src/resources/hive-webapps/static/fonts/glyphicons-halflings-regular.ttf
new file mode 100755
index 0000000..be784dc
Binary files /dev/null and b/service/src/resources/hive-webapps/static/fonts/glyphicons-halflings-regular.ttf differ

http://git-wip-us.apache.org/repos/asf/hive/blob/eb1b80d9/service/src/resources/hive-webapps/static/fonts/glyphicons-halflings-regular.woff
----------------------------------------------------------------------
diff --git a/service/src/resources/hive-webapps/static/fonts/glyphicons-halflings-regular.woff b/service/src/resources/hive-webapps/static/fonts/glyphicons-halflings-regular.woff
new file mode 100755
index 0000000..2cc3e48
Binary files /dev/null and b/service/src/resources/hive-webapps/static/fonts/glyphicons-halflings-regular.woff differ

http://git-wip-us.apache.org/repos/asf/hive/blob/eb1b80d9/service/src/resources/hive-webapps/static/hive_logo.jpeg
----------------------------------------------------------------------
diff --git a/service/src/resources/hive-webapps/static/hive_logo.jpeg b/service/src/resources/hive-webapps/static/hive_logo.jpeg
new file mode 100644
index 0000000..8c4a5df
Binary files /dev/null and b/service/src/resources/hive-webapps/static/hive_logo.jpeg differ

http://git-wip-us.apache.org/repos/asf/hive/blob/eb1b80d9/spark-client/pom.xml
----------------------------------------------------------------------
diff --git a/spark-client/pom.xml b/spark-client/pom.xml
index 9d2b418..0405016 100644
--- a/spark-client/pom.xml
+++ b/spark-client/pom.xml
@@ -54,6 +54,12 @@
       <groupId>org.apache.hive</groupId>
       <artifactId>hive-common</artifactId>
       <version>${project.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>org.eclipse.jetty.aggregate</groupId>
+          <artifactId>jetty-all</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.spark</groupId>


[16/27] hive git commit: HIVE-12473 : DPP: UDFs on the partition column side does not evaluate correctly (Sergey Shelukhin, reviewed by Gopal V)

Posted by om...@apache.org.
HIVE-12473 : DPP: UDFs on the partition column side does not evaluate correctly (Sergey Shelukhin, reviewed by Gopal V)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c7a939ad
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c7a939ad
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c7a939ad

Branch: refs/heads/master-fixed
Commit: c7a939ad44394f56ca8818d4cac064c62cb4c025
Parents: 39a8252
Author: Sergey Shelukhin <se...@apache.org>
Authored: Wed Nov 25 12:15:29 2015 -0800
Committer: Owen O'Malley <om...@apache.org>
Committed: Mon Nov 30 11:14:36 2015 -0800

----------------------------------------------------------------------
 .../ql/exec/tez/DynamicPartitionPruner.java     | 40 ++++++++++++++------
 1 file changed, 29 insertions(+), 11 deletions(-)
----------------------------------------------------------------------
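
For context, this change stops deriving the pruner's ObjectInspector from the type of the incoming values and instead walks the partition-key expression to locate the partition column itself, so an expression such as udf(part_col) is evaluated against the column's own primitive type. Below is a minimal sketch of that lookup, assuming the ExprNodeDesc and ObjectInspector APIs visible in the diff; the wrapper class, its name, and the null-children guard are illustrative additions and not part of the commit.

// Simplified sketch (not the committed code): recursively search a partition-key
// expression such as udf(part_col) for the column node, and return a writable OI
// for that column's own primitive type.
import java.util.List;

import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;

public final class PartitionColumnOiFinder {
  /** Returns the writable OI for the named column, or null if it is not under this expression. */
  public static ObjectInspector find(ExprNodeDesc expr, String columnName) {
    if (expr instanceof ExprNodeColumnDesc
        && columnName.equals(((ExprNodeColumnDesc) expr).getColumn())) {
      return PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(
          (PrimitiveTypeInfo) expr.getTypeInfo());
    }
    List<ExprNodeDesc> children = expr.getChildren();
    if (children == null) {
      return null;  // leaf node (e.g. a constant) that is not the target column
    }
    for (ExprNodeDesc child : children) {
      ObjectInspector oi = find(child, columnName);
      if (oi != null) {
        return oi;
      }
    }
    return null;
  }

  private PartitionColumnOiFinder() {
  }
}

A caller would pass the partition-key expression and the partition column name, which is how findTargetOi is invoked in the diff that follows.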


http://git-wip-us.apache.org/repos/asf/hive/blob/c7a939ad/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DynamicPartitionPruner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DynamicPartitionPruner.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DynamicPartitionPruner.java
index b67ac8d..60b71aa 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DynamicPartitionPruner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DynamicPartitionPruner.java
@@ -18,6 +18,12 @@
 
 package org.apache.hadoop.hive.ql.exec.tez;
 
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
+
+import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
+
+import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
+
 import java.io.DataInputStream;
 import java.io.IOException;
 import java.io.InputStream;
@@ -242,25 +248,37 @@ public class DynamicPartitionPruner {
       LOG.debug(sb.toString());
     }
 
-    ObjectInspector oi =
-        PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(TypeInfoFactory
-            .getPrimitiveTypeInfo(si.fieldInspector.getTypeName()));
+    ObjectInspector targetOi = findTargetOi(si.partKey, si.columnName);
+    Converter converter = ObjectInspectorConverters.getConverter(
+            PrimitiveObjectInspectorFactory.javaStringObjectInspector, targetOi);
 
-    Converter converter =
-        ObjectInspectorConverters.getConverter(
-            PrimitiveObjectInspectorFactory.javaStringObjectInspector, oi);
-
-    StructObjectInspector soi =
-        ObjectInspectorFactory.getStandardStructObjectInspector(
-            Collections.singletonList(columnName), Collections.singletonList(oi));
+    StructObjectInspector soi = ObjectInspectorFactory.getStandardStructObjectInspector(
+            Collections.singletonList(columnName), Collections.singletonList(targetOi));
 
     @SuppressWarnings("rawtypes")
     ExprNodeEvaluator eval = ExprNodeEvaluatorFactory.get(si.partKey);
-    eval.initialize(soi);
+    eval.initialize(soi); // We expect the row with just the relevant column.
 
     applyFilterToPartitions(converter, eval, columnName, values);
   }
 
+  private ObjectInspector findTargetOi(ExprNodeDesc expr, String columnName) {
+    if (expr instanceof ExprNodeColumnDesc) {
+      ExprNodeColumnDesc colExpr = (ExprNodeColumnDesc)expr;
+      // TODO: this is not necessarily going to work for all cases. At least, table name is needed.
+      //       Also it's not clear if this is going to work with subquery columns and such.
+      if (columnName.equals(colExpr.getColumn())) {
+        return PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(
+            (PrimitiveTypeInfo)colExpr.getTypeInfo());
+      }
+    }
+    for (ExprNodeDesc child : expr.getChildren()) {
+      ObjectInspector oi = findTargetOi(child, columnName);
+      if (oi != null) return oi;
+    }
+    return null;
+  }
+
   @SuppressWarnings("rawtypes")
   private void applyFilterToPartitions(Converter converter, ExprNodeEvaluator eval,
       String columnName, Set<Object> values) throws HiveException {


[20/27] hive git commit: HIVE-12307 - Streaming API TransactionBatch.close() must abort any remaining transactions in the batch (Eugene Koifman, reviewed by Alan Gates)

Posted by om...@apache.org.
HIVE-12307 - Streaming API TransactionBatch.close() must abort any remaining transactions in the batch (Eugene Koifman, reviewed by Alan Gates)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/eb766340
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/eb766340
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/eb766340

Branch: refs/heads/master-fixed
Commit: eb766340a382eea67b74630fa5f5c4dbe6bd11ce
Parents: 6b72574
Author: Eugene Koifman <ek...@hortonworks.com>
Authored: Thu Nov 26 11:48:03 2015 -0800
Committer: Owen O'Malley <om...@apache.org>
Committed: Mon Nov 30 11:14:37 2015 -0800

----------------------------------------------------------------------
 .../streaming/AbstractRecordWriter.java         |  32 ++-
 .../hcatalog/streaming/ConnectionError.java     |   3 +-
 .../streaming/DelimitedInputWriter.java         |   2 +-
 .../hive/hcatalog/streaming/HiveEndPoint.java   | 211 +++++++++++++------
 .../hcatalog/streaming/StrictJsonWriter.java    |   2 +-
 .../hcatalog/streaming/TransactionBatch.java    |   1 +
 .../hcatalog/streaming/TransactionError.java    |   2 +-
 .../hive/hcatalog/streaming/TestStreaming.java  | 167 +++++++++++++++
 .../hadoop/hive/ql/io/orc/OrcRecordUpdater.java |   3 +
 9 files changed, 344 insertions(+), 79 deletions(-)
----------------------------------------------------------------------
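
The patch makes a TransactionBatch mark itself dead after a failed setup, write, or commit and makes close() abort any transactions that were never committed, so a client cannot keep writing to a file that may be corrupt at the tail. A minimal usage sketch under those semantics follows; the metastore URI, table, partition value, and column names are hypothetical, and it relies on the hcatalog streaming classes that appear in the diff.

import java.util.Arrays;

import org.apache.hive.hcatalog.streaming.DelimitedInputWriter;
import org.apache.hive.hcatalog.streaming.HiveEndPoint;
import org.apache.hive.hcatalog.streaming.StreamingConnection;
import org.apache.hive.hcatalog.streaming.TransactionBatch;

public class StreamingCloseSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical endpoint: metastore URI, database, table, partition values.
    HiveEndPoint endPoint = new HiveEndPoint(
        "thrift://metastore:9083", "default", "web_logs", Arrays.asList("2015-11-30"));
    StreamingConnection conn = endPoint.newConnection(true);
    DelimitedInputWriter writer =
        new DelimitedInputWriter(new String[] {"host", "msg"}, ",", endPoint);
    TransactionBatch batch = conn.fetchTransactionBatch(10, writer);
    try {
      batch.beginNextTransaction();
      batch.write("host1,hello".getBytes("UTF-8"));
      batch.commit();
      // Suppose the client stops here with transactions left in the batch.
    } finally {
      // With this change, close() aborts the remaining uncommitted transactions
      // instead of leaving them open until they time out on the metastore side.
      batch.close();
      conn.close();
    }
  }
}

The point of the change is the finally block: calling close() is now enough to release the leftover transactions, and any later write(), commit(), or beginNextTransaction() on the dead batch fails fast instead of silently writing to a possibly corrupted file.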


http://git-wip-us.apache.org/repos/asf/hive/blob/eb766340/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java
index 5c15675..0c6b9ea 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java
@@ -65,6 +65,8 @@ public abstract class AbstractRecordWriter implements RecordWriter {
 
   final AcidOutputFormat<?,?> outf;
   private Object[] bucketFieldData; // Pre-allocated in constructor. Updated on each write.
+  private Long curBatchMinTxnId;
+  private Long curBatchMaxTxnId;
 
   protected AbstractRecordWriter(HiveEndPoint endPoint, HiveConf conf)
           throws ConnectionError, StreamingException {
@@ -98,6 +100,12 @@ public abstract class AbstractRecordWriter implements RecordWriter {
     }
   }
 
+  /**
+   * used to tag error msgs to provide some breadcrumbs
+   */
+  String getWatermark() {
+    return partitionPath + " txnIds[" + curBatchMinTxnId + "," + curBatchMaxTxnId + "]";
+  }
   // return the column numbers of the bucketed columns
   private List<Integer> getBucketColIDs(List<String> bucketCols, List<FieldSchema> cols) {
     ArrayList<Integer> result =  new ArrayList<Integer>(bucketCols.size());
@@ -164,22 +172,32 @@ public abstract class AbstractRecordWriter implements RecordWriter {
           throws StreamingIOFailure, SerializationError {
     try {
       LOG.debug("Creating Record updater");
+      curBatchMinTxnId = minTxnId;
+      curBatchMaxTxnId = maxTxnID;
       updaters = createRecordUpdaters(totalBuckets, minTxnId, maxTxnID);
     } catch (IOException e) {
-      LOG.error("Failed creating record updater", e);
-      throw new StreamingIOFailure("Unable to get new record Updater", e);
+      String errMsg = "Failed creating RecordUpdaterS for " + getWatermark();
+      LOG.error(errMsg, e);
+      throw new StreamingIOFailure(errMsg, e);
     }
   }
 
   @Override
   public void closeBatch() throws StreamingIOFailure {
-    try {
-      for (RecordUpdater updater : updaters) {
+    boolean haveError = false;
+    for (RecordUpdater updater : updaters) {
+      try {
+        //try not to leave any files open
         updater.close(false);
       }
-      updaters.clear();
-    } catch (IOException e) {
-      throw new StreamingIOFailure("Unable to close recordUpdater", e);
+      catch(Exception ex) {
+        haveError = true;
+        LOG.error("Unable to close " + updater + " due to: " + ex.getMessage(), ex);
+      }
+    }
+    updaters.clear();
+    if(haveError) {
+      throw new StreamingIOFailure("Encountered errors while closing (see logs) " + getWatermark());
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/eb766340/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/ConnectionError.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/ConnectionError.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/ConnectionError.java
index ffa51c9..03f6a44 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/ConnectionError.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/ConnectionError.java
@@ -29,6 +29,7 @@ public class ConnectionError extends StreamingException {
   }
 
   public ConnectionError(HiveEndPoint endPoint, Exception innerEx) {
-    super("Error connecting to " + endPoint, innerEx);
+    super("Error connecting to " + endPoint +
+        (innerEx == null ? "" : ": " + innerEx.getMessage()), innerEx);
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/eb766340/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/DelimitedInputWriter.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/DelimitedInputWriter.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/DelimitedInputWriter.java
index 4f1154e..394cc54 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/DelimitedInputWriter.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/DelimitedInputWriter.java
@@ -243,7 +243,7 @@ public class DelimitedInputWriter extends AbstractRecordWriter {
   }
 
   @Override
-  public SerDe getSerde() throws SerializationError {
+  public SerDe getSerde() {
     return serde;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/eb766340/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
index 2f2d44a..4c77842 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
@@ -48,6 +48,7 @@ import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 
@@ -503,7 +504,6 @@ public class HiveEndPoint {
 
 
   } // class ConnectionImpl
-
   private static class TransactionBatchImpl implements TransactionBatch {
     private final String username;
     private final UserGroupInformation ugi;
@@ -512,27 +512,28 @@ public class HiveEndPoint {
     private final RecordWriter recordWriter;
     private final List<Long> txnIds;
 
-    private int currentTxnIndex;
+    private int currentTxnIndex = -1;
     private final String partNameForLock;
 
     private TxnState state;
     private LockRequest lockRequest = null;
+    /**
+     * once any operation on this batch encounters a system exception
+     * (e.g. IOException on write) it's safest to assume that we can't write to the
+     * file backing this batch any more.  This guards important public methods
+     */
+    private volatile boolean isClosed = false;
 
     /**
      * Represents a batch of transactions acquired from MetaStore
      *
-     * @param user
-     * @param ugi
-     * @param endPt
-     * @param numTxns
-     * @param msClient
-     * @param recordWriter
      * @throws StreamingException if failed to create new RecordUpdater for batch
      * @throws TransactionBatchUnAvailable if failed to acquire a new Transaction batch
      */
     private TransactionBatchImpl(final String user, UserGroupInformation ugi, HiveEndPoint endPt
               , final int numTxns, final IMetaStoreClient msClient, RecordWriter recordWriter)
             throws StreamingException, TransactionBatchUnAvailable, InterruptedException {
+      boolean success = false;
       try {
         if ( endPt.partitionVals!=null   &&   !endPt.partitionVals.isEmpty() ) {
           Table tableObj = msClient.getTable(endPt.database, endPt.table);
@@ -549,15 +550,18 @@ public class HiveEndPoint {
 
         txnIds = openTxnImpl(msClient, user, numTxns, ugi);
 
-
-        this.currentTxnIndex = -1;
         this.state = TxnState.INACTIVE;
         recordWriter.newBatch(txnIds.get(0), txnIds.get(txnIds.size()-1));
+        success = true;
       } catch (TException e) {
         throw new TransactionBatchUnAvailable(endPt, e);
       } catch (IOException e) {
         throw new TransactionBatchUnAvailable(endPt, e);
       }
+      finally {
+        //clean up if above throws
+        markDead(success);
+      }
     }
 
     private List<Long> openTxnImpl(final IMetaStoreClient msClient, final String user, final int numTxns, UserGroupInformation ugi)
@@ -589,6 +593,7 @@ public class HiveEndPoint {
     @Override
     public void beginNextTransaction() throws TransactionError, ImpersonationFailed,
             InterruptedException {
+      checkIsClosed();
       if (ugi==null) {
         beginNextTransactionImpl();
         return;
@@ -610,10 +615,12 @@ public class HiveEndPoint {
     }
 
     private void beginNextTransactionImpl() throws TransactionError {
+      state = TxnState.INACTIVE;//clear state from previous txn
       if ( currentTxnIndex >= txnIds.size() )
         throw new InvalidTrasactionState("No more transactions available in" +
                 " current batch for end point : " + endPt);
       ++currentTxnIndex;
+      state = TxnState.OPEN;
       lockRequest = createLockRequest(endPt, partNameForLock, username, getCurrentTxnId());
       try {
         LockResponse res = msClient.lock(lockRequest);
@@ -623,8 +630,6 @@ public class HiveEndPoint {
       } catch (TException e) {
         throw new TransactionError("Unable to acquire lock on " + endPt, e);
       }
-
-      state = TxnState.OPEN;
     }
 
     /**
@@ -640,7 +645,7 @@ public class HiveEndPoint {
     }
 
     /**
-     * get state of current tramsaction
+     * get state of current transaction
      * @return
      */
     @Override
@@ -672,26 +677,35 @@ public class HiveEndPoint {
      */
     @Override
     public void write(final byte[] record)
-            throws StreamingException, InterruptedException,
-            ImpersonationFailed {
-      if (ugi==null) {
-        recordWriter.write(getCurrentTxnId(), record);
+            throws StreamingException, InterruptedException {
+      write(Collections.singletonList(record));
+    }
+    private void checkIsClosed() throws IllegalStateException {
+      if(isClosed) {
+        throw new IllegalStateException("TransactionBatch " + toString() + " has been closed()");
+      }
+    }
+    /**
+     * A transaction batch opens a single HDFS file and writes multiple transactions to it.  If there is any issue
+     * with the write, we can't continue to write to the same file any more, as it may be corrupted now (at the tail).
+     * This ensures that a client can't ignore these failures and continue to write.
+     */
+    private void markDead(boolean success) {
+      if(success) {
         return;
       }
+      isClosed = true;//also ensures that heartbeat() is no-op since client is likely doing it async
       try {
-        ugi.doAs (
-            new PrivilegedExceptionAction<Void>() {
-              @Override
-              public Void run() throws StreamingException {
-                recordWriter.write(getCurrentTxnId(), record);
-                return null;
-              }
-            }
-        );
-      } catch (IOException e) {
-        throw new ImpersonationFailed("Failed wirting as user '" + username +
-                "' to endPoint :" + endPt + ". Transaction Id: "
-                + getCurrentTxnId(), e);
+        abort(true);//abort all remaining txns
+      }
+      catch(Exception ex) {
+        LOG.error("Fatal error on " + toString() + "; cause " + ex.getMessage(), ex);
+      }
+      try {
+        closeImpl();
+      }
+      catch (Exception ex) {
+        LOG.error("Fatal error on " + toString() + "; cause " + ex.getMessage(), ex);
       }
     }
 
@@ -707,24 +721,37 @@ public class HiveEndPoint {
     public void write(final Collection<byte[]> records)
             throws StreamingException, InterruptedException,
             ImpersonationFailed {
-      if (ugi==null) {
-        writeImpl(records);
-        return;
-      }
+      checkIsClosed();
+      boolean success = false;
       try {
-        ugi.doAs (
-                new PrivilegedExceptionAction<Void>() {
-                  @Override
-                  public Void run() throws StreamingException {
-                    writeImpl(records);
-                    return null;
-                  }
-                }
-        );
-      } catch (IOException e) {
+        if (ugi == null) {
+          writeImpl(records);
+        } else {
+          ugi.doAs(
+            new PrivilegedExceptionAction<Void>() {
+              @Override
+              public Void run() throws StreamingException {
+                writeImpl(records);
+                return null;
+              }
+            }
+          );
+        }
+        success = true;
+      } catch(SerializationError ex) {
+        //this exception indicates that a {@code record} could not be parsed and the
+        //caller can decide whether to drop it or send it to dead letter queue.
+        //rolling back the txn and retrying won't help since the tuple will be exactly the same
+        //when it's replayed.
+        success = true;
+        throw ex;
+      } catch(IOException e){
         throw new ImpersonationFailed("Failed writing as user '" + username +
-                "' to endPoint :" + endPt + ". Transaction Id: "
-                + getCurrentTxnId(), e);
+          "' to endPoint :" + endPt + ". Transaction Id: "
+          + getCurrentTxnId(), e);
+      }
+      finally {
+        markDead(success);
       }
     }
 
@@ -746,25 +773,31 @@ public class HiveEndPoint {
     @Override
     public void commit()  throws TransactionError, StreamingException,
            ImpersonationFailed, InterruptedException {
-      if (ugi==null) {
-        commitImpl();
-        return;
-      }
+      checkIsClosed();
+      boolean success = false;
       try {
-        ugi.doAs (
-              new PrivilegedExceptionAction<Void>() {
-                @Override
-                public Void run() throws StreamingException {
-                  commitImpl();
-                  return null;
-                }
+        if (ugi == null) {
+          commitImpl();
+        }
+        else {
+          ugi.doAs(
+            new PrivilegedExceptionAction<Void>() {
+              @Override
+              public Void run() throws StreamingException {
+                commitImpl();
+                return null;
               }
-        );
+            }
+          );
+        }
+        success = true;
       } catch (IOException e) {
         throw new ImpersonationFailed("Failed committing Txn ID " + getCurrentTxnId() + " as user '"
                 + username + "'on endPoint :" + endPt + ". Transaction Id: ", e);
       }
-
+      finally {
+        markDead(success);
+      }
     }
 
     private void commitImpl() throws TransactionError, StreamingException {
@@ -791,8 +824,20 @@ public class HiveEndPoint {
     @Override
     public void abort() throws TransactionError, StreamingException
                       , ImpersonationFailed, InterruptedException {
+      if(isClosed) {
+        /**
+         * isClosed is only set to true internally by this class (via {@link #markDead(boolean)}), which aborts all
+         * remaining txns, so make this a no-op to ensure that a well-behaved client that calls abort()
+         * after an error doesn't get misleading errors
+         */
+        return;
+      }
+      abort(false);
+    }
+    private void abort(final boolean abortAllRemaining) throws TransactionError, StreamingException
+        , ImpersonationFailed, InterruptedException {
       if (ugi==null) {
-        abortImpl();
+        abortImpl(abortAllRemaining);
         return;
       }
       try {
@@ -800,7 +845,7 @@ public class HiveEndPoint {
                 new PrivilegedExceptionAction<Void>() {
                   @Override
                   public Void run() throws StreamingException {
-                    abortImpl();
+                    abortImpl(abortAllRemaining);
                     return null;
                   }
                 }
@@ -811,11 +856,26 @@ public class HiveEndPoint {
       }
     }
 
-    private void abortImpl() throws TransactionError, StreamingException {
+    private void abortImpl(boolean abortAllRemaining) throws TransactionError, StreamingException {
       try {
-        recordWriter.clear();
-        msClient.rollbackTxn(getCurrentTxnId());
+        if(abortAllRemaining) {
+          //when the last txn finished (abort/commit), currentTxnIndex still points at that txn,
+          //so we need to start from the next one, if any.  Also, if the batch was created but
+          //beginNextTransaction() was never called, we want to start with the first txn
+          int minOpenTxnIndex = Math.max(currentTxnIndex +
+            (state == TxnState.ABORTED || state == TxnState.COMMITTED ? 1 : 0), 0);
+          for(currentTxnIndex = minOpenTxnIndex;
+              currentTxnIndex < txnIds.size(); currentTxnIndex++) {
+            msClient.rollbackTxn(txnIds.get(currentTxnIndex));
+          }
+        }
+        else {
+          if (getCurrentTxnId() > 0) {
+            msClient.rollbackTxn(getCurrentTxnId());
+          }
+        }
         state = TxnState.ABORTED;
+        recordWriter.clear();
       } catch (NoSuchTxnException e) {
         throw new TransactionError("Unable to abort invalid transaction id : "
                 + getCurrentTxnId(), e);
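
To make the minOpenTxnIndex arithmetic in abortImpl(true) above concrete, here is a worked example with illustrative values (it assumes currentTxnIndex is still initialized to -1 before the first beginNextTransaction(), as it was before this patch):

    // Suppose txnIds = [10, 11, 12], i.e. a batch of 3 txns.
    // Case 1: txn 10 committed, txn 11 open when markDead() fires:
    //   currentTxnIndex == 1, state == OPEN      -> minOpenTxnIndex = max(1 + 0, 0) = 1
    //   txns 11 and 12 are rolled back.
    // Case 2: txn 12 (the last one) just committed:
    //   currentTxnIndex == 2, state == COMMITTED -> minOpenTxnIndex = max(2 + 1, 0) = 3
    //   the loop body never runs; nothing left to roll back.
    // Case 3: batch fetched but beginNextTransaction() never called:
    //   currentTxnIndex == -1, state == INACTIVE -> minOpenTxnIndex = max(-1 + 0, 0) = 0
    //   all three txns are rolled back.
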
@@ -827,6 +887,9 @@ public class HiveEndPoint {
 
     @Override
     public void heartbeat() throws StreamingException, HeartBeatFailure {
+      if(isClosed) {
+        return;
+      }
       Long first = txnIds.get(currentTxnIndex);
       Long last = txnIds.get(txnIds.size()-1);
       try {
@@ -840,14 +903,27 @@ public class HiveEndPoint {
       }
     }
 
+    @Override
+    public boolean isClosed() {
+      return isClosed;
+    }
     /**
-     * Close the TransactionBatch
+     * Close the TransactionBatch.  This will abort any still open txns in this batch.
      * @throws StreamingIOFailure I/O failure when closing transaction batch
      */
     @Override
     public void close() throws StreamingException, ImpersonationFailed, InterruptedException {
-      if (ugi==null) {
-        state = TxnState.INACTIVE;
+      if(isClosed) {
+        return;
+      }
+      isClosed = true;
+      abortImpl(true);//abort proactively so that we don't wait for timeout
+      closeImpl();//perhaps we should add a version of RecordWriter.closeBatch(boolean abort) which
+      //will call RecordUpdater.close(boolean abort)
+    }
+    private void closeImpl() throws StreamingException, InterruptedException{
+      state = TxnState.INACTIVE;
+      if(ugi == null) {
         recordWriter.closeBatch();
         return;
       }
@@ -856,7 +932,6 @@ public class HiveEndPoint {
                 new PrivilegedExceptionAction<Void>() {
                   @Override
                   public Void run() throws StreamingException {
-                    state = TxnState.INACTIVE;
                     recordWriter.closeBatch();
                     return null;
                   }
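
For context, a minimal client-side sketch of how a caller is expected to react to the fail-fast semantics introduced above. This is not code from this patch; the class and helper names, the batch size of 10, and the parameters are illustrative assumptions:

    import org.apache.hive.hcatalog.streaming.*;

    /** Illustrative helper only; not part of this patch. */
    class StreamingClientSketch {
      static void writeOne(StreamingConnection connection, RecordWriter writer, byte[] record)
          throws StreamingException, InterruptedException {
        TransactionBatch batch = connection.fetchTransactionBatch(10, writer);
        try {
          batch.beginNextTransaction();
          batch.write(record);
          batch.commit();
        } catch (SerializationError e) {
          // the record itself is unparseable; the batch stays usable, so just abort this txn
          // (or drop the record and keep going)
          batch.abort();
        } finally {
          // if write()/commit() failed for any other reason, markDead() has already aborted
          // the remaining txns and closed the file, so isClosed() is true and we skip close()
          if (!batch.isClosed()) {
            batch.close();
          }
        }
      }
    }

Any failure other than SerializationError leaves the batch dead; the only sensible recovery for the caller is to fetch a new batch and start over.
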

http://git-wip-us.apache.org/repos/asf/hive/blob/eb766340/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictJsonWriter.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictJsonWriter.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictJsonWriter.java
index 28ea7d6..db73d6b 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictJsonWriter.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictJsonWriter.java
@@ -86,7 +86,7 @@ public class StrictJsonWriter extends AbstractRecordWriter {
   }
 
   @Override
-  public SerDe getSerde() throws SerializationError {
+  public SerDe getSerde() {
     return serde;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/eb766340/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionBatch.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionBatch.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionBatch.java
index d9a083d..3c8670d 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionBatch.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionBatch.java
@@ -100,4 +100,5 @@ public interface TransactionBatch  {
    * @throws InterruptedException if call in interrupted
    */
   public void close() throws StreamingException, InterruptedException;
+  public boolean isClosed();
 }
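
The new isClosed() accessor pairs with the heartbeat() no-op above: a client that heartbeats asynchronously can use it to stop touching a batch once it has been marked dead. An illustrative fragment (the scheduled executor, the 30-second period, and the variable names are assumptions; 'connection' and 'writer' are used as in the test below):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    final TransactionBatch batch = connection.fetchTransactionBatch(10, writer);
    ScheduledExecutorService heartbeater = Executors.newSingleThreadScheduledExecutor();
    heartbeater.scheduleAtFixedRate(new Runnable() {
      @Override
      public void run() {
        try {
          if (!batch.isClosed()) {   // a dead batch no longer needs heartbeats
            batch.heartbeat();
          }
        } catch (Exception e) {
          // a failed heartbeat is logged/ignored here; it does not by itself kill the batch
        }
      }
    }, 30, 30, TimeUnit.SECONDS);
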

http://git-wip-us.apache.org/repos/asf/hive/blob/eb766340/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionError.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionError.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionError.java
index dd9c83d..198d077 100644
--- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionError.java
+++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionError.java
@@ -20,7 +20,7 @@ package org.apache.hive.hcatalog.streaming;
 
 public class TransactionError extends StreamingException {
   public TransactionError(String msg, Exception e) {
-    super(msg, e);
+    super(msg + (e == null ? "" : ": " + e.getMessage()), e);
   }
 
   public TransactionError(String msg) {
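
The effect of the constructor change above, shown with made-up values (the message and cause are illustrative only):

    Exception cause = new java.io.IOException("Connection reset");
    TransactionError te = new TransactionError("Unable to abort invalid transaction id : 42", cause);
    // before this patch: te.getMessage() -> "Unable to abort invalid transaction id : 42"
    // after this patch:  te.getMessage() -> "Unable to abort invalid transaction id : 42: Connection reset"

This matters for callers (and logs) that print only getMessage() on the wrapper and would otherwise lose the cause's detail.
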

http://git-wip-us.apache.org/repos/asf/hive/blob/eb766340/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
index 1723ff1..d38cdc0 100644
--- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
+++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
@@ -29,11 +29,14 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement;
 import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
+import org.apache.hadoop.hive.metastore.api.TxnInfo;
+import org.apache.hadoop.hive.metastore.api.TxnState;
 import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
 import org.apache.hadoop.hive.ql.CommandNeedRetryException;
 import org.apache.hadoop.hive.ql.Driver;
@@ -1189,7 +1192,120 @@ public class TestStreaming {
 
 
   }
+  private void runCmdOnDriver(String cmd) throws QueryFailedException {
+    boolean t = runDDL(driver, cmd);
+    Assert.assertTrue(cmd + " failed", t);
+  }
+  
+
+  @Test
+  public void testErrorHandling() throws Exception {
+    runCmdOnDriver("create database testErrors");
+    runCmdOnDriver("use testErrors");
+    runCmdOnDriver("create table T(a int, b int) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
+
+    HiveEndPoint endPt = new HiveEndPoint(metaStoreURI, "testErrors", "T", null);
+    DelimitedInputWriter innerWriter = new DelimitedInputWriter("a,b".split(","),",", endPt);
+    FaultyWriter writer = new FaultyWriter(innerWriter);
+    StreamingConnection connection = endPt.newConnection(false);
+
+    TransactionBatch txnBatch =  connection.fetchTransactionBatch(2, writer);
+    txnBatch.close();
+    txnBatch.heartbeat();//this is no-op on closed batch
+    txnBatch.abort();//ditto
+    GetOpenTxnsInfoResponse r = msClient.showTxns();
+    Assert.assertEquals("HWM didn't match", 2, r.getTxn_high_water_mark());
+    List<TxnInfo> ti = r.getOpen_txns();
+    Assert.assertEquals("wrong status ti(0)", TxnState.ABORTED, ti.get(0).getState());
+    Assert.assertEquals("wrong status ti(1)", TxnState.ABORTED, ti.get(1).getState());
+
+    Exception expectedEx = null;
+    try {
+      txnBatch.beginNextTransaction();
+    }
+    catch(IllegalStateException ex) {
+      expectedEx = ex;
+    }
+    Assert.assertTrue("beginNextTransaction() should have failed",
+      expectedEx != null && expectedEx.getMessage().contains("has been closed()"));
+    expectedEx = null;
+    try {
+      txnBatch.write("name0,1,Hello streaming".getBytes());
+    }
+    catch(IllegalStateException ex) {
+      expectedEx = ex;
+    }
+    Assert.assertTrue("write()  should have failed",
+      expectedEx != null && expectedEx.getMessage().contains("has been closed()"));
+    expectedEx = null;
+    try {
+      txnBatch.commit();
+    }
+    catch(IllegalStateException ex) {
+      expectedEx = ex;
+    }
+    Assert.assertTrue("commit() should have failed",
+      expectedEx != null && expectedEx.getMessage().contains("has been closed()"));
+
+    txnBatch =  connection.fetchTransactionBatch(2, writer);
+    txnBatch.beginNextTransaction();
+    txnBatch.write("name2,2,Welcome to streaming".getBytes());
+    txnBatch.write("name4,2,more Streaming unlimited".getBytes());
+    txnBatch.write("name5,2,even more Streaming unlimited".getBytes());
+    txnBatch.commit();
+    
+    expectedEx = null;
+    txnBatch.beginNextTransaction();
+    writer.enableErrors();
+    try {
+      txnBatch.write("name6,2,Doh!".getBytes());
+    }
+    catch(StreamingIOFailure ex) {
+      expectedEx = ex;
+    }
+    Assert.assertTrue("Wrong exception: " + (expectedEx != null ? expectedEx.getMessage() : "?"),
+      expectedEx != null && expectedEx.getMessage().contains("Simulated fault occurred"));
+    expectedEx = null;
+    try {
+      txnBatch.commit();
+    }
+    catch(IllegalStateException ex) {
+      expectedEx = ex;
+    }
+    Assert.assertTrue("commit() should have failed",
+      expectedEx != null && expectedEx.getMessage().contains("has been closed()"));
+
+    r = msClient.showTxns();
+    Assert.assertEquals("HWM didn't match", 4, r.getTxn_high_water_mark());
+    ti = r.getOpen_txns();
+    Assert.assertEquals("wrong status ti(0)", TxnState.ABORTED, ti.get(0).getState());
+    Assert.assertEquals("wrong status ti(1)", TxnState.ABORTED, ti.get(1).getState());
+    //txnid 3 was committed and thus not open
+    Assert.assertEquals("wrong status ti(2)", TxnState.ABORTED, ti.get(2).getState());
+
+    writer.disableErrors();
+    txnBatch =  connection.fetchTransactionBatch(2, writer);
+    txnBatch.beginNextTransaction();
+    txnBatch.write("name2,2,Welcome to streaming".getBytes());
+    writer.enableErrors();
+    expectedEx = null;
+    try {
+      txnBatch.commit();
+    }
+    catch(StreamingIOFailure ex) {
+      expectedEx = ex;
+    }
+    Assert.assertTrue("Wrong exception: " + (expectedEx != null ? expectedEx.getMessage() : "?"),
+      expectedEx != null && expectedEx.getMessage().contains("Simulated fault occurred"));
+    
+    r = msClient.showTxns();
+    Assert.assertEquals("HWM didn't match", 6, r.getTxn_high_water_mark());
+    ti = r.getOpen_txns();
+    Assert.assertEquals("wrong status ti(3)", TxnState.ABORTED, ti.get(3).getState());
+    Assert.assertEquals("wrong status ti(4)", TxnState.ABORTED, ti.get(4).getState());
 
+    txnBatch.abort();
+  }
 
     // assumes un partitioned table
   // returns a map<bucketNum, list<record> >
@@ -1411,4 +1527,55 @@ public class TestStreaming {
               " }";
     }
   }
+  /**
+   * This is test-only wrapper around the real RecordWriter.
+   * It can simulate faults from lower levels to test error handling logic.
+   */
+  private static final class FaultyWriter implements RecordWriter {
+    private final RecordWriter delegate;
+    private boolean shouldThrow = false;
+
+    private FaultyWriter(RecordWriter delegate) {
+      assert delegate != null;
+      this.delegate = delegate;
+    }
+    @Override
+    public void write(long transactionId, byte[] record) throws StreamingException {
+      delegate.write(transactionId, record);
+      produceFault();
+    }
+    @Override
+    public void flush() throws StreamingException {
+      delegate.flush();
+      produceFault();
+    }
+    @Override
+    public void clear() throws StreamingException {
+      delegate.clear();
+    }
+    @Override
+    public void newBatch(Long minTxnId, Long maxTxnID) throws StreamingException {
+      delegate.newBatch(minTxnId, maxTxnID);
+    }
+    @Override
+    public void closeBatch() throws StreamingException {
+      delegate.closeBatch();
+    }
+
+    /**
+     * allows testing of "unexpected" errors
+     * @throws StreamingIOFailure
+     */
+    private void produceFault() throws StreamingIOFailure {
+      if(shouldThrow) {
+        throw new StreamingIOFailure("Simulated fault occurred");
+      }
+    }
+    void enableErrors() {
+      shouldThrow = true;
+    }
+    void disableErrors() {
+      shouldThrow = false;
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/eb766340/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
index ee31c23..9098e84 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
@@ -268,6 +268,9 @@ public class OrcRecordUpdater implements RecordUpdater {
     item.setFieldValue(ROW_ID, rowId);
   }
 
+  public String toString() {
+    return getClass().getName() + "[" + path +"]";
+  }
   /**
    * To handle multiple INSERT... statements in a single transaction, we want to make sure
    * to generate unique {@code rowId} for all inserted rows of the transaction.