Posted to commits@hive.apache.org by mg...@apache.org on 2020/05/09 13:08:54 UTC

[hive] branch master updated: HIVE-23337 Move q tests to TestMiniLlapLocal from TestCliDriver where the output is different, batch 2 (Miklos Gergely, reviewed by Jesus Camacho Rodriguez)

This is an automated email from the ASF dual-hosted git repository.

mgergely pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new 6cafbad  HIVE-23337 Move q tests to TestMiniLlapLocal from TestCliDriver where the output is different, batch 2 (Miklos Gergely, reviewed by Jesus Camacho Rodriguez)
6cafbad is described below

commit 6cafbad8e5ec8d43cac5982376e3ba910ef10bcf
Author: miklosgergely <mg...@cloudera.com>
AuthorDate: Sat May 9 15:08:24 2020 +0200

    HIVE-23337 Move q tests to TestMiniLlapLocal from TestCliDriver where the output is different, batch 2 (Miklos Gergely, reviewed by Jesus Camacho Rodriguez)
---
 .../test/resources/testconfiguration.properties    |  273 +-
 .../queries/clientpositive/groupby_duplicate_key.q |   14 +-
 .../clientpositive/groupby_grouping_sets_limit.q   |   24 +-
 .../clientpositive/groupby_grouping_sets_view.q    |    2 +
 .../test/queries/clientpositive/groupby_sort_9.q   |    2 +
 .../clientpositive/groupingset_high_columns.q      |    2 +
 .../infer_bucket_sort_grouping_operators.q         |    2 +
 ql/src/test/queries/clientpositive/insert0.q       |    6 +-
 ql/src/test/queries/clientpositive/join45.q        |   28 +
 ql/src/test/queries/clientpositive/join47.q        |   28 +
 ql/src/test/queries/clientpositive/join_star.q     |    2 +
 .../test/queries/clientpositive/leadlag_queries.q  |    2 +
 .../test/queries/clientpositive/load_dyn_part11.q  |    2 +
 ql/src/test/queries/clientpositive/masking_1.q     |    2 +
 ql/src/test/queries/clientpositive/masking_2.q     |    2 +
 ql/src/test/queries/clientpositive/masking_3.q     |    2 +
 .../queries/clientpositive/masking_disablecbo_1.q  |    2 +
 .../queries/clientpositive/masking_disablecbo_2.q  |    2 +
 .../queries/clientpositive/masking_disablecbo_3.q  |    2 +
 ...roupby_complex_types_multi_single_reducer.q.out |  266 -
 .../results/clientpositive/groupby_cube1.q.out     |  884 ---
 .../clientpositive/groupby_cube_multi_gby.q.out    |  250 -
 .../clientpositive/groupby_duplicate_key.q.out     |  319 -
 .../clientpositive/groupby_grouping_id3.q.out      |  207 -
 .../clientpositive/groupby_grouping_sets1.q.out    |  620 --
 .../clientpositive/groupby_grouping_sets2.q.out    |  479 --
 .../clientpositive/groupby_grouping_sets3.q.out    |  298 -
 .../clientpositive/groupby_grouping_sets4.q.out    |  603 --
 .../clientpositive/groupby_grouping_sets5.q.out    |  383 -
 .../clientpositive/groupby_grouping_sets6.q.out    |  166 -
 .../groupby_grouping_sets_grouping.q.out           | 1408 ----
 .../groupby_grouping_sets_limit.q.out              |  518 --
 .../clientpositive/groupby_grouping_window.q.out   |  210 -
 .../clientpositive/groupby_join_pushdown.q.out     | 1847 -----
 .../results/clientpositive/groupby_map_ppr.q.out   |  405 -
 .../groupby_map_ppr_multi_distinct.q.out           |  407 -
 .../groupby_multi_insert_common_distinct.q.out     |  297 -
 .../groupby_multi_single_reducer2.q.out            |  283 -
 .../groupby_multi_single_reducer3.q.out            | 1080 ---
 .../clientpositive/groupby_multialias.q.out        |   73 -
 .../results/clientpositive/groupby_position.q.out  | 1891 -----
 .../test/results/clientpositive/groupby_ppd.q.out  |  106 -
 .../test/results/clientpositive/groupby_ppr.q.out  |  392 -
 .../groupby_ppr_multi_distinct.q.out               |  780 --
 .../results/clientpositive/groupby_rollup1.q.out   |  717 --
 .../results/clientpositive/groupby_sort_11.q.out   |  543 --
 .../results/clientpositive/groupby_sort_1_23.q.out | 7809 ------------------
 .../results/clientpositive/groupby_sort_2.q.out    |  185 -
 .../results/clientpositive/groupby_sort_3.q.out    |  370 -
 .../results/clientpositive/groupby_sort_4.q.out    |  341 -
 .../results/clientpositive/groupby_sort_6.q.out    |  883 ---
 .../clientpositive/groupby_sort_skew_1_23.q.out    | 8374 --------------------
 .../clientpositive/groupby_sort_test_1.q.out       |  173 -
 ql/src/test/results/clientpositive/hashjoin.q.out  |  566 --
 ql/src/test/results/clientpositive/having2.q.out   |  662 --
 .../implicit_cast_during_insert.q.out              |  140 -
 .../results/clientpositive/in_typecheck_char.q.out |  416 -
 .../clientpositive/in_typecheck_pointlook.q.out    |  185 -
 .../clientpositive/in_typecheck_varchar.q.out      |  189 -
 .../infer_bucket_sort_num_buckets.q.out            |  532 --
 .../results/clientpositive/infer_join_preds.q.out  | 1328 ----
 .../test/results/clientpositive/innerjoin1.q.out   |  250 -
 ql/src/test/results/clientpositive/input11.q.out   |  243 -
 .../results/clientpositive/input11_limit.q.out     |  115 -
 .../test/results/clientpositive/input1_limit.q.out |  300 -
 ql/src/test/results/clientpositive/input21.q.out   |   84 -
 ql/src/test/results/clientpositive/input22.q.out   |  100 -
 ql/src/test/results/clientpositive/input23.q.out   |  182 -
 ql/src/test/results/clientpositive/input25.q.out   |  181 -
 ql/src/test/results/clientpositive/input26.q.out   |  169 -
 .../test/results/clientpositive/input2_limit.q.out |   60 -
 ql/src/test/results/clientpositive/input30.q.out   |  247 -
 ql/src/test/results/clientpositive/input32.q.out   |  131 -
 .../test/results/clientpositive/input3_limit.q.out |  203 -
 .../test/results/clientpositive/input4_limit.q.out |  101 -
 ql/src/test/results/clientpositive/input6.q.out    |  159 -
 ql/src/test/results/clientpositive/input7.q.out    |  180 -
 ql/src/test/results/clientpositive/input8.q.out    |  181 -
 ql/src/test/results/clientpositive/input9.q.out    |  162 -
 .../test/results/clientpositive/input_part1.q.out  |  535 --
 .../test/results/clientpositive/input_part10.q.out |  189 -
 .../test/results/clientpositive/input_part2.q.out  |  884 ---
 .../test/results/clientpositive/input_part7.q.out  |  587 --
 .../results/clientpositive/input_testxpath.q.out   |  167 -
 .../results/clientpositive/input_testxpath2.q.out  |  170 -
 .../insert2_overwrite_partitions.q.out             |  379 -
 .../clientpositive/insertoverwrite_bucket.q.out    |  373 -
 ql/src/test/results/clientpositive/join11.q.out    |  256 -
 ql/src/test/results/clientpositive/join12.q.out    |  396 -
 ql/src/test/results/clientpositive/join13.q.out    |  329 -
 ql/src/test/results/clientpositive/join16.q.out    |   77 -
 ql/src/test/results/clientpositive/join19.q.out    |  377 -
 ql/src/test/results/clientpositive/join22.q.out    |  125 -
 ql/src/test/results/clientpositive/join23.q.out    |  206 -
 ql/src/test/results/clientpositive/join25.q.out    |  213 -
 ql/src/test/results/clientpositive/join26.q.out    |  651 --
 ql/src/test/results/clientpositive/join27.q.out    |  217 -
 ql/src/test/results/clientpositive/join28.q.out    |  329 -
 ql/src/test/results/clientpositive/join29.q.out    |  376 -
 ql/src/test/results/clientpositive/join30.q.out    |  208 -
 ql/src/test/results/clientpositive/join31.q.out    |  253 -
 ql/src/test/results/clientpositive/join32.q.out    |  629 --
 ql/src/test/results/clientpositive/join33.q.out    |  629 --
 ql/src/test/results/clientpositive/join34.q.out    |  551 --
 ql/src/test/results/clientpositive/join35.q.out    |  732 --
 ql/src/test/results/clientpositive/join37.q.out    |  213 -
 ql/src/test/results/clientpositive/join38.q.out    |  163 -
 ql/src/test/results/clientpositive/join4.q.out     |  223 -
 ql/src/test/results/clientpositive/join41.q.out    |  180 -
 ql/src/test/results/clientpositive/join42.q.out    |  415 -
 ql/src/test/results/clientpositive/join44.q.out    |   92 -
 ql/src/test/results/clientpositive/join45.q.out    | 1979 -----
 ql/src/test/results/clientpositive/join47.q.out    | 1957 -----
 ql/src/test/results/clientpositive/join5.q.out     |  221 -
 ql/src/test/results/clientpositive/join6.q.out     |  226 -
 ql/src/test/results/clientpositive/join7.q.out     |  294 -
 ql/src/test/results/clientpositive/join8.q.out     |  220 -
 .../results/clientpositive/join_alt_syntax.q.out   |  786 --
 .../join_by_range_rule_not_null.q.out              |  442 --
 .../clientpositive/join_cond_pushdown_1.q.out      |  489 --
 .../clientpositive/join_cond_pushdown_2.q.out      |  348 -
 .../clientpositive/join_cond_pushdown_3.q.out      |  497 --
 .../clientpositive/join_cond_pushdown_4.q.out      |  352 -
 .../join_cond_pushdown_unqual1.q.out               |  569 --
 .../join_cond_pushdown_unqual2.q.out               |  412 -
 .../join_cond_pushdown_unqual3.q.out               |  577 --
 .../join_cond_pushdown_unqual4.q.out               |  416 -
 .../join_cond_pushdown_unqual5.q.out               |  215 -
 .../clientpositive/join_filters_overlap.q.out      | 2033 -----
 .../clientpositive/join_grp_diff_keys.q.out        |  277 -
 .../results/clientpositive/join_hive_626.q.out     |  206 -
 .../join_merge_multi_expressions.q.out             |  173 -
 .../test/results/clientpositive/join_merging.q.out |  263 -
 .../results/clientpositive/join_on_varchar.q.out   |  156 -
 .../test/results/clientpositive/join_parse.q.out   |  603 --
 .../test/results/clientpositive/join_reorder.q.out |  724 --
 .../results/clientpositive/join_reorder2.q.out     |  444 --
 .../results/clientpositive/join_reorder3.q.out     |  444 --
 .../results/clientpositive/join_reorder4.q.out     |  438 -
 ql/src/test/results/clientpositive/join_star.q.out |  946 ---
 .../test/results/clientpositive/join_thrift.q.out  |  119 -
 ql/src/test/results/clientpositive/join_view.q.out |  147 -
 .../results/clientpositive/lateral_view_cp.q.out   |  164 -
 .../clientpositive/lateral_view_explode2.q.out     |  108 -
 .../clientpositive/lateral_view_noalias.q.out      |  325 -
 .../clientpositive/lateral_view_onview.q.out       |  856 --
 .../clientpositive/lateral_view_onview2.q.out      |  175 -
 .../clientpositive/lateral_view_outer.q.out        |  294 -
 .../results/clientpositive/lateral_view_ppd.q.out  |  525 --
 .../results/clientpositive/limit_pushdown2.q.out   | 1236 ---
 .../clientpositive/limit_pushdown_negative.q.out   |  463 --
 ql/src/test/results/clientpositive/lineage1.q.out  |  327 -
 .../clientpositive/list_bucket_dml_14.q.out        |  431 -
 .../results/clientpositive/list_bucket_dml_4.q.out |  984 ---
 .../results/clientpositive/list_bucket_dml_9.q.out |  984 ---
 .../clientpositive/{ => llap}/groupby9.q.out       | 1635 ++--
 .../{ => llap}/groupby_complex_types.q.out         |  236 +-
 ...roupby_complex_types_multi_single_reducer.q.out |  262 +
 .../clientpositive/llap/groupby_cube1.q.out        |  895 +++
 .../llap/groupby_cube_multi_gby.q.out              |  233 +
 .../{ => llap}/groupby_distinct_samekey.q.out      |  150 +-
 .../llap/groupby_duplicate_key.q.out               |  372 +
 .../clientpositive/llap/groupby_grouping_id3.q.out |  227 +
 .../llap/groupby_grouping_sets1.q.out              |  690 ++
 .../llap/groupby_grouping_sets2.q.out              |  483 ++
 .../llap/groupby_grouping_sets3.q.out              |  319 +
 .../llap/groupby_grouping_sets4.q.out              |  502 ++
 .../llap/groupby_grouping_sets5.q.out              |  377 +
 .../llap/groupby_grouping_sets6.q.out              |  186 +
 .../llap/groupby_grouping_sets_grouping.q.out      | 1540 ++++
 .../llap/groupby_grouping_sets_limit.q.out         |  696 ++
 .../{ => llap}/groupby_grouping_sets_view.q.out    |    0
 .../llap/groupby_grouping_window.q.out             |  209 +
 .../llap/groupby_join_pushdown.q.out               | 1606 ++++
 .../clientpositive/llap/groupby_map_ppr.q.out      |  368 +
 .../llap/groupby_map_ppr_multi_distinct.q.out      |  370 +
 .../groupby_multi_insert_common_distinct.q.out     |  280 +
 .../{ => llap}/groupby_multi_single_reducer.q.out  |  580 +-
 .../llap/groupby_multi_single_reducer2.q.out       |  276 +
 .../llap/groupby_multi_single_reducer3.q.out       | 1054 +++
 .../clientpositive/llap/groupby_multialias.q.out   |   82 +
 .../{ => llap}/groupby_nocolumnalign.q.out         |  163 +-
 .../clientpositive/llap/groupby_position.q.out     | 1850 +++++
 .../results/clientpositive/llap/groupby_ppd.q.out  |  120 +
 .../results/clientpositive/llap/groupby_ppr.q.out  |  354 +
 .../llap/groupby_ppr_multi_distinct.q.out          |  704 ++
 .../clientpositive/llap/groupby_rollup1.q.out      |  708 ++
 .../{ => llap}/groupby_sort_10.q.out               |  111 +-
 .../clientpositive/llap/groupby_sort_11.q.out      |  644 ++
 .../clientpositive/llap/groupby_sort_1_23.q.out    | 5704 +++++++++++++
 .../clientpositive/llap/groupby_sort_2.q.out       |  188 +
 .../clientpositive/llap/groupby_sort_3.q.out       |  310 +
 .../clientpositive/llap/groupby_sort_4.q.out       |  347 +
 .../clientpositive/{ => llap}/groupby_sort_5.q.out |  493 +-
 .../clientpositive/llap/groupby_sort_6.q.out       |  769 ++
 .../clientpositive/{ => llap}/groupby_sort_7.q.out |  168 +-
 .../clientpositive/{ => llap}/groupby_sort_8.q.out |   82 +-
 .../clientpositive/{ => llap}/groupby_sort_9.q.out |   83 +-
 .../llap/groupby_sort_skew_1_23.q.out              | 5892 ++++++++++++++
 .../clientpositive/llap/groupby_sort_test_1.q.out  |  143 +
 .../{ => llap}/groupingset_high_columns.q.out      |  416 +-
 .../results/clientpositive/llap/hashjoin.q.out     |  586 ++
 .../test/results/clientpositive/llap/having2.q.out |  692 ++
 .../results/clientpositive/{ => llap}/hll.q.out    |  152 +-
 .../clientpositive/{ => llap}/implicit_cast1.q.out |   36 +-
 .../llap/implicit_cast_during_insert.q.out         |  154 +
 .../{ => llap}/implicit_decimal.q.out              |   44 +-
 .../clientpositive/llap/in_typecheck_char.q.out    |  461 ++
 .../{ => llap}/in_typecheck_mixed.q.out            |   36 +-
 .../llap/in_typecheck_pointlook.q.out              |  204 +
 .../clientpositive/llap/in_typecheck_varchar.q.out |  207 +
 .../infer_bucket_sort_convert_join.q.out           |    9 +-
 .../{ => llap}/infer_bucket_sort_dyn_part.q.out    |  235 +-
 .../infer_bucket_sort_grouping_operators.q.out     | 1821 ++---
 .../{ => llap}/infer_bucket_sort_list_bucket.q.out |    4 +-
 .../infer_bucket_sort_map_operators.q.out          |  824 +-
 .../{ => llap}/infer_bucket_sort_merge.q.out       |    8 +-
 .../infer_bucket_sort_multi_insert.q.out           |   40 +-
 .../llap/infer_bucket_sort_num_buckets.q.out       |  439 +
 .../infer_bucket_sort_reducers_power_two.q.out     |   36 +-
 .../{ => llap}/infer_const_type.q.out              |  108 +-
 .../clientpositive/llap/infer_join_preds.q.out     | 1433 ++++
 .../clientpositive/{ => llap}/innerjoin.q.out      |  306 +-
 .../results/clientpositive/llap/innerjoin1.q.out   |  278 +
 .../results/clientpositive/{ => llap}/input.q.out  |    2 -
 .../results/clientpositive/{ => llap}/input0.q.out |    2 -
 .../test/results/clientpositive/llap/input11.q.out |  213 +
 .../clientpositive/llap/input11_limit.q.out        |  128 +
 .../clientpositive/{ => llap}/input12.q.out        |  380 +-
 .../clientpositive/{ => llap}/input13.q.out        |  408 +-
 .../results/clientpositive/llap/input1_limit.q.out |  283 +
 .../test/results/clientpositive/llap/input21.q.out |   93 +
 .../test/results/clientpositive/llap/input22.q.out |  115 +
 .../test/results/clientpositive/llap/input23.q.out |  197 +
 .../test/results/clientpositive/llap/input25.q.out |  170 +
 .../test/results/clientpositive/llap/input26.q.out |  164 +
 .../results/clientpositive/llap/input2_limit.q.out |   43 +
 .../test/results/clientpositive/llap/input30.q.out |  269 +
 .../test/results/clientpositive/llap/input32.q.out |  140 +
 .../results/clientpositive/llap/input3_limit.q.out |  202 +
 .../results/clientpositive/{ => llap}/input4.q.out |    2 +-
 .../clientpositive/{ => llap}/input42.q.out        |  148 +-
 .../results/clientpositive/llap/input4_limit.q.out |  112 +
 .../test/results/clientpositive/llap/input6.q.out  |  129 +
 .../test/results/clientpositive/llap/input7.q.out  |  150 +
 .../test/results/clientpositive/llap/input8.q.out  |  151 +
 .../test/results/clientpositive/llap/input9.q.out  |  132 +
 .../{ => llap}/input_columnarserde.q.out           |   75 +-
 .../{ => llap}/input_dynamicserde.q.out            |   92 +-
 .../{ => llap}/input_lazyserde.q.out               |   74 +-
 .../{ => llap}/input_lazyserde2.q.out              |   74 +-
 .../clientpositive/{ => llap}/input_limit.q.out    |    3 -
 .../clientpositive/{ => llap}/input_part0.q.out    |    2 -
 .../results/clientpositive/llap/input_part1.q.out  |  347 +
 .../results/clientpositive/llap/input_part10.q.out |  192 +
 .../results/clientpositive/llap/input_part2.q.out  |  645 ++
 .../clientpositive/{ => llap}/input_part3.q.out    |    2 -
 .../clientpositive/{ => llap}/input_part4.q.out    |    3 -
 .../clientpositive/{ => llap}/input_part5.q.out    |  150 +-
 .../clientpositive/{ => llap}/input_part6.q.out    |    3 -
 .../results/clientpositive/llap/input_part7.q.out  |  705 ++
 .../clientpositive/{ => llap}/input_part8.q.out    |    3 -
 .../clientpositive/{ => llap}/input_part9.q.out    |   74 +-
 .../{ => llap}/input_testsequencefile.q.out        |  152 +-
 .../clientpositive/llap/input_testxpath.q.out      |  137 +
 .../clientpositive/llap/input_testxpath2.q.out     |  140 +
 .../{ => llap}/input_testxpath3.q.out              |   28 +-
 .../{ => llap}/input_testxpath4.q.out              |   68 +-
 .../clientpositive/{ => llap}/insert0.q.out        |   48 +-
 .../llap/insert2_overwrite_partitions.q.out        |  397 +
 .../{ => llap}/insert_nonacid_from_acid.q.out      |   10 +-
 .../llap/insertoverwrite_bucket.q.out              |  379 +
 .../{ => llap}/intersect_all_rj.q.out              |    6 +-
 .../clientpositive/{ => llap}/interval_3.q.out     |  130 +-
 .../clientpositive/{ => llap}/interval_alt.q.out   |   29 +-
 .../{ => llap}/interval_arithmetic.q.out           |  213 +-
 .../clientpositive/{ => llap}/ivyDownload.q.out    |    3 -
 .../results/clientpositive/{ => llap}/join10.q.out |  124 +-
 .../test/results/clientpositive/llap/join11.q.out  |  270 +
 .../test/results/clientpositive/llap/join12.q.out  |  405 +
 .../test/results/clientpositive/llap/join13.q.out  |  330 +
 .../results/clientpositive/{ => llap}/join14.q.out |  198 +-
 .../results/clientpositive/{ => llap}/join15.q.out |  156 +-
 .../test/results/clientpositive/llap/join16.q.out  |   91 +
 .../results/clientpositive/{ => llap}/join17.q.out |  465 +-
 .../results/clientpositive/{ => llap}/join18.q.out |  204 +-
 .../{ => llap}/join18_multi_distinct.q.out         |  208 +-
 .../test/results/clientpositive/llap/join19.q.out  |  339 +
 .../results/clientpositive/{ => llap}/join2.q.out  |  209 +-
 .../results/clientpositive/{ => llap}/join20.q.out |  450 +-
 .../results/clientpositive/{ => llap}/join21.q.out |  226 +-
 .../test/results/clientpositive/llap/join22.q.out  |  119 +
 .../test/results/clientpositive/llap/join23.q.out  |  210 +
 .../test/results/clientpositive/llap/join25.q.out  |  212 +
 .../test/results/clientpositive/llap/join26.q.out  |  583 ++
 .../test/results/clientpositive/llap/join27.q.out  |  216 +
 .../test/results/clientpositive/llap/join28.q.out  |  332 +
 .../test/results/clientpositive/llap/join29.q.out  |  233 +
 .../results/clientpositive/{ => llap}/join3.q.out  |  261 +-
 .../test/results/clientpositive/llap/join30.q.out  |  208 +
 .../test/results/clientpositive/llap/join31.q.out  |  242 +
 .../test/results/clientpositive/llap/join32.q.out  |  562 ++
 .../test/results/clientpositive/llap/join33.q.out  |  562 ++
 .../test/results/clientpositive/llap/join34.q.out  |  578 ++
 .../test/results/clientpositive/llap/join35.q.out  |  607 ++
 .../results/clientpositive/{ => llap}/join36.q.out |  209 +-
 .../test/results/clientpositive/llap/join37.q.out  |  212 +
 .../test/results/clientpositive/llap/join38.q.out  |  170 +
 .../results/clientpositive/{ => llap}/join39.q.out |  185 +-
 .../test/results/clientpositive/llap/join4.q.out   |  231 +
 .../results/clientpositive/{ => llap}/join40.q.out |  956 +--
 .../test/results/clientpositive/llap/join41.q.out  |  208 +
 .../test/results/clientpositive/llap/join42.q.out  |  409 +
 .../results/clientpositive/{ => llap}/join43.q.out |  574 +-
 .../test/results/clientpositive/llap/join44.q.out  |  106 +
 .../test/results/clientpositive/llap/join45.q.out  | 2359 ++++++
 .../test/results/clientpositive/llap/join47.q.out  | 2359 ++++++
 .../test/results/clientpositive/llap/join5.q.out   |  229 +
 .../test/results/clientpositive/llap/join6.q.out   |  234 +
 .../test/results/clientpositive/llap/join7.q.out   |  289 +
 .../test/results/clientpositive/llap/join8.q.out   |  228 +
 .../results/clientpositive/{ => llap}/join9.q.out  |  508 +-
 .../clientpositive/{ => llap}/join_1to1.q.out      |   92 +-
 .../clientpositive/llap/join_alt_syntax.q.out      |  765 ++
 .../llap/join_by_range_rule_not_null.q.out         |  518 ++
 .../clientpositive/llap/join_cond_pushdown_1.q.out |  477 ++
 .../clientpositive/llap/join_cond_pushdown_2.q.out |  310 +
 .../clientpositive/llap/join_cond_pushdown_3.q.out |  485 ++
 .../clientpositive/llap/join_cond_pushdown_4.q.out |  314 +
 .../llap/join_cond_pushdown_unqual1.q.out          |  603 ++
 .../llap/join_cond_pushdown_unqual2.q.out          |  397 +
 .../llap/join_cond_pushdown_unqual3.q.out          |  611 ++
 .../llap/join_cond_pushdown_unqual4.q.out          |  401 +
 .../llap/join_cond_pushdown_unqual5.q.out          |  233 +
 .../clientpositive/llap/join_filters_overlap.q.out | 1652 ++++
 .../clientpositive/llap/join_grp_diff_keys.q.out   |  271 +
 .../clientpositive/llap/join_hive_626.q.out        |  215 +
 .../llap/join_merge_multi_expressions.q.out        |  188 +
 .../results/clientpositive/llap/join_merging.q.out |  279 +
 .../clientpositive/llap/join_on_varchar.q.out      |  162 +
 .../results/clientpositive/llap/join_parse.q.out   |  619 ++
 .../clientpositive/{ => llap}/join_rc.q.out        |  124 +-
 .../results/clientpositive/llap/join_reorder.q.out |  806 ++
 .../clientpositive/llap/join_reorder2.q.out        |  452 ++
 .../clientpositive/llap/join_reorder3.q.out        |  452 ++
 .../clientpositive/llap/join_reorder4.q.out        |  465 ++
 .../results/clientpositive/llap/join_star.q.out    | 1011 +++
 .../results/clientpositive/llap/join_thrift.q.out  |  133 +
 .../results/clientpositive/llap/join_view.q.out    |  161 +
 .../clientpositive/{ => llap}/keyword_1.q.out      |    4 -
 .../clientpositive/llap/lateral_view_cp.q.out      |  167 +
 .../llap/lateral_view_explode2.q.out               |  130 +
 .../clientpositive/llap/lateral_view_noalias.q.out |  296 +
 .../clientpositive/llap/lateral_view_onview.q.out  |  711 ++
 .../clientpositive/llap/lateral_view_onview2.q.out |  117 +
 .../clientpositive/llap/lateral_view_outer.q.out   |  207 +
 .../clientpositive/llap/lateral_view_ppd.q.out     |  356 +
 .../{ => llap}/leadlag_queries.q.out               |   76 +-
 .../clientpositive/llap/limit_pushdown2.q.out      | 1363 ++++
 .../llap/limit_pushdown_negative.q.out             |  440 +
 .../results/clientpositive/llap/lineage1.q.out     |  289 +
 .../{ => llap}/list_bucket_dml_1.q.out             |  461 +-
 .../{ => llap}/list_bucket_dml_11.q.out            |  365 +-
 .../{ => llap}/list_bucket_dml_12.q.out            |  448 +-
 .../{ => llap}/list_bucket_dml_13.q.out            |  369 +-
 .../clientpositive/llap/list_bucket_dml_14.q.out   |  359 +
 .../{ => llap}/list_bucket_dml_2.q.out             |  479 +-
 .../{ => llap}/list_bucket_dml_3.q.out             |  461 +-
 .../clientpositive/llap/list_bucket_dml_4.q.out    |  853 ++
 .../{ => llap}/list_bucket_dml_5.q.out             |  464 +-
 .../{ => llap}/list_bucket_dml_6.q.out             | 1026 ++-
 .../{ => llap}/list_bucket_dml_7.q.out             | 1026 ++-
 .../clientpositive/llap/list_bucket_dml_9.q.out    |  853 ++
 .../{ => llap}/list_bucket_query_multiskew_1.q.out |  284 +-
 .../{ => llap}/list_bucket_query_multiskew_2.q.out |  363 +-
 .../{ => llap}/list_bucket_query_multiskew_3.q.out |  213 +-
 .../{ => llap}/list_bucket_query_oneskew_1.q.out   |  214 +-
 .../{ => llap}/list_bucket_query_oneskew_2.q.out   |  611 +-
 .../{ => llap}/list_bucket_query_oneskew_3.q.out   |  166 +-
 .../{ => llap}/literal_decimal.q.out               |    3 -
 .../clientpositive/{ => llap}/literal_double.q.out |    3 -
 .../clientpositive/{ => llap}/literal_ints.q.out   |    3 -
 .../clientpositive/{ => llap}/literal_string.q.out |    3 -
 .../results/clientpositive/llap/llap_reader.q.out  |  225 +
 .../{ => llap}/load_dyn_part10.q.out               |  122 +-
 .../{ => llap}/load_dyn_part11.q.out               | 3810 ++++-----
 .../{ => llap}/load_dyn_part12.q.out               | 3662 ++++-----
 .../clientpositive/llap/load_dyn_part13.q.out      |  288 +
 .../clientpositive/llap/load_dyn_part14.q.out      |  320 +
 .../clientpositive/{ => llap}/load_dyn_part4.q.out |  122 +-
 .../clientpositive/{ => llap}/load_dyn_part6.q.out | 3662 ++++-----
 .../clientpositive/{ => llap}/load_dyn_part8.q.out |  829 +-
 .../clientpositive/{ => llap}/load_dyn_part9.q.out |  122 +-
 .../load_static_ptn_into_bucketed_table.q.out      |  312 +-
 .../clientpositive/llap/louter_join_ppr.q.out      | 1368 ++++
 .../results/clientpositive/{ => llap}/macro.q.out  |   18 -
 .../clientpositive/{ => llap}/mapreduce5.q.out     |  151 +-
 .../clientpositive/{ => llap}/mapreduce6.q.out     |  151 +-
 .../results/clientpositive/llap/masking_1.q.out    |  432 +
 .../results/clientpositive/llap/masking_10.q.out   |  261 +
 .../results/clientpositive/llap/masking_12.q.out   |  482 ++
 .../clientpositive/{ => llap}/masking_13.q.out     |   76 +-
 .../{ => llap}/masking_1_newdb.q.out               |   72 +-
 .../results/clientpositive/llap/masking_2.q.out    |  391 +
 .../clientpositive/{ => llap}/masking_3.q.out      | 7715 +++++++++---------
 .../results/clientpositive/llap/masking_4.q.out    |  282 +
 .../clientpositive/{ => llap}/masking_6.q.out      |  160 +-
 .../results/clientpositive/llap/masking_7.q.out    |  231 +
 .../clientpositive/{ => llap}/masking_8.q.out      |  143 +-
 .../clientpositive/{ => llap}/masking_9.q.out      |   36 +-
 .../clientpositive/llap/masking_disablecbo_1.q.out |  428 +
 .../clientpositive/llap/masking_disablecbo_2.q.out |  381 +
 .../{ => llap}/masking_disablecbo_3.q.out          | 7329 ++++++++---------
 .../clientpositive/llap/masking_disablecbo_4.q.out |  212 +
 .../clientpositive/{ => llap}/masking_mv.q.out     |  820 +-
 .../clientpositive/llap/masking_reserved.q.out     |  244 +
 .../results/clientpositive/{ => llap}/merge3.q.out | 1501 ++--
 .../results/clientpositive/{ => llap}/merge4.q.out |  588 +-
 .../{ => llap}/merge_dynamic_partition.q.out       |  518 +-
 .../{ => llap}/merge_dynamic_partition2.q.out      |  199 +-
 .../{ => llap}/merge_dynamic_partition3.q.out      |  199 +-
 .../{ => llap}/merge_dynamic_partition4.q.out      |  154 +-
 .../{ => llap}/merge_dynamic_partition5.q.out      |  154 +-
 .../clientpositive/{ => llap}/merge_empty.q.out    |    2 -
 .../results/clientpositive/llap/merge_join_1.q.out |  206 +
 .../results/clientpositive/llap/mergejoins.q.out   |  386 +
 .../test/results/clientpositive/llap_reader.q.out  |   88 -
 .../results/clientpositive/load_dyn_part13.q.out   |  270 -
 .../results/clientpositive/load_dyn_part14.q.out   |  376 -
 .../results/clientpositive/louter_join_ppr.q.out   | 1292 ---
 ql/src/test/results/clientpositive/masking_1.q.out |  530 --
 .../test/results/clientpositive/masking_10.q.out   |  266 -
 .../test/results/clientpositive/masking_12.q.out   |  501 --
 ql/src/test/results/clientpositive/masking_2.q.out |  385 -
 ql/src/test/results/clientpositive/masking_4.q.out |  331 -
 ql/src/test/results/clientpositive/masking_7.q.out |  295 -
 .../clientpositive/masking_disablecbo_1.q.out      |  526 --
 .../clientpositive/masking_disablecbo_2.q.out      |  371 -
 .../clientpositive/masking_disablecbo_4.q.out      |  246 -
 .../results/clientpositive/masking_reserved.q.out  |  216 -
 .../test/results/clientpositive/merge_join_1.q.out |  205 -
 .../test/results/clientpositive/mergejoins.q.out   |  389 -
 442 files changed, 102562 insertions(+), 110742 deletions(-)

diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index cf3bc5c..de14c81 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -2238,7 +2238,278 @@ minillaplocal.query.files=\
   groupby8.q,\
   groupby8_map.q,\
   groupby8_map_skew.q,\
-  groupby8_noskew.q
+  groupby8_noskew.q,\
+  groupby9.q,\
+  groupby_complex_types.q,\
+  groupby_complex_types_multi_single_reducer.q,\
+  groupby_cube1.q,\
+  groupby_cube_multi_gby.q,\
+  groupby_distinct_samekey.q,\
+  groupby_duplicate_key.q,\
+  groupby_grouping_id3.q,\
+  groupby_grouping_sets1.q,\
+  groupby_grouping_sets2.q,\
+  groupby_grouping_sets3.q,\
+  groupby_grouping_sets4.q,\
+  groupby_grouping_sets5.q,\
+  groupby_grouping_sets6.q,\
+  groupby_grouping_sets_grouping.q,\
+  groupby_grouping_sets_limit.q,\
+  groupby_grouping_sets_view.q,\
+  groupby_grouping_window.q,\
+  groupby_join_pushdown.q,\
+  groupby_map_ppr.q,\
+  groupby_map_ppr_multi_distinct.q,\
+  groupby_multi_insert_common_distinct.q,\
+  groupby_multi_single_reducer.q,\
+  groupby_multi_single_reducer2.q,\
+  groupby_multi_single_reducer3.q,\
+  groupby_multialias.q,\
+  groupby_nocolumnalign.q,\
+  groupby_position.q,\
+  groupby_ppd.q,\
+  groupby_ppr.q,\
+  groupby_ppr_multi_distinct.q,\
+  groupby_rollup1.q,\
+  groupby_sort_10.q,\
+  groupby_sort_11.q,\
+  groupby_sort_1_23.q,\
+  groupby_sort_2.q,\
+  groupby_sort_3.q,\
+  groupby_sort_4.q,\
+  groupby_sort_5.q,\
+  groupby_sort_6.q,\
+  groupby_sort_7.q,\
+  groupby_sort_8.q,\
+  groupby_sort_9.q,\
+  groupby_sort_skew_1_23.q,\
+  groupby_sort_test_1.q,\
+  groupingset_high_columns.q,\
+  hashjoin.q,\
+  having2.q,\
+  hll.q,\
+  implicit_cast1.q,\
+  implicit_cast_during_insert.q,\
+  implicit_decimal.q,\
+  in_typecheck_char.q,\
+  in_typecheck_mixed.q,\
+  in_typecheck_pointlook.q,\
+  in_typecheck_varchar.q,\
+  infer_bucket_sort_convert_join.q,\
+  infer_bucket_sort_dyn_part.q,\
+  infer_bucket_sort_grouping_operators.q,\
+  infer_bucket_sort_list_bucket.q,\
+  infer_bucket_sort_map_operators.q,\
+  infer_bucket_sort_merge.q,\
+  infer_bucket_sort_multi_insert.q,\
+  infer_bucket_sort_num_buckets.q,\
+  infer_bucket_sort_reducers_power_two.q,\
+  infer_const_type.q,\
+  infer_join_preds.q,\
+  innerjoin.q,\
+  innerjoin1.q,\
+  input.q,\
+  input0.q,\
+  input11.q,\
+  input11_limit.q,\
+  input12.q,\
+  input13.q,\
+  input1_limit.q,\
+  input21.q,\
+  input22.q,\
+  input23.q,\
+  input25.q,\
+  input26.q,\
+  input2_limit.q,\
+  input30.q,\
+  input32.q,\
+  input3_limit.q,\
+  input4.q,\
+  input42.q,\
+  input4_limit.q,\
+  input6.q,\
+  input7.q,\
+  input8.q,\
+  input9.q,\
+  input_columnarserde.q,\
+  input_dynamicserde.q,\
+  input_lazyserde.q,\
+  input_lazyserde2.q,\
+  input_limit.q,\
+  input_part0.q,\
+  input_part1.q,\
+  input_part10.q,\
+  input_part2.q,\
+  input_part3.q,\
+  input_part4.q,\
+  input_part5.q,\
+  input_part6.q,\
+  input_part7.q,\
+  input_part8.q,\
+  input_part9.q,\
+  input_testsequencefile.q,\
+  input_testxpath.q,\
+  input_testxpath2.q,\
+  input_testxpath3.q,\
+  input_testxpath4.q,\
+  insert0.q,\
+  insert2_overwrite_partitions.q,\
+  insert_nonacid_from_acid.q,\
+  insertoverwrite_bucket.q,\
+  intersect_all_rj.q,\
+  interval_3.q,\
+  interval_alt.q,\
+  interval_arithmetic.q,\
+  ivyDownload.q,\
+  join10.q,\
+  join11.q,\
+  join12.q,\
+  join13.q,\
+  join14.q,\
+  join15.q,\
+  join16.q,\
+  join17.q,\
+  join18.q,\
+  join18_multi_distinct.q,\
+  join19.q,\
+  join2.q,\
+  join20.q,\
+  join21.q,\
+  join22.q,\
+  join23.q,\
+  join25.q,\
+  join26.q,\
+  join27.q,\
+  join28.q,\
+  join29.q,\
+  join3.q,\
+  join30.q,\
+  join31.q,\
+  join32.q,\
+  join33.q,\
+  join34.q,\
+  join35.q,\
+  join36.q,\
+  join37.q,\
+  join38.q,\
+  join39.q,\
+  join4.q,\
+  join40.q,\
+  join41.q,\
+  join42.q,\
+  join43.q,\
+  join44.q,\
+  join45.q,\
+  join47.q,\
+  join5.q,\
+  join6.q,\
+  join7.q,\
+  join8.q,\
+  join9.q,\
+  join_1to1.q,\
+  join_alt_syntax.q,\
+  join_by_range_rule_not_null.q,\
+  join_cond_pushdown_1.q,\
+  join_cond_pushdown_2.q,\
+  join_cond_pushdown_3.q,\
+  join_cond_pushdown_4.q,\
+  join_cond_pushdown_unqual1.q,\
+  join_cond_pushdown_unqual2.q,\
+  join_cond_pushdown_unqual3.q,\
+  join_cond_pushdown_unqual4.q,\
+  join_cond_pushdown_unqual5.q,\
+  join_filters_overlap.q,\
+  join_grp_diff_keys.q,\
+  join_hive_626.q,\
+  join_merge_multi_expressions.q,\
+  join_merging.q,\
+  join_on_varchar.q,\
+  join_parse.q,\
+  join_rc.q,\
+  join_reorder.q,\
+  join_reorder2.q,\
+  join_reorder3.q,\
+  join_reorder4.q,\
+  join_star.q,\
+  join_thrift.q,\
+  join_view.q,\
+  keyword_1.q,\
+  lateral_view_cp.q,\
+  lateral_view_explode2.q,\
+  lateral_view_noalias.q,\
+  lateral_view_onview.q,\
+  lateral_view_onview2.q,\
+  lateral_view_outer.q,\
+  lateral_view_ppd.q,\
+  leadlag_queries.q,\
+  limit_pushdown2.q,\
+  limit_pushdown_negative.q,\
+  lineage1.q,\
+  list_bucket_dml_1.q,\
+  list_bucket_dml_11.q,\
+  list_bucket_dml_12.q,\
+  list_bucket_dml_13.q,\
+  list_bucket_dml_14.q,\
+  list_bucket_dml_2.q,\
+  list_bucket_dml_3.q,\
+  list_bucket_dml_4.q,\
+  list_bucket_dml_5.q,\
+  list_bucket_dml_6.q,\
+  list_bucket_dml_7.q,\
+  list_bucket_dml_9.q,\
+  list_bucket_query_multiskew_1.q,\
+  list_bucket_query_multiskew_2.q,\
+  list_bucket_query_multiskew_3.q,\
+  list_bucket_query_oneskew_1.q,\
+  list_bucket_query_oneskew_2.q,\
+  list_bucket_query_oneskew_3.q,\
+  literal_decimal.q,\
+  literal_double.q,\
+  literal_ints.q,\
+  literal_string.q,\
+  llap_reader.q,\
+  load_dyn_part10.q,\
+  load_dyn_part11.q,\
+  load_dyn_part12.q,\
+  load_dyn_part13.q,\
+  load_dyn_part14.q,\
+  load_dyn_part4.q,\
+  load_dyn_part6.q,\
+  load_dyn_part8.q,\
+  load_dyn_part9.q,\
+  load_static_ptn_into_bucketed_table.q,\
+  louter_join_ppr.q,\
+  macro.q,\
+  mapreduce5.q,\
+  mapreduce6.q,\
+  masking_1.q,\
+  masking_10.q,\
+  masking_12.q,\
+  masking_13.q,\
+  masking_1_newdb.q,\
+  masking_2.q,\
+  masking_3.q,\
+  masking_4.q,\
+  masking_6.q,\
+  masking_7.q,\
+  masking_8.q,\
+  masking_9.q,\
+  masking_disablecbo_1.q,\
+  masking_disablecbo_2.q,\
+  masking_disablecbo_3.q,\
+  masking_disablecbo_4.q,\
+  masking_mv.q,\
+  masking_reserved.q,\
+  merge3.q,\
+  merge4.q,\
+  merge_dynamic_partition.q,\
+  merge_dynamic_partition2.q,\
+  merge_dynamic_partition3.q,\
+  merge_dynamic_partition4.q,\
+  merge_dynamic_partition5.q,\
+  merge_empty.q,\
+  merge_join_1.q,\
+  mergejoins.q
 
 encrypted.query.files=encryption_join_unencrypted_tbl.q,\
   encryption_insert_partition_static.q,\
diff --git a/ql/src/test/queries/clientpositive/groupby_duplicate_key.q b/ql/src/test/queries/clientpositive/groupby_duplicate_key.q
index ce5a091..a2e1faf 100644
--- a/ql/src/test/queries/clientpositive/groupby_duplicate_key.q
+++ b/ql/src/test/queries/clientpositive/groupby_duplicate_key.q
@@ -1,18 +1,18 @@
 --! qt:dataset:src
 explain
-select distinct key, "" as dummy1, "" as dummy2 from src tablesample (10 rows);
+select distinct key, "" as dummy1, "" as dummy2 from src tablesample (10 rows) order by key;
 
-select distinct key, "" as dummy1, "" as dummy2 from src tablesample (10 rows);
+select distinct key, "" as dummy1, "" as dummy2 from src tablesample (10 rows) order by key;
 
 explain
 create table dummy_n6 as
-select distinct key, "X" as dummy1, "X" as dummy2 from src tablesample (10 rows);
+select distinct key, "X" as dummy1, "X" as dummy2 from src tablesample (10 rows) order by key;
 
 create table dummy_n6 as
-select distinct key, "X" as dummy1, "X" as dummy2 from src tablesample (10 rows);
+select distinct key, "X" as dummy1, "X" as dummy2 from src tablesample (10 rows) order by key;
 
-select key,dummy1,dummy2 from dummy_n6;
+select key,dummy1,dummy2 from dummy_n6 order by key;
 
 explain
-select max('pants'), max('pANTS') from src group by key limit 1;
-select max('pants'), max('pANTS') from src group by key limit 1;
+select max('pants'), max('pANTS') from src group by key order by key limit 1;
+select max('pants'), max('pANTS') from src group by key order by key limit 1;
diff --git a/ql/src/test/queries/clientpositive/groupby_grouping_sets_limit.q b/ql/src/test/queries/clientpositive/groupby_grouping_sets_limit.q
index cda9c03..3c4a973 100644
--- a/ql/src/test/queries/clientpositive/groupby_grouping_sets_limit.q
+++ b/ql/src/test/queries/clientpositive/groupby_grouping_sets_limit.q
@@ -6,31 +6,31 @@ CREATE TABLE T1_n141(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS T
 LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_n141;
 
 EXPLAIN
-SELECT a, b, count(*) from T1_n141 group by a, b with cube LIMIT 10;
+SELECT a, b, count(*) FROM T1_n141 GROUP BY a, b WITH CUBE ORDER BY a, b, GROUPING__ID LIMIT 10;
 
-SELECT a, b, count(*) from T1_n141 group by a, b with cube LIMIT 10;
+SELECT a, b, count(*) FROM T1_n141 GROUP BY a, b WITH CUBE ORDER BY a, b, GROUPING__ID LIMIT 10;
 
 EXPLAIN
-SELECT a, b, count(*) FROM T1_n141 GROUP BY a, b  GROUPING SETS (a, (a, b), b, ()) LIMIT 10;
+SELECT a, b, count(*) FROM T1_n141 GROUP BY a, b  GROUPING SETS (a, (a, b), b, ()) ORDER BY a, b, GROUPING__ID LIMIT 10;
 
-SELECT a, b, count(*) FROM T1_n141 GROUP BY a, b  GROUPING SETS (a, (a, b), b, ()) LIMIT 10;
+SELECT a, b, count(*) FROM T1_n141 GROUP BY a, b  GROUPING SETS (a, (a, b), b, ()) ORDER BY a, b, GROUPING__ID LIMIT 10;
 
 EXPLAIN
-SELECT a, b, count(*) FROM T1_n141 GROUP BY a, b GROUPING SETS (a, (a, b)) LIMIT 10;
+SELECT a, b, count(*) FROM T1_n141 GROUP BY a, b GROUPING SETS (a, (a, b)) ORDER BY a, b, GROUPING__ID LIMIT 10;
 
-SELECT a, b, count(*) FROM T1_n141 GROUP BY a, b GROUPING SETS (a, (a, b)) LIMIT 10;
+SELECT a, b, count(*) FROM T1_n141 GROUP BY a, b GROUPING SETS (a, (a, b)) ORDER BY a, b, GROUPING__ID LIMIT 10;
 
 EXPLAIN
-SELECT a FROM T1_n141 GROUP BY a, b, c GROUPING SETS (a, b, c) LIMIT 10;
+SELECT a FROM T1_n141 GROUP BY a, b, c GROUPING SETS (a, b, c) ORDER BY a, b, c, GROUPING__ID LIMIT 10;
 
-SELECT a FROM T1_n141 GROUP BY a, b, c GROUPING SETS (a, b, c) LIMIT 10;
+SELECT a FROM T1_n141 GROUP BY a, b, c GROUPING SETS (a, b, c) ORDER BY a, b, c, GROUPING__ID LIMIT 10;
 
 EXPLAIN
-SELECT a FROM T1_n141 GROUP BY a GROUPING SETS ((a), (a)) LIMIT 10;
+SELECT a FROM T1_n141 GROUP BY a GROUPING SETS ((a), (a)) ORDER BY a LIMIT 10;
 
-SELECT a FROM T1_n141 GROUP BY a GROUPING SETS ((a), (a)) LIMIT 10;
+SELECT a FROM T1_n141 GROUP BY a GROUPING SETS ((a), (a)) ORDER BY a LIMIT 10;
 
 EXPLAIN
-SELECT a + b, count(*) FROM T1_n141 GROUP BY a + b GROUPING SETS (a+b) LIMIT 10;
+SELECT a + b, count(*) FROM T1_n141 GROUP BY a + b GROUPING SETS (a + b) ORDER BY a + b LIMIT 10;
 
-SELECT a + b, count(*) FROM T1_n141 GROUP BY a + b GROUPING SETS (a+b) LIMIT 10;
+SELECT a + b, count(*) FROM T1_n141 GROUP BY a + b GROUPING SETS (a + b) ORDER BY a + b LIMIT 10;
diff --git a/ql/src/test/queries/clientpositive/groupby_grouping_sets_view.q b/ql/src/test/queries/clientpositive/groupby_grouping_sets_view.q
index 376d3d4..0489df7 100644
--- a/ql/src/test/queries/clientpositive/groupby_grouping_sets_view.q
+++ b/ql/src/test/queries/clientpositive/groupby_grouping_sets_view.q
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
 set hive.mapred.mode=nonstrict;
 
 create database test;
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_9.q b/ql/src/test/queries/clientpositive/groupby_sort_9.q
index 0f9be6a..7e4c75e 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_9.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_9.q
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
 set hive.mapred.mode=nonstrict;
 set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
diff --git a/ql/src/test/queries/clientpositive/groupingset_high_columns.q b/ql/src/test/queries/clientpositive/groupingset_high_columns.q
index 977ced6..7107348 100644
--- a/ql/src/test/queries/clientpositive/groupingset_high_columns.q
+++ b/ql/src/test/queries/clientpositive/groupingset_high_columns.q
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
 create table facts (val string);
 
 insert into facts values ('abcdefghijklmnopqrstuvwxyz0123456789');
diff --git a/ql/src/test/queries/clientpositive/infer_bucket_sort_grouping_operators.q b/ql/src/test/queries/clientpositive/infer_bucket_sort_grouping_operators.q
index f3f22ae..9f09356 100644
--- a/ql/src/test/queries/clientpositive/infer_bucket_sort_grouping_operators.q
+++ b/ql/src/test/queries/clientpositive/infer_bucket_sort_grouping_operators.q
@@ -1,5 +1,7 @@
 --! qt:dataset:src
 --! qt:dataset:part
+-- SORT_QUERY_RESULTS
+
 set hive.mapred.mode=nonstrict;
 set hive.exec.infer.bucket.sort=true;
 
diff --git a/ql/src/test/queries/clientpositive/insert0.q b/ql/src/test/queries/clientpositive/insert0.q
index d87c6b8..4d02a73 100644
--- a/ql/src/test/queries/clientpositive/insert0.q
+++ b/ql/src/test/queries/clientpositive/insert0.q
@@ -16,7 +16,7 @@ INSERT INTO TABLE insert_into1_n1 SELECT * from src ORDER BY key DESC LIMIT 10;
 
 select * from insert_into1_n1 order by key;
 
-create table ctas_table as SELECT key, count(value) as foo from src GROUP BY key LIMIT 10;
+CREATE TABLE ctas_table AS SELECT key, count(value) AS foo FROM src GROUP BY key ORDER BY key LIMIT 10;
 
 describe extended ctas_table;
 
@@ -28,7 +28,7 @@ set hive.exec.dynamic.partition=true;
 create table ctas_part (key int, value string) partitioned by (modkey bigint);
 
 insert overwrite table ctas_part partition (modkey) 
-select key, value, ceil(key / 100) from src where key is not null limit 10;
+select key, value, ceil(key / 100) from src where key is not null order by key limit 10;
 
 select * from ctas_part order by key;
 
@@ -36,4 +36,4 @@ select * from ctas_part order by key;
 
 DROP TABLE insert_into1_n1;
 DROP TABLE ctas_table;
-DROP TABLE ctas_part;
\ No newline at end of file
+DROP TABLE ctas_part;
diff --git a/ql/src/test/queries/clientpositive/join45.q b/ql/src/test/queries/clientpositive/join45.q
index c0c7717..cacd80e 100644
--- a/ql/src/test/queries/clientpositive/join45.q
+++ b/ql/src/test/queries/clientpositive/join45.q
@@ -10,6 +10,7 @@ FROM src1 JOIN src
 ON (src1.key=src.key
   AND src1.value between 100 and 102
   AND src.value between 100 and 102)
+ORDER BY src1.key, src1.value, src.key, src.value
 LIMIT 10;
 
 SELECT *
@@ -17,6 +18,7 @@ FROM src1 JOIN src
 ON (src1.key=src.key
   AND src1.value between 100 and 102
   AND src.value between 100 and 102)
+ORDER BY src1.key, src1.value, src.key, src.value
 LIMIT 10;
 
 -- Conjunction with pred on multiple inputs and none
@@ -24,11 +26,13 @@ EXPLAIN
 SELECT *
 FROM src1 JOIN src
 ON (src1.key=src.key AND true)
+ORDER BY src1.key, src1.value, src.key, src.value
 LIMIT 10;
 
 SELECT *
 FROM src1 JOIN src
 ON (src1.key=src.key AND true)
+ORDER BY src1.key, src1.value, src.key, src.value
 LIMIT 10;
 
 -- Conjunction with pred on single inputs and none
@@ -38,6 +42,7 @@ FROM src1 JOIN src
 ON (src1.value between 100 and 102
   AND src.value between 100 and 102
   AND true)
+ORDER BY src1.key, src1.value, src.key, src.value
 LIMIT 10;
 
 SELECT *
@@ -45,6 +50,7 @@ FROM src1 JOIN src
 ON (src1.value between 100 and 102
   AND src.value between 100 and 102
   AND true)
+ORDER BY src1.key, src1.value, src.key, src.value
 LIMIT 10;
 
 -- Disjunction with pred on multiple inputs and single inputs
@@ -54,6 +60,7 @@ FROM src1 JOIN src
 ON (src1.key=src.key
   OR src1.value between 100 and 102
   OR src.value between 100 and 102)
+ORDER BY src1.key, src1.value, src.key, src.value
 LIMIT 10;
 
 SELECT *
@@ -61,6 +68,7 @@ FROM src1 JOIN src
 ON (src1.key=src.key
   OR src1.value between 100 and 102
   OR src.value between 100 and 102)
+ORDER BY src1.key, src1.value, src.key, src.value
 LIMIT 10;
 
 -- Conjunction with multiple inputs on one side
@@ -69,12 +77,14 @@ SELECT *
 FROM src1 JOIN src
 ON (src1.key+src.key >= 100
   AND src1.key+src.key <= 102)
+ORDER BY src1.key, src1.value, src.key, src.value
 LIMIT 10;
 
 SELECT *
 FROM src1 JOIN src
 ON (src1.key+src.key >= 100
   AND src1.key+src.key <= 102)
+ORDER BY src1.key, src1.value, src.key, src.value
 LIMIT 10;
 
 -- Disjunction with multiple inputs on one side
@@ -83,12 +93,14 @@ SELECT *
 FROM src1 JOIN src
 ON (src1.key+src.key >= 100
   OR src1.key+src.key <= 102)
+ORDER BY src1.key, src1.value, src.key, src.value
 LIMIT 10;
 
 SELECT *
 FROM src1 JOIN src
 ON (src1.key+src.key >= 100
   OR src1.key+src.key <= 102)
+ORDER BY src1.key, src1.value, src.key, src.value
 LIMIT 10;
 
 -- Function with multiple inputs on one side
@@ -96,11 +108,13 @@ EXPLAIN
 SELECT *
 FROM src1 JOIN src
 ON ((src1.key,src.key) IN ((100,100),(101,101),(102,102)))
+ORDER BY src1.key, src1.value, src.key, src.value
 LIMIT 10;
 
 SELECT *
 FROM src1 JOIN src
 ON ((src1.key,src.key) IN ((100,100),(101,101),(102,102)))
+ORDER BY src1.key, src1.value, src.key, src.value
 LIMIT 10;
 
 -- Chained 1
@@ -109,12 +123,14 @@ SELECT *
 FROM src
 JOIN src1 a ON (a.key+src.key >= 100)
 LEFT OUTER JOIN src1 b ON (b.key = src.key)
+ORDER BY src.key, src.value, a.key, a.value, b.key, b.value
 LIMIT 10;
 
 SELECT *
 FROM src
 JOIN src1 a ON (a.key+src.key >= 100)
 LEFT OUTER JOIN src1 b ON (b.key = src.key)
+ORDER BY src.key, src.value, a.key, a.value, b.key, b.value
 LIMIT 10;
 
 -- Chained 2
@@ -123,12 +139,14 @@ SELECT *
 FROM src
 LEFT OUTER JOIN src1 a ON (a.key = src.key)
 JOIN src1 b ON (b.key+src.key<= 102)
+ORDER BY src.key, src.value, a.key, a.value, b.key, b.value
 LIMIT 10;
 
 SELECT *
 FROM src
 LEFT OUTER JOIN src1 a ON (a.key = src.key)
 JOIN src1 b ON (b.key+src.key<= 102)
+ORDER BY src.key, src.value, a.key, a.value, b.key, b.value
 LIMIT 10;
 
 -- Chained 3
@@ -137,12 +155,14 @@ SELECT *
 FROM src
 JOIN src1 a ON (a.key+src.key >= 100)
 RIGHT OUTER JOIN src1 b ON (b.key = src.key)
+ORDER BY src.key, src.value, a.key, a.value, b.key, b.value
 LIMIT 10;
 
 SELECT *
 FROM src
 JOIN src1 a ON (a.key+src.key >= 100)
 RIGHT OUTER JOIN src1 b ON (b.key = src.key)
+ORDER BY src.key, src.value, a.key, a.value, b.key, b.value
 LIMIT 10;
 
 -- Chained 4
@@ -151,12 +171,14 @@ SELECT *
 FROM src
 RIGHT OUTER JOIN src1 a ON (a.key = src.key)
 JOIN src1 b ON (b.key+src.key<= 102)
+ORDER BY src.key, src.value, a.key, a.value, b.key, b.value
 LIMIT 10;
 
 SELECT *
 FROM src
 RIGHT OUTER JOIN src1 a ON (a.key = src.key)
 JOIN src1 b ON (b.key+src.key<= 102)
+ORDER BY src.key, src.value, a.key, a.value, b.key, b.value
 LIMIT 10;
 
 -- Chained 5
@@ -165,12 +187,14 @@ SELECT *
 FROM src
 JOIN src1 a ON (a.key+src.key >= 100)
 FULL OUTER JOIN src1 b ON (b.key = src.key)
+ORDER BY src.key, src.value, a.key, a.value, b.key, b.value
 LIMIT 10;
 
 SELECT *
 FROM src
 JOIN src1 a ON (a.key+src.key >= 100)
 FULL OUTER JOIN src1 b ON (b.key = src.key)
+ORDER BY src.key, src.value, a.key, a.value, b.key, b.value
 LIMIT 10;
 
 -- Chained 6
@@ -179,12 +203,14 @@ SELECT *
 FROM src
 FULL OUTER JOIN src1 a ON (a.key = src.key)
 JOIN src1 b ON (b.key+src.key<= 102)
+ORDER BY src.key, src.value, a.key, a.value, b.key, b.value
 LIMIT 10;
 
 SELECT *
 FROM src
 FULL OUTER JOIN src1 a ON (a.key = src.key)
 JOIN src1 b ON (b.key+src.key<= 102)
+ORDER BY src.key, src.value, a.key, a.value, b.key, b.value
 LIMIT 10;
 
 -- Right outer join with multiple inner joins and mixed conditions
@@ -195,6 +221,7 @@ RIGHT OUTER JOIN cbo_t1 t2 ON (t2.key = t1.key)
 JOIN cbo_t1 t3 ON (t3.key = t2.key or t3.value = t2.value and t2.c_int = t1.c_int)
 JOIN cbo_t1 t4 ON (t4.key = t2.key or  t2.c_float = t4.c_float and t4.value = t2.value)
 JOIN cbo_t1 t5 ON (t5.key = t2.key or  t2.c_boolean = t4.c_boolean and t5.c_int = 42)
+ORDER BY t1.key, t1.value, t1.c_int, t1.c_float, t1.c_boolean, t2.key, t2.value, t2.c_int, t2.c_float, t2.c_boolean, t3.key, t3.value, t3.c_int, t3.c_float, t3.c_boolean, t4.key, t4.value, t4.c_int, t4.c_float, t4.c_boolean, t5.key, t5.value, t5.c_int, t5.c_float, t5.c_boolean
 LIMIT 10;
 
 SELECT *
@@ -203,4 +230,5 @@ RIGHT OUTER JOIN cbo_t1 t2 ON (t2.key = t1.key)
 JOIN cbo_t1 t3 ON (t3.key = t2.key or t3.value = t2.value and t2.c_int = t1.c_int)
 JOIN cbo_t1 t4 ON (t4.key = t2.key or  t2.c_float = t4.c_float and t4.value = t2.value)
 JOIN cbo_t1 t5 ON (t5.key = t2.key or  t2.c_boolean = t4.c_boolean and t5.c_int = 42)
+ORDER BY t1.key, t1.value, t1.c_int, t1.c_float, t1.c_boolean, t2.key, t2.value, t2.c_int, t2.c_float, t2.c_boolean, t3.key, t3.value, t3.c_int, t3.c_float, t3.c_boolean, t4.key, t4.value, t4.c_int, t4.c_float, t4.c_boolean, t5.key, t5.value, t5.c_int, t5.c_float, t5.c_boolean
 LIMIT 10;
diff --git a/ql/src/test/queries/clientpositive/join47.q b/ql/src/test/queries/clientpositive/join47.q
index b5345d8..bd34c09 100644
--- a/ql/src/test/queries/clientpositive/join47.q
+++ b/ql/src/test/queries/clientpositive/join47.q
@@ -11,6 +11,7 @@ FROM src1 JOIN src
 ON (src1.key=src.key
   AND src1.value between 100 and 102
   AND src.value between 100 and 102)
+ORDER BY src1.key, src1.value, src.key, src.value
 LIMIT 10;
 
 SELECT *
@@ -18,6 +19,7 @@ FROM src1 JOIN src
 ON (src1.key=src.key
   AND src1.value between 100 and 102
   AND src.value between 100 and 102)
+ORDER BY src1.key, src1.value, src.key, src.value
 LIMIT 10;
 
 -- Conjunction with pred on multiple inputs and none
@@ -25,11 +27,13 @@ EXPLAIN
 SELECT *
 FROM src1 JOIN src
 ON (src1.key=src.key AND true)
+ORDER BY src1.key, src1.value, src.key, src.value
 LIMIT 10;
 
 SELECT *
 FROM src1 JOIN src
 ON (src1.key=src.key AND true)
+ORDER BY src1.key, src1.value, src.key, src.value
 LIMIT 10;
 
 -- Conjunction with pred on single inputs and none
@@ -39,6 +43,7 @@ FROM src1 JOIN src
 ON (src1.value between 100 and 102
   AND src.value between 100 and 102
   AND true)
+ORDER BY src1.key, src1.value, src.key, src.value
 LIMIT 10;
 
 SELECT *
@@ -46,6 +51,7 @@ FROM src1 JOIN src
 ON (src1.value between 100 and 102
   AND src.value between 100 and 102
   AND true)
+ORDER BY src1.key, src1.value, src.key, src.value
 LIMIT 10;
 
 -- Disjunction with pred on multiple inputs and single inputs
@@ -55,6 +61,7 @@ FROM src1 JOIN src
 ON (src1.key=src.key
   OR src1.value between 100 and 102
   OR src.value between 100 and 102)
+ORDER BY src1.key, src1.value, src.key, src.value
 LIMIT 10;
 
 SELECT *
@@ -62,6 +69,7 @@ FROM src1 JOIN src
 ON (src1.key=src.key
   OR src1.value between 100 and 102
   OR src.value between 100 and 102)
+ORDER BY src1.key, src1.value, src.key, src.value
 LIMIT 10;
 
 -- Conjunction with multiple inputs on one side
@@ -70,12 +78,14 @@ SELECT *
 FROM src1 JOIN src
 ON (src1.key+src.key >= 100
   AND src1.key+src.key <= 102)
+ORDER BY src1.key, src1.value, src.key, src.value
 LIMIT 10;
 
 SELECT *
 FROM src1 JOIN src
 ON (src1.key+src.key >= 100
   AND src1.key+src.key <= 102)
+ORDER BY src1.key, src1.value, src.key, src.value
 LIMIT 10;
 
 -- Disjunction with multiple inputs on one side
@@ -84,12 +94,14 @@ SELECT *
 FROM src1 JOIN src
 ON (src1.key+src.key >= 100
   OR src1.key+src.key <= 102)
+ORDER BY src1.key, src1.value, src.key, src.value
 LIMIT 10;
 
 SELECT *
 FROM src1 JOIN src
 ON (src1.key+src.key >= 100
   OR src1.key+src.key <= 102)
+ORDER BY src1.key, src1.value, src.key, src.value
 LIMIT 10;
 
 -- Function with multiple inputs on one side
@@ -97,11 +109,13 @@ EXPLAIN
 SELECT *
 FROM src1 JOIN src
 ON ((src1.key,src.key) IN ((100,100),(101,101),(102,102)))
+ORDER BY src1.key, src1.value, src.key, src.value
 LIMIT 10;
 
 SELECT *
 FROM src1 JOIN src
 ON ((src1.key,src.key) IN ((100,100),(101,101),(102,102)))
+ORDER BY src1.key, src1.value, src.key, src.value
 LIMIT 10;
 
 -- Chained 1
@@ -110,12 +124,14 @@ SELECT *
 FROM src
 JOIN src1 a ON (a.key+src.key >= 100)
 LEFT OUTER JOIN src1 b ON (b.key = src.key)
+ORDER BY src.key, src.value, a.key, a.value, b.key, b.value
 LIMIT 10;
 
 SELECT *
 FROM src
 JOIN src1 a ON (a.key+src.key >= 100)
 LEFT OUTER JOIN src1 b ON (b.key = src.key)
+ORDER BY src.key, src.value, a.key, a.value, b.key, b.value
 LIMIT 10;
 
 -- Chained 2
@@ -124,12 +140,14 @@ SELECT *
 FROM src
 LEFT OUTER JOIN src1 a ON (a.key = src.key)
 JOIN src1 b ON (b.key+src.key<= 102)
+ORDER BY src.key, src.value, a.key, a.value, b.key, b.value
 LIMIT 10;
 
 SELECT *
 FROM src
 LEFT OUTER JOIN src1 a ON (a.key = src.key)
 JOIN src1 b ON (b.key+src.key<= 102)
+ORDER BY src.key, src.value, a.key, a.value, b.key, b.value
 LIMIT 10;
 
 -- Chained 3
@@ -138,12 +156,14 @@ SELECT *
 FROM src
 JOIN src1 a ON (a.key+src.key >= 100)
 RIGHT OUTER JOIN src1 b ON (b.key = src.key)
+ORDER BY src.key, src.value, a.key, a.value, b.key, b.value
 LIMIT 10;
 
 SELECT *
 FROM src
 JOIN src1 a ON (a.key+src.key >= 100)
 RIGHT OUTER JOIN src1 b ON (b.key = src.key)
+ORDER BY src.key, src.value, a.key, a.value, b.key, b.value
 LIMIT 10;
 
 -- Chained 4
@@ -152,12 +172,14 @@ SELECT *
 FROM src
 RIGHT OUTER JOIN src1 a ON (a.key = src.key)
 JOIN src1 b ON (b.key+src.key<= 102)
+ORDER BY src.key, src.value, a.key, a.value, b.key, b.value
 LIMIT 10;
 
 SELECT *
 FROM src
 RIGHT OUTER JOIN src1 a ON (a.key = src.key)
 JOIN src1 b ON (b.key+src.key<= 102)
+ORDER BY src.key, src.value, a.key, a.value, b.key, b.value
 LIMIT 10;
 
 -- Chained 5
@@ -166,12 +188,14 @@ SELECT *
 FROM src
 JOIN src1 a ON (a.key+src.key >= 100)
 FULL OUTER JOIN src1 b ON (b.key = src.key)
+ORDER BY src.key, src.value, a.key, a.value, b.key, b.value
 LIMIT 10;
 
 SELECT *
 FROM src
 JOIN src1 a ON (a.key+src.key >= 100)
 FULL OUTER JOIN src1 b ON (b.key = src.key)
+ORDER BY src.key, src.value, a.key, a.value, b.key, b.value
 LIMIT 10;
 
 -- Chained 6
@@ -180,12 +204,14 @@ SELECT *
 FROM src
 FULL OUTER JOIN src1 a ON (a.key = src.key)
 JOIN src1 b ON (b.key+src.key<= 102)
+ORDER BY src.key, src.value, a.key, a.value, b.key, b.value
 LIMIT 10;
 
 SELECT *
 FROM src
 FULL OUTER JOIN src1 a ON (a.key = src.key)
 JOIN src1 b ON (b.key+src.key<= 102)
+ORDER BY src.key, src.value, a.key, a.value, b.key, b.value
 LIMIT 10;
 
 -- Right outer join with multiple inner joins and mixed conditions
@@ -196,6 +222,7 @@ RIGHT OUTER JOIN cbo_t1 t2 ON (t2.key = t1.key)
 JOIN cbo_t1 t3 ON (t3.key = t2.key or t3.value = t2.value and t2.c_int = t1.c_int)
 JOIN cbo_t1 t4 ON (t4.key = t2.key or  t2.c_float = t4.c_float and t4.value = t2.value)
 JOIN cbo_t1 t5 ON (t5.key = t2.key or  t2.c_boolean = t4.c_boolean and t5.c_int = 42)
+ORDER BY t1.key, t1.value, t1.c_int, t1.c_float, t1.c_boolean, t2.key, t2.value, t2.c_int, t2.c_float, t2.c_boolean, t3.key, t3.value, t3.c_int, t3.c_float, t3.c_boolean, t4.key, t4.value, t4.c_int, t4.c_float, t4.c_boolean, t5.key, t5.value, t5.c_int, t5.c_float, t5.c_boolean
 LIMIT 10;
 
 SELECT *
@@ -204,4 +231,5 @@ RIGHT OUTER JOIN cbo_t1 t2 ON (t2.key = t1.key)
 JOIN cbo_t1 t3 ON (t3.key = t2.key or t3.value = t2.value and t2.c_int = t1.c_int)
 JOIN cbo_t1 t4 ON (t4.key = t2.key or  t2.c_float = t4.c_float and t4.value = t2.value)
 JOIN cbo_t1 t5 ON (t5.key = t2.key or  t2.c_boolean = t4.c_boolean and t5.c_int = 42)
+ORDER BY t1.key, t1.value, t1.c_int, t1.c_float, t1.c_boolean, t2.key, t2.value, t2.c_int, t2.c_float, t2.c_boolean, t3.key, t3.value, t3.c_int, t3.c_float, t3.c_boolean, t4.key, t4.value, t4.c_int, t4.c_float, t4.c_boolean, t5.key, t5.value, t5.c_int, t5.c_float, t5.c_boolean
 LIMIT 10;
diff --git a/ql/src/test/queries/clientpositive/join_star.q b/ql/src/test/queries/clientpositive/join_star.q
index 70cc8f7..5d2ef96 100644
--- a/ql/src/test/queries/clientpositive/join_star.q
+++ b/ql/src/test/queries/clientpositive/join_star.q
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
 set hive.mapred.mode=nonstrict;
 create table fact(m1 int, m2 int, d1 int, d2 int);
 create table dim1(f1 int, f2 int);
diff --git a/ql/src/test/queries/clientpositive/leadlag_queries.q b/ql/src/test/queries/clientpositive/leadlag_queries.q
index 50210fe..7c62145 100644
--- a/ql/src/test/queries/clientpositive/leadlag_queries.q
+++ b/ql/src/test/queries/clientpositive/leadlag_queries.q
@@ -1,4 +1,6 @@
 --! qt:dataset:part
+-- SORT_QUERY_RESULTS
+
 -- 1. testLeadUDAF
 select p_mfgr, p_retailprice,
 lead(p_retailprice) over (partition by p_mfgr order by p_name) as l1,
diff --git a/ql/src/test/queries/clientpositive/load_dyn_part11.q b/ql/src/test/queries/clientpositive/load_dyn_part11.q
index 1e61981..62be4fd 100644
--- a/ql/src/test/queries/clientpositive/load_dyn_part11.q
+++ b/ql/src/test/queries/clientpositive/load_dyn_part11.q
@@ -1,4 +1,6 @@
 --! qt:dataset:srcpart
+-- SORT_QUERY_RESULTS
+
 show partitions srcpart;
 
 
diff --git a/ql/src/test/queries/clientpositive/masking_1.q b/ql/src/test/queries/clientpositive/masking_1.q
index d8d069f..781c62a 100644
--- a/ql/src/test/queries/clientpositive/masking_1.q
+++ b/ql/src/test/queries/clientpositive/masking_1.q
@@ -1,5 +1,7 @@
 --! qt:dataset:srcpart
 --! qt:dataset:src
+-- SORT_QUERY_RESULTS
+
 set hive.mapred.mode=nonstrict;
 set hive.security.authorization.enabled=true;
 set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
diff --git a/ql/src/test/queries/clientpositive/masking_2.q b/ql/src/test/queries/clientpositive/masking_2.q
index eb581b4..e56edef 100644
--- a/ql/src/test/queries/clientpositive/masking_2.q
+++ b/ql/src/test/queries/clientpositive/masking_2.q
@@ -1,4 +1,6 @@
 --! qt:dataset:src
+-- SORT_QUERY_RESULTS
+
 set hive.mapred.mode=nonstrict;
 set hive.security.authorization.enabled=true;
 set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
diff --git a/ql/src/test/queries/clientpositive/masking_3.q b/ql/src/test/queries/clientpositive/masking_3.q
index 2317b50..012ce8b 100644
--- a/ql/src/test/queries/clientpositive/masking_3.q
+++ b/ql/src/test/queries/clientpositive/masking_3.q
@@ -1,5 +1,7 @@
 --! qt:dataset:srcpart
 --! qt:dataset:src
+-- SORT_QUERY_RESULTS
+
 set hive.mapred.mode=nonstrict;
 set hive.security.authorization.enabled=true;
 set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
diff --git a/ql/src/test/queries/clientpositive/masking_disablecbo_1.q b/ql/src/test/queries/clientpositive/masking_disablecbo_1.q
index 74771e5..b4067c8 100644
--- a/ql/src/test/queries/clientpositive/masking_disablecbo_1.q
+++ b/ql/src/test/queries/clientpositive/masking_disablecbo_1.q
@@ -1,5 +1,7 @@
 --! qt:dataset:srcpart
 --! qt:dataset:src
+-- SORT_QUERY_RESULTS
+
 set hive.cbo.enable=false;
 set hive.mapred.mode=nonstrict;
 set hive.security.authorization.enabled=true;
diff --git a/ql/src/test/queries/clientpositive/masking_disablecbo_2.q b/ql/src/test/queries/clientpositive/masking_disablecbo_2.q
index 7be9975..0c28c50 100644
--- a/ql/src/test/queries/clientpositive/masking_disablecbo_2.q
+++ b/ql/src/test/queries/clientpositive/masking_disablecbo_2.q
@@ -1,4 +1,6 @@
 --! qt:dataset:src
+-- SORT_QUERY_RESULTS
+
 set hive.cbo.enable=false;
 set hive.mapred.mode=nonstrict;
 set hive.security.authorization.enabled=true;
diff --git a/ql/src/test/queries/clientpositive/masking_disablecbo_3.q b/ql/src/test/queries/clientpositive/masking_disablecbo_3.q
index d76edc1..94e8db4 100644
--- a/ql/src/test/queries/clientpositive/masking_disablecbo_3.q
+++ b/ql/src/test/queries/clientpositive/masking_disablecbo_3.q
@@ -1,5 +1,7 @@
 --! qt:dataset:srcpart
 --! qt:dataset:src
+-- SORT_QUERY_RESULTS
+
 set hive.cbo.enable=false;
 set hive.mapred.mode=nonstrict;
 set hive.security.authorization.enabled=true;
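Both kinds of q-file change above serve the same goal of making the golden-file output order-independent: the -- SORT_QUERY_RESULTS marker has the QTest driver sort result rows before diffing them against the expected output, while the added ORDER BY clauses pin down exactly which rows a LIMIT returns. A minimal sketch of the pattern, using a hypothetical table t(key STRING, value STRING) that is not part of this patch:

    -- SORT_QUERY_RESULTS

    -- Ordering on every projected column keeps LIMIT deterministic
    -- regardless of the row order produced by the execution engine.
    SELECT key, value
    FROM t
    ORDER BY key, value
    LIMIT 10;
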
diff --git a/ql/src/test/results/clientpositive/groupby_complex_types_multi_single_reducer.q.out b/ql/src/test/results/clientpositive/groupby_complex_types_multi_single_reducer.q.out
deleted file mode 100644
index dd2ea4a..0000000
--- a/ql/src/test/results/clientpositive/groupby_complex_types_multi_single_reducer.q.out
+++ /dev/null
@@ -1,266 +0,0 @@
-PREHOOK: query: CREATE TABLE DEST1_n47(key ARRAY<STRING>, value BIGINT) STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@DEST1_n47
-POSTHOOK: query: CREATE TABLE DEST1_n47(key ARRAY<STRING>, value BIGINT) STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@DEST1_n47
-PREHOOK: query: CREATE TABLE DEST2_n8(key MAP<STRING, STRING>, value BIGINT) STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@DEST2_n8
-POSTHOOK: query: CREATE TABLE DEST2_n8(key MAP<STRING, STRING>, value BIGINT) STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@DEST2_n8
-PREHOOK: query: EXPLAIN
-FROM SRC
-INSERT OVERWRITE TABLE DEST1_n47 SELECT ARRAY(SRC.key) as keyarray, COUNT(1) GROUP BY ARRAY(SRC.key) ORDER BY keyarray limit 10
-INSERT OVERWRITE TABLE DEST2_n8 SELECT MAP(SRC.key, SRC.value) as kvmap, COUNT(1) GROUP BY MAP(SRC.key, SRC.value) ORDER BY kvmap limit 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1_n47
-PREHOOK: Output: default@dest2_n8
-POSTHOOK: query: EXPLAIN
-FROM SRC
-INSERT OVERWRITE TABLE DEST1_n47 SELECT ARRAY(SRC.key) as keyarray, COUNT(1) GROUP BY ARRAY(SRC.key) ORDER BY keyarray limit 10
-INSERT OVERWRITE TABLE DEST2_n8 SELECT MAP(SRC.key, SRC.value) as kvmap, COUNT(1) GROUP BY MAP(SRC.key, SRC.value) ORDER BY kvmap limit 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1_n47
-POSTHOOK: Output: default@dest2_n8
-STAGE DEPENDENCIES:
-  Stage-2 is a root stage
-  Stage-3 depends on stages: Stage-2
-  Stage-0 depends on stages: Stage-3
-  Stage-4 depends on stages: Stage-0
-  Stage-5 depends on stages: Stage-2
-  Stage-6 depends on stages: Stage-5
-  Stage-1 depends on stages: Stage-6
-  Stage-7 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: key (type: string)
-              outputColumnNames: key
-              Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
-              Group By Operator
-                aggregations: count(1)
-                keys: array(key) (type: array<string>)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 250 Data size: 482000 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  key expressions: _col0 (type: array<string>)
-                  null sort order: z
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: array<string>)
-                  Statistics: Num rows: 250 Data size: 482000 Basic stats: COMPLETE Column stats: COMPLETE
-                  TopN Hash Memory Usage: 0.1
-                  value expressions: _col1 (type: bigint)
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: key, value
-              Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
-              Group By Operator
-                aggregations: count(1)
-                keys: map(key:value) (type: map<string,string>)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 250 Data size: 232000 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: array<string>)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 250 Data size: 482000 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: array<string>)
-              null sort order: z
-              sort order: +
-              Statistics: Num rows: 250 Data size: 482000 Basic stats: COMPLETE Column stats: COMPLETE
-              TopN Hash Memory Usage: 0.1
-              value expressions: _col1 (type: bigint)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: array<string>), VALUE._col0 (type: bigint)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 250 Data size: 482000 Basic stats: COMPLETE Column stats: COMPLETE
-          Limit
-            Number of rows: 10
-            Statistics: Num rows: 10 Data size: 19280 Basic stats: COMPLETE Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 10 Data size: 19280 Basic stats: COMPLETE Column stats: COMPLETE
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.dest1_n47
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          replace: true
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.dest1_n47
-
-  Stage: Stage-4
-    Stats Work
-      Basic Stats Work:
-
-  Stage: Stage-5
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: map<string,string>)
-              null sort order: z
-              sort order: +
-              Map-reduce partition columns: _col0 (type: map<string,string>)
-              Statistics: Num rows: 250 Data size: 232000 Basic stats: COMPLETE Column stats: COMPLETE
-              TopN Hash Memory Usage: 0.1
-              value expressions: _col1 (type: bigint)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: map<string,string>)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 250 Data size: 232000 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-6
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: map<string,string>)
-              null sort order: z
-              sort order: +
-              Statistics: Num rows: 250 Data size: 232000 Basic stats: COMPLETE Column stats: COMPLETE
-              TopN Hash Memory Usage: 0.1
-              value expressions: _col1 (type: bigint)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: map<string,string>), VALUE._col0 (type: bigint)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 250 Data size: 232000 Basic stats: COMPLETE Column stats: COMPLETE
-          Limit
-            Number of rows: 10
-            Statistics: Num rows: 10 Data size: 9280 Basic stats: COMPLETE Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 10 Data size: 9280 Basic stats: COMPLETE Column stats: COMPLETE
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.dest2_n8
-
-  Stage: Stage-1
-    Move Operator
-      tables:
-          replace: true
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.dest2_n8
-
-  Stage: Stage-7
-    Stats Work
-      Basic Stats Work:
-
-PREHOOK: query: FROM SRC
-INSERT OVERWRITE TABLE DEST1_n47 SELECT ARRAY(SRC.key) as keyarray, COUNT(1) GROUP BY ARRAY(SRC.key) ORDER BY keyarray limit 10
-INSERT OVERWRITE TABLE DEST2_n8 SELECT MAP(SRC.key, SRC.value) as kvmap, COUNT(1) GROUP BY MAP(SRC.key, SRC.value) ORDER BY kvmap limit 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@dest1_n47
-PREHOOK: Output: default@dest2_n8
-POSTHOOK: query: FROM SRC
-INSERT OVERWRITE TABLE DEST1_n47 SELECT ARRAY(SRC.key) as keyarray, COUNT(1) GROUP BY ARRAY(SRC.key) ORDER BY keyarray limit 10
-INSERT OVERWRITE TABLE DEST2_n8 SELECT MAP(SRC.key, SRC.value) as kvmap, COUNT(1) GROUP BY MAP(SRC.key, SRC.value) ORDER BY kvmap limit 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@dest1_n47
-POSTHOOK: Output: default@dest2_n8
-POSTHOOK: Lineage: dest1_n47.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1_n47.value EXPRESSION [(src)src.null, ]
-POSTHOOK: Lineage: dest2_n8.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest2_n8.value EXPRESSION [(src)src.null, ]
-PREHOOK: query: SELECT DEST1_n47.* FROM DEST1_n47
-PREHOOK: type: QUERY
-PREHOOK: Input: default@dest1_n47
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT DEST1_n47.* FROM DEST1_n47
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest1_n47
-#### A masked pattern was here ####
-["0"]	3
-["10"]	1
-["100"]	2
-["103"]	2
-["104"]	2
-["105"]	1
-["11"]	1
-["111"]	1
-["113"]	2
-["114"]	1
-PREHOOK: query: SELECT DEST2_n8.* FROM DEST2_n8
-PREHOOK: type: QUERY
-PREHOOK: Input: default@dest2_n8
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT DEST2_n8.* FROM DEST2_n8
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dest2_n8
-#### A masked pattern was here ####
-{"0":"val_0"}	3
-{"10":"val_10"}	1
-{"100":"val_100"}	2
-{"103":"val_103"}	2
-{"104":"val_104"}	2
-{"105":"val_105"}	1
-{"11":"val_11"}	1
-{"111":"val_111"}	1
-{"113":"val_113"}	2
-{"114":"val_114"}	1
diff --git a/ql/src/test/results/clientpositive/groupby_cube1.q.out b/ql/src/test/results/clientpositive/groupby_cube1.q.out
deleted file mode 100644
index 0ac1490..0000000
--- a/ql/src/test/results/clientpositive/groupby_cube1.q.out
+++ /dev/null
@@ -1,884 +0,0 @@
-PREHOOK: query: CREATE TABLE T1_n82(key STRING, val STRING) STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@T1_n82
-POSTHOOK: query: CREATE TABLE T1_n82(key STRING, val STRING) STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1_n82
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n82
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@t1_n82
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n82
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@t1_n82
-PREHOOK: query: EXPLAIN
-SELECT key, val, count(1) FROM T1_n82 GROUP BY key, val with cube
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n82
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT key, val, count(1) FROM T1_n82 GROUP BY key, val with cube
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n82
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n82
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string), val (type: string)
-              outputColumnNames: key, val
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count()
-                keys: key (type: string), val (type: string), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col3 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          Select Operator
-            expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: EXPLAIN
-SELECT key, val, count(1) FROM T1_n82 GROUP BY CUBE(key, val)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n82
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT key, val, count(1) FROM T1_n82 GROUP BY CUBE(key, val)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n82
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n82
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string), val (type: string)
-              outputColumnNames: key, val
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count()
-                keys: key (type: string), val (type: string), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col3 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          Select Operator
-            expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT key, val, count(1) FROM T1_n82 GROUP BY key, val with cube
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n82
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, val, count(1) FROM T1_n82 GROUP BY key, val with cube
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n82
-#### A masked pattern was here ####
-1	11	1
-1	NULL	1
-2	12	1
-2	NULL	1
-3	13	1
-3	NULL	1
-7	17	1
-7	NULL	1
-8	18	1
-8	28	1
-8	NULL	2
-NULL	11	1
-NULL	12	1
-NULL	13	1
-NULL	17	1
-NULL	18	1
-NULL	28	1
-NULL	NULL	6
-PREHOOK: query: EXPLAIN
-SELECT key, val, GROUPING__ID, count(1) FROM T1_n82 GROUP BY key, val with cube
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n82
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT key, val, GROUPING__ID, count(1) FROM T1_n82 GROUP BY key, val with cube
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n82
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n82
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string), val (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count()
-                keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col3 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint), _col3 (type: bigint)
-            outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT key, val, GROUPING__ID, count(1) FROM T1_n82 GROUP BY key, val with cube
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n82
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, val, GROUPING__ID, count(1) FROM T1_n82 GROUP BY key, val with cube
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n82
-#### A masked pattern was here ####
-1	11	0	1
-1	NULL	1	1
-2	12	0	1
-2	NULL	1	1
-3	13	0	1
-3	NULL	1	1
-7	17	0	1
-7	NULL	1	1
-8	18	0	1
-8	28	0	1
-8	NULL	1	2
-NULL	11	2	1
-NULL	12	2	1
-NULL	13	2	1
-NULL	17	2	1
-NULL	18	2	1
-NULL	28	2	1
-NULL	NULL	3	6
-PREHOOK: query: EXPLAIN
-SELECT key, count(distinct val) FROM T1_n82 GROUP BY key with cube
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n82
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT key, count(distinct val) FROM T1_n82 GROUP BY key with cube
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n82
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n82
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string), val (type: string)
-              outputColumnNames: key, val
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count(DISTINCT val)
-                keys: key (type: string), 0L (type: bigint), val (type: string)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint)
-                  Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(DISTINCT KEY._col2:0._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col2
-          Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          Select Operator
-            expressions: _col0 (type: string), _col2 (type: bigint)
-            outputColumnNames: _col0, _col1
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT key, count(distinct val) FROM T1_n82 GROUP BY key with cube
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n82
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, count(distinct val) FROM T1_n82 GROUP BY key with cube
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n82
-#### A masked pattern was here ####
-1	1
-2	1
-3	1
-7	1
-8	2
-NULL	6
-PREHOOK: query: EXPLAIN
-SELECT key, val, count(1) FROM T1_n82 GROUP BY key, val with cube
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n82
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT key, val, count(1) FROM T1_n82 GROUP BY key, val with cube
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n82
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n82
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string), val (type: string)
-              outputColumnNames: key, val
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count()
-                keys: key (type: string), val (type: string), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: rand() (type: double)
-                  Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col3 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: partials
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-              null sort order: zzz
-              sort order: +++
-              Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-              Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col3 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: final
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          Select Operator
-            expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT key, val, count(1) FROM T1_n82 GROUP BY key, val with cube
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n82
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, val, count(1) FROM T1_n82 GROUP BY key, val with cube
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n82
-#### A masked pattern was here ####
-1	11	1
-1	NULL	1
-2	12	1
-2	NULL	1
-3	13	1
-3	NULL	1
-7	17	1
-7	NULL	1
-8	18	1
-8	28	1
-8	NULL	2
-NULL	11	1
-NULL	12	1
-NULL	13	1
-NULL	17	1
-NULL	18	1
-NULL	28	1
-NULL	NULL	6
-PREHOOK: query: EXPLAIN
-SELECT key, count(distinct val) FROM T1_n82 GROUP BY key with cube
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n82
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT key, count(distinct val) FROM T1_n82 GROUP BY key with cube
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n82
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n82
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string), val (type: string)
-              outputColumnNames: key, val
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count(DISTINCT val)
-                keys: key (type: string), 0L (type: bigint), val (type: string)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(DISTINCT KEY._col2:0._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: bigint)
-          mode: partials
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: bigint)
-              null sort order: zz
-              sort order: ++
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col2 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: bigint)
-          mode: final
-          outputColumnNames: _col0, _col2
-          Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          Select Operator
-            expressions: _col0 (type: string), _col2 (type: bigint)
-            outputColumnNames: _col0, _col1
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT key, count(distinct val) FROM T1_n82 GROUP BY key with cube
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n82
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, count(distinct val) FROM T1_n82 GROUP BY key with cube
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n82
-#### A masked pattern was here ####
-1	1
-2	1
-3	1
-7	1
-8	2
-NULL	6
-PREHOOK: query: CREATE TABLE T2_n51(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@T2_n51
-POSTHOOK: query: CREATE TABLE T2_n51(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2_n51
-PREHOOK: query: CREATE TABLE T3_n16(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@T3_n16
-POSTHOOK: query: CREATE TABLE T3_n16(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T3_n16
-PREHOOK: query: EXPLAIN
-FROM T1_n82
-INSERT OVERWRITE TABLE T2_n51 SELECT key, val, count(1) group by key, val with cube
-INSERT OVERWRITE TABLE T3_n16 SELECT key, val, sum(1) group by key, val with cube
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n82
-PREHOOK: Output: default@t2_n51
-PREHOOK: Output: default@t3_n16
-POSTHOOK: query: EXPLAIN
-FROM T1_n82
-INSERT OVERWRITE TABLE T2_n51 SELECT key, val, count(1) group by key, val with cube
-INSERT OVERWRITE TABLE T3_n16 SELECT key, val, sum(1) group by key, val with cube
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n82
-POSTHOOK: Output: default@t2_n51
-POSTHOOK: Output: default@t3_n16
-STAGE DEPENDENCIES:
-  Stage-2 is a root stage
-  Stage-3 depends on stages: Stage-2
-  Stage-0 depends on stages: Stage-3
-  Stage-4 depends on stages: Stage-0, Stage-5, Stage-9
-  Stage-5 depends on stages: Stage-3
-  Stage-8 depends on stages: Stage-1, Stage-5, Stage-9
-  Stage-6 depends on stages: Stage-2
-  Stage-7 depends on stages: Stage-6
-  Stage-1 depends on stages: Stage-7
-  Stage-9 depends on stages: Stage-7
-
-STAGE PLANS:
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n82
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string), val (type: string)
-              outputColumnNames: key, val
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count(1)
-                keys: key (type: string), val (type: string), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: rand() (type: double)
-                  Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col3 (type: bigint)
-            Select Operator
-              expressions: key (type: string), val (type: string)
-              outputColumnNames: key, val
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: sum(1)
-                keys: key (type: string), val (type: string), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: partials
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-              null sort order: zzz
-              sort order: +++
-              Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-              Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col3 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: final
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          Select Operator
-            expressions: _col0 (type: string), _col1 (type: string), UDFToInteger(_col3) (type: int)
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.t2_n51
-            Select Operator
-              expressions: _col0 (type: string), _col1 (type: string), _col2 (type: int)
-              outputColumnNames: key1, key2, val
-              Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: compute_stats(key1, 'hll'), compute_stats(key2, 'hll'), compute_stats(val, 'hll')
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 1 Data size: 1680 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          replace: true
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.t2_n51
-
-  Stage: Stage-4
-    Stats Work
-      Basic Stats Work:
-      Column Stats Desc:
-          Columns: key1, key2, val
-          Column Types: string, string, int
-          Table: default.t2_n51
-
-  Stage: Stage-5
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              null sort order: 
-              sort order: 
-              Statistics: Num rows: 1 Data size: 1680 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2)
-          mode: final
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 1 Data size: 1696 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 1 Data size: 1696 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-8
-    Stats Work
-      Basic Stats Work:
-      Column Stats Desc:
-          Columns: key1, key2, val
-          Column Types: string, string, int
-          Table: default.t3_n16
-
-  Stage: Stage-6
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-              null sort order: zzz
-              sort order: +++
-              Map-reduce partition columns: rand() (type: double)
-              Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col3 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: sum(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: partials
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-7
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-              null sort order: zzz
-              sort order: +++
-              Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-              Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col3 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: sum(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: final
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          Select Operator
-            expressions: _col0 (type: string), _col1 (type: string), UDFToInteger(_col3) (type: int)
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.t3_n16
-            Select Operator
-              expressions: _col0 (type: string), _col1 (type: string), _col2 (type: int)
-              outputColumnNames: key1, key2, val
-              Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: compute_stats(key1, 'hll'), compute_stats(key2, 'hll'), compute_stats(val, 'hll')
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 1 Data size: 1680 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-1
-    Move Operator
-      tables:
-          replace: true
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.t3_n16
-
-  Stage: Stage-9
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              null sort order: 
-              sort order: 
-              Statistics: Num rows: 1 Data size: 1680 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2)
-          mode: final
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 1 Data size: 1696 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 1 Data size: 1696 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-PREHOOK: query: FROM T1_n82
-INSERT OVERWRITE TABLE T2_n51 SELECT key, val, count(1) group by key, val with cube
-INSERT OVERWRITE TABLE T3_n16 SELECT key, val, sum(1) group by key, val with cube
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n82
-PREHOOK: Output: default@t2_n51
-PREHOOK: Output: default@t3_n16
-POSTHOOK: query: FROM T1_n82
-INSERT OVERWRITE TABLE T2_n51 SELECT key, val, count(1) group by key, val with cube
-INSERT OVERWRITE TABLE T3_n16 SELECT key, val, sum(1) group by key, val with cube
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n82
-POSTHOOK: Output: default@t2_n51
-POSTHOOK: Output: default@t3_n16
-POSTHOOK: Lineage: t2_n51.key1 SIMPLE [(t1_n82)t1_n82.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: t2_n51.key2 SIMPLE [(t1_n82)t1_n82.FieldSchema(name:val, type:string, comment:null), ]
-POSTHOOK: Lineage: t2_n51.val EXPRESSION [(t1_n82)t1_n82.null, ]
-POSTHOOK: Lineage: t3_n16.key1 SIMPLE [(t1_n82)t1_n82.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: t3_n16.key2 SIMPLE [(t1_n82)t1_n82.FieldSchema(name:val, type:string, comment:null), ]
-POSTHOOK: Lineage: t3_n16.val EXPRESSION [(t1_n82)t1_n82.null, ]
diff --git a/ql/src/test/results/clientpositive/groupby_cube_multi_gby.q.out b/ql/src/test/results/clientpositive/groupby_cube_multi_gby.q.out
deleted file mode 100644
index af37eac..0000000
--- a/ql/src/test/results/clientpositive/groupby_cube_multi_gby.q.out
+++ /dev/null
@@ -1,250 +0,0 @@
-PREHOOK: query: create table t1_n21 like src
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@t1_n21
-POSTHOOK: query: create table t1_n21 like src
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t1_n21
-PREHOOK: query: create table t2_n13 like src
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@t2_n13
-POSTHOOK: query: create table t2_n13 like src
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t2_n13
-PREHOOK: query: explain from src
-insert into table t1_n21 select
-key, GROUPING__ID
-group by cube(key, value)
-insert into table t2_n13 select
-key, value
-group by key, value grouping sets ((key), (key, value))
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@t1_n21
-PREHOOK: Output: default@t2_n13
-POSTHOOK: query: explain from src
-insert into table t1_n21 select
-key, GROUPING__ID
-group by cube(key, value)
-insert into table t2_n13 select
-key, value
-group by key, value grouping sets ((key), (key, value))
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@t1_n21
-POSTHOOK: Output: default@t2_n13
-STAGE DEPENDENCIES:
-  Stage-2 is a root stage
-  Stage-0 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-0, Stage-4, Stage-7
-  Stage-4 depends on stages: Stage-2
-  Stage-6 depends on stages: Stage-1, Stage-4, Stage-7
-  Stage-5 depends on stages: Stage-2
-  Stage-1 depends on stages: Stage-5
-  Stage-7 depends on stages: Stage-5
-
-STAGE PLANS:
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: key, value
-              Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
-              Group By Operator
-                keys: key (type: string), value (type: string), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 1000 Data size: 186000 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  Statistics: Num rows: 1000 Data size: 186000 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: key, value
-              Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
-              Group By Operator
-                keys: key (type: string), value (type: string), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 1000 Data size: 186000 Basic stats: COMPLETE Column stats: COMPLETE
-          Select Operator
-            expressions: _col0 (type: string), CAST( _col2 AS STRING) (type: string)
-            outputColumnNames: _col0, _col1
-            Statistics: Num rows: 1000 Data size: 271000 Basic stats: COMPLETE Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 1000 Data size: 271000 Basic stats: COMPLETE Column stats: COMPLETE
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.t1_n21
-            Select Operator
-              expressions: _col0 (type: string), _col1 (type: string)
-              outputColumnNames: key, value
-              Statistics: Num rows: 1000 Data size: 271000 Basic stats: COMPLETE Column stats: COMPLETE
-              Group By Operator
-                aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll')
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          replace: false
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.t1_n21
-
-  Stage: Stage-3
-    Stats Work
-      Basic Stats Work:
-      Column Stats Desc:
-          Columns: key, value
-          Column Types: string, string
-          Table: default.t1_n21
-
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              null sort order: 
-              sort order: 
-              Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
-              value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-6
-    Stats Work
-      Basic Stats Work:
-      Column Stats Desc:
-          Columns: key, value
-          Column Types: string, string
-          Table: default.t2_n13
-
-  Stage: Stage-5
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-              null sort order: zzz
-              sort order: +++
-              Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-              Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE
-          pruneGroupingSetId: true
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.t2_n13
-          Select Operator
-            expressions: _col0 (type: string), _col1 (type: string)
-            outputColumnNames: key, value
-            Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
-            Group By Operator
-              aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll')
-              minReductionHashAggr: 0.99
-              mode: hash
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
-              File Output Operator
-                compressed: false
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-1
-    Move Operator
-      tables:
-          replace: false
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.t2_n13
-
-  Stage: Stage-7
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              null sort order: 
-              sort order: 
-              Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
-              value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
diff --git a/ql/src/test/results/clientpositive/groupby_duplicate_key.q.out b/ql/src/test/results/clientpositive/groupby_duplicate_key.q.out
deleted file mode 100644
index 44e8ef6..0000000
--- a/ql/src/test/results/clientpositive/groupby_duplicate_key.q.out
+++ /dev/null
@@ -1,319 +0,0 @@
-PREHOOK: query: explain
-select distinct key, "" as dummy1, "" as dummy2 from src tablesample (10 rows)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: explain
-select distinct key, "" as dummy1, "" as dummy2 from src tablesample (10 rows)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Row Limit Per Split: 10
-            Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: key (type: string)
-              outputColumnNames: key
-              Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-              Group By Operator
-                keys: key (type: string), '' (type: string), '' (type: string)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 250 Data size: 63750 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), '' (type: string)
-                  null sort order: zz
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: string), '' (type: string)
-                  Statistics: Num rows: 250 Data size: 63750 Basic stats: COMPLETE Column stats: COMPLETE
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: string), '' (type: string), '' (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 250 Data size: 63750 Basic stats: COMPLETE Column stats: COMPLETE
-          Select Operator
-            expressions: _col0 (type: string), '' (type: string), '' (type: string)
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 250 Data size: 63750 Basic stats: COMPLETE Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 250 Data size: 63750 Basic stats: COMPLETE Column stats: COMPLETE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select distinct key, "" as dummy1, "" as dummy2 from src tablesample (10 rows)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: select distinct key, "" as dummy1, "" as dummy2 from src tablesample (10 rows)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-165		
-238		
-255		
-27		
-278		
-311		
-409		
-484		
-86		
-98		
-PREHOOK: query: explain
-create table dummy_n6 as
-select distinct key, "X" as dummy1, "X" as dummy2 from src tablesample (10 rows)
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@src
-PREHOOK: Output: database:default
-PREHOOK: Output: default@dummy_n6
-POSTHOOK: query: explain
-create table dummy_n6 as
-select distinct key, "X" as dummy1, "X" as dummy2 from src tablesample (10 rows)
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@src
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dummy_n6
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-4 depends on stages: Stage-0, Stage-3
-  Stage-2 depends on stages: Stage-4
-  Stage-3 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Row Limit Per Split: 10
-            Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: key (type: string)
-              outputColumnNames: key
-              Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-              Group By Operator
-                keys: key (type: string), 'X' (type: string), 'X' (type: string)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 250 Data size: 64250 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), 'X' (type: string)
-                  null sort order: zz
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: string), 'X' (type: string)
-                  Statistics: Num rows: 250 Data size: 64250 Basic stats: COMPLETE Column stats: COMPLETE
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: string), 'X' (type: string), 'X' (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 250 Data size: 64250 Basic stats: COMPLETE Column stats: COMPLETE
-          Select Operator
-            expressions: _col0 (type: string), 'X' (type: string), 'X' (type: string)
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 250 Data size: 64250 Basic stats: COMPLETE Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 250 Data size: 64250 Basic stats: COMPLETE Column stats: COMPLETE
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.dummy_n6
-            Select Operator
-              expressions: _col0 (type: string), 'X' (type: string), 'X' (type: string)
-              outputColumnNames: col1, col2, col3
-              Statistics: Num rows: 250 Data size: 64250 Basic stats: COMPLETE Column stats: COMPLETE
-              Group By Operator
-                aggregations: compute_stats(col1, 'hll'), compute_stats(col2, 'hll'), compute_stats(col3, 'hll')
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-0
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-  Stage: Stage-4
-    Create Table
-      columns: key string, dummy1 string, dummy2 string
-      name: default.dummy_n6
-      input format: org.apache.hadoop.mapred.TextInputFormat
-      output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-      serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-2
-    Stats Work
-      Basic Stats Work:
-      Column Stats Desc:
-          Columns: key, dummy1, dummy2
-          Column Types: string, string, string
-          Table: default.dummy_n6
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              null sort order: 
-              sort order: 
-              Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
-              value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-PREHOOK: query: create table dummy_n6 as
-select distinct key, "X" as dummy1, "X" as dummy2 from src tablesample (10 rows)
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@src
-PREHOOK: Output: database:default
-PREHOOK: Output: default@dummy_n6
-POSTHOOK: query: create table dummy_n6 as
-select distinct key, "X" as dummy1, "X" as dummy2 from src tablesample (10 rows)
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@src
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dummy_n6
-POSTHOOK: Lineage: dummy_n6.dummy1 SIMPLE []
-POSTHOOK: Lineage: dummy_n6.dummy2 SIMPLE []
-POSTHOOK: Lineage: dummy_n6.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: select key,dummy1,dummy2 from dummy_n6
-PREHOOK: type: QUERY
-PREHOOK: Input: default@dummy_n6
-#### A masked pattern was here ####
-POSTHOOK: query: select key,dummy1,dummy2 from dummy_n6
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@dummy_n6
-#### A masked pattern was here ####
-165	X	X
-238	X	X
-255	X	X
-27	X	X
-278	X	X
-311	X	X
-409	X	X
-484	X	X
-86	X	X
-98	X	X
-PREHOOK: query: explain
-select max('pants'), max('pANTS') from src group by key limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: explain
-select max('pants'), max('pANTS') from src group by key limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: key (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-              Group By Operator
-                aggregations: max('pants'), max('pANTS')
-                keys: _col0 (type: string)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 250 Data size: 113750 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  null sort order: z
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 250 Data size: 113750 Basic stats: COMPLETE Column stats: COMPLETE
-                  TopN Hash Memory Usage: 0.1
-                  value expressions: _col1 (type: string), _col2 (type: string)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: max(VALUE._col0), max(VALUE._col1)
-          keys: KEY._col0 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 250 Data size: 113750 Basic stats: COMPLETE Column stats: COMPLETE
-          Select Operator
-            expressions: _col1 (type: string), _col2 (type: string)
-            outputColumnNames: _col0, _col1
-            Statistics: Num rows: 250 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE
-            Limit
-              Number of rows: 1
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select max('pants'), max('pANTS') from src group by key limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: select max('pants'), max('pANTS') from src group by key limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-pants	pANTS
diff --git a/ql/src/test/results/clientpositive/groupby_grouping_id3.q.out b/ql/src/test/results/clientpositive/groupby_grouping_id3.q.out
deleted file mode 100644
index cdc063b..0000000
--- a/ql/src/test/results/clientpositive/groupby_grouping_id3.q.out
+++ /dev/null
@@ -1,207 +0,0 @@
-PREHOOK: query: CREATE TABLE T1_n86(key INT, value INT) STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@T1_n86
-POSTHOOK: query: CREATE TABLE T1_n86(key INT, value INT) STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1_n86
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1_n86
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@t1_n86
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1_n86
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@t1_n86
-PREHOOK: query: EXPLAIN
-SELECT key, value, GROUPING__ID, count(*)
-FROM T1_n86
-GROUP BY key, value
-GROUPING SETS ((), (key))
-HAVING GROUPING__ID = 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n86
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT key, value, GROUPING__ID, count(*)
-FROM T1_n86
-GROUP BY key, value
-GROUPING SETS ((), (key))
-HAVING GROUPING__ID = 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n86
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n86
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: int), value (type: int)
-              outputColumnNames: key, value
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count()
-                keys: key (type: int), value (type: int), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-                Filter Operator
-                  predicate: (_col2 = 1L) (type: boolean)
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: int), _col1 (type: int), 1L (type: bigint)
-                    null sort order: zzz
-                    sort order: +++
-                    Map-reduce partition columns: _col0 (type: int), _col1 (type: int), 1L (type: bigint)
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col3 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: int), KEY._col1 (type: int), 1L (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          Select Operator
-            expressions: _col0 (type: int), _col1 (type: int), 1L (type: bigint), _col3 (type: bigint)
-            outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT key, value, GROUPING__ID, count(*)
-FROM T1_n86
-GROUP BY key, value
-GROUPING SETS ((), (key))
-HAVING GROUPING__ID = 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n86
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value, GROUPING__ID, count(*)
-FROM T1_n86
-GROUP BY key, value
-GROUPING SETS ((), (key))
-HAVING GROUPING__ID = 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n86
-#### A masked pattern was here ####
-1	NULL	1	2
-2	NULL	1	1
-3	NULL	1	2
-4	NULL	1	1
-PREHOOK: query: EXPLAIN
-SELECT key, value, GROUPING__ID, count(*)
-FROM T1_n86
-GROUP BY key, value
-GROUPING SETS ((), (key))
-HAVING GROUPING__ID = 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n86
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT key, value, GROUPING__ID, count(*)
-FROM T1_n86
-GROUP BY key, value
-GROUPING SETS ((), (key))
-HAVING GROUPING__ID = 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n86
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n86
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: int), value (type: int)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count()
-                keys: _col0 (type: int), _col1 (type: int), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: bigint)
-                  Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col3 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            predicate: (_col2 = 1L) (type: boolean)
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: _col0 (type: int), _col1 (type: int), 1L (type: bigint), _col3 (type: bigint)
-              outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT key, value, GROUPING__ID, count(*)
-FROM T1_n86
-GROUP BY key, value
-GROUPING SETS ((), (key))
-HAVING GROUPING__ID = 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n86
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value, GROUPING__ID, count(*)
-FROM T1_n86
-GROUP BY key, value
-GROUPING SETS ((), (key))
-HAVING GROUPING__ID = 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n86
-#### A masked pattern was here ####
-1	NULL	1	2
-2	NULL	1	1
-3	NULL	1	2
-4	NULL	1	1
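(Reference sketch, not part of the deleted output: the HAVING GROUPING__ID = 1 filter above keeps only the (key) grouping set. In recent Hive releases GROUPING__ID uses the standard encoding of one bit per GROUP BY column, most significant bit first, set to 1 when the column is aggregated away, which is consistent with the rows shown above.)

-- GROUP BY key, value: key is the high bit, value is the low bit
--   (key, value) -> 0, (key) -> 1, (value) -> 2, () -> 3
SELECT key, value, GROUPING__ID, count(*)
FROM T1_n86
GROUP BY key, value
GROUPING SETS ((), (key))
HAVING GROUPING__ID = 1   -- keeps the (key) rows, hence value is NULL in the output above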
diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets1.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets1.q.out
deleted file mode 100644
index 43ab99b..0000000
--- a/ql/src/test/results/clientpositive/groupby_grouping_sets1.q.out
+++ /dev/null
@@ -1,620 +0,0 @@
-PREHOOK: query: CREATE TABLE T1_n41(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@T1_n41
-POSTHOOK: query: CREATE TABLE T1_n41(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1_n41
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_n41
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@t1_n41
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_n41
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@t1_n41
-PREHOOK: query: SELECT * FROM T1_n41
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n41
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM T1_n41
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n41
-#### A masked pattern was here ####
-t1_n41.a	t1_n41.b	t1_n41.c
-1	1	3
-2	2	4
-2	3	5
-3	2	8
-5	2	2
-8	1	1
-PREHOOK: query: EXPLAIN
-SELECT a, b, count(*) from T1_n41 group by a, b with cube
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n41
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT a, b, count(*) from T1_n41 group by a, b with cube
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n41
-#### A masked pattern was here ####
-Explain
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n41
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: a (type: string), b (type: string)
-              outputColumnNames: a, b
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count()
-                keys: a (type: string), b (type: string), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col3 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          Select Operator
-            expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT a, b, count(*) from T1_n41 group by a, b with cube
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n41
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a, b, count(*) from T1_n41 group by a, b with cube
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n41
-#### A masked pattern was here ####
-a	b	_c2
-1	1	1
-1	NULL	1
-2	2	1
-2	3	1
-2	NULL	2
-3	2	1
-3	NULL	1
-5	2	1
-5	NULL	1
-8	1	1
-8	NULL	1
-NULL	1	2
-NULL	2	3
-NULL	3	1
-NULL	NULL	6
-PREHOOK: query: EXPLAIN
-SELECT a, b, count(*) from T1_n41 group by cube(a, b)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n41
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT a, b, count(*) from T1_n41 group by cube(a, b)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n41
-#### A masked pattern was here ####
-Explain
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n41
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: a (type: string), b (type: string)
-              outputColumnNames: a, b
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count()
-                keys: a (type: string), b (type: string), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col3 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          Select Operator
-            expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT a, b, count(*) from T1_n41 group by cube(a, b)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n41
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a, b, count(*) from T1_n41 group by cube(a, b)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n41
-#### A masked pattern was here ####
-a	b	_c2
-1	1	1
-1	NULL	1
-2	2	1
-2	3	1
-2	NULL	2
-3	2	1
-3	NULL	1
-5	2	1
-5	NULL	1
-8	1	1
-8	NULL	1
-NULL	1	2
-NULL	2	3
-NULL	3	1
-NULL	NULL	6
-PREHOOK: query: EXPLAIN
-SELECT a, b, count(*) FROM T1_n41 GROUP BY a, b  GROUPING SETS (a, (a, b), b, ())
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n41
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT a, b, count(*) FROM T1_n41 GROUP BY a, b  GROUPING SETS (a, (a, b), b, ())
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n41
-#### A masked pattern was here ####
-Explain
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n41
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: a (type: string), b (type: string)
-              outputColumnNames: a, b
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count()
-                keys: a (type: string), b (type: string), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col3 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          Select Operator
-            expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT a, b, count(*) FROM T1_n41 GROUP BY a, b  GROUPING SETS (a, (a, b), b, ())
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n41
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a, b, count(*) FROM T1_n41 GROUP BY a, b  GROUPING SETS (a, (a, b), b, ())
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n41
-#### A masked pattern was here ####
-a	b	_c2
-1	1	1
-1	NULL	1
-2	2	1
-2	3	1
-2	NULL	2
-3	2	1
-3	NULL	1
-5	2	1
-5	NULL	1
-8	1	1
-8	NULL	1
-NULL	1	2
-NULL	2	3
-NULL	3	1
-NULL	NULL	6
-PREHOOK: query: EXPLAIN
-SELECT a, b, count(*) FROM T1_n41 GROUP BY a, b GROUPING SETS (a, (a, b))
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n41
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT a, b, count(*) FROM T1_n41 GROUP BY a, b GROUPING SETS (a, (a, b))
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n41
-#### A masked pattern was here ####
-Explain
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n41
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: a (type: string), b (type: string)
-              outputColumnNames: a, b
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count()
-                keys: a (type: string), b (type: string), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col3 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          Select Operator
-            expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT a, b, count(*) FROM T1_n41 GROUP BY a, b GROUPING SETS (a, (a, b))
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n41
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a, b, count(*) FROM T1_n41 GROUP BY a, b GROUPING SETS (a, (a, b))
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n41
-#### A masked pattern was here ####
-a	b	_c2
-1	1	1
-1	NULL	1
-2	2	1
-2	3	1
-2	NULL	2
-3	2	1
-3	NULL	1
-5	2	1
-5	NULL	1
-8	1	1
-8	NULL	1
-PREHOOK: query: EXPLAIN
-SELECT a FROM T1_n41 GROUP BY a, b, c GROUPING SETS (a, b, c)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n41
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT a FROM T1_n41 GROUP BY a, b, c GROUPING SETS (a, b, c)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n41
-#### A masked pattern was here ####
-Explain
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n41
-            Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: a (type: string), b (type: string), c (type: string)
-              outputColumnNames: a, b, c
-              Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                keys: a (type: string), b (type: string), c (type: string), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 3 Data size: 1656 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: bigint)
-                  null sort order: zzzz
-                  sort order: ++++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: bigint)
-                  Statistics: Num rows: 3 Data size: 1656 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string), KEY._col3 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          Select Operator
-            expressions: _col0 (type: string)
-            outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT a FROM T1_n41 GROUP BY a, b, c GROUPING SETS (a, b, c)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n41
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a FROM T1_n41 GROUP BY a, b, c GROUPING SETS (a, b, c)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n41
-#### A masked pattern was here ####
-a
-1
-2
-3
-5
-8
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-NULL
-PREHOOK: query: EXPLAIN
-SELECT a FROM T1_n41 GROUP BY a GROUPING SETS ((a), (a))
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n41
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT a FROM T1_n41 GROUP BY a GROUPING SETS ((a), (a))
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n41
-#### A masked pattern was here ####
-Explain
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n41
-            Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: a (type: string)
-              outputColumnNames: a
-              Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                keys: a (type: string)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  null sort order: z
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT a FROM T1_n41 GROUP BY a GROUPING SETS ((a), (a))
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n41
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a FROM T1_n41 GROUP BY a GROUPING SETS ((a), (a))
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n41
-#### A masked pattern was here ####
-a
-1
-2
-3
-5
-8
-PREHOOK: query: EXPLAIN
-SELECT a + b, count(*) FROM T1_n41 GROUP BY a + b GROUPING SETS (a+b)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n41
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT a + b, count(*) FROM T1_n41 GROUP BY a + b GROUPING SETS (a+b)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n41
-#### A masked pattern was here ####
-Explain
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n41
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: (UDFToDouble(a) + UDFToDouble(b)) (type: double)
-              outputColumnNames: _col0
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count()
-                keys: _col0 (type: double)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: double)
-                  null sort order: z
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: double)
-                  Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: double)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT a + b, count(*) FROM T1_n41 GROUP BY a + b GROUPING SETS (a+b)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n41
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a + b, count(*) FROM T1_n41 GROUP BY a + b GROUPING SETS (a+b)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n41
-#### A masked pattern was here ####
-_c0	_c1
-2.0	1
-4.0	1
-5.0	2
-7.0	1
-9.0	1
diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out
deleted file mode 100644
index 7831a49..0000000
--- a/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out
+++ /dev/null
@@ -1,479 +0,0 @@
-PREHOOK: query: CREATE TABLE T1_n81(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@T1_n81
-POSTHOOK: query: CREATE TABLE T1_n81(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1_n81
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_n81
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@t1_n81
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_n81
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@t1_n81
-PREHOOK: query: EXPLAIN
-SELECT a, b, count(*) from T1_n81 group by a, b with cube
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n81
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT a, b, count(*) from T1_n81 group by a, b with cube
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n81
-#### A masked pattern was here ####
-Explain
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n81
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: a (type: string), b (type: string)
-              outputColumnNames: a, b
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count()
-                keys: a (type: string), b (type: string)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string)
-                  null sort order: zz
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                  Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col2 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), 0L (type: bigint)
-          mode: partials
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-              null sort order: zzz
-              sort order: +++
-              Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-              Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col3 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: final
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          Select Operator
-            expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: EXPLAIN
-SELECT a, b, count(*) from T1_n81 group by cube(a, b)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n81
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT a, b, count(*) from T1_n81 group by cube(a, b)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n81
-#### A masked pattern was here ####
-Explain
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n81
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: a (type: string), b (type: string)
-              outputColumnNames: a, b
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count()
-                keys: a (type: string), b (type: string)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string)
-                  null sort order: zz
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                  Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col2 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), 0L (type: bigint)
-          mode: partials
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-              null sort order: zzz
-              sort order: +++
-              Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-              Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col3 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: final
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          Select Operator
-            expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT a, b, count(*) from T1_n81 group by a, b with cube
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n81
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a, b, count(*) from T1_n81 group by a, b with cube
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n81
-#### A masked pattern was here ####
-a	b	_c2
-1	1	1
-1	NULL	1
-2	2	1
-2	3	1
-2	NULL	2
-3	2	1
-3	NULL	1
-5	2	1
-5	NULL	1
-8	1	1
-8	NULL	1
-NULL	1	2
-NULL	2	3
-NULL	3	1
-NULL	NULL	6
-PREHOOK: query: EXPLAIN
-SELECT a, b, sum(c) from T1_n81 group by a, b with cube
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n81
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT a, b, sum(c) from T1_n81 group by a, b with cube
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n81
-#### A masked pattern was here ####
-Explain
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n81
-            Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: a (type: string), b (type: string), c (type: string)
-              outputColumnNames: a, b, c
-              Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: sum(c)
-                keys: a (type: string), b (type: string)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string)
-                  null sort order: zz
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                  Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col2 (type: double)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: sum(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), 0L (type: bigint)
-          mode: partials
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 4 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-              null sort order: zzz
-              sort order: +++
-              Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-              Statistics: Num rows: 4 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col3 (type: double)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: sum(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: final
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 2 Data size: 1104 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          Select Operator
-            expressions: _col0 (type: string), _col1 (type: string), _col3 (type: double)
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 2 Data size: 1104 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 2 Data size: 1104 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT a, b, sum(c) from T1_n81 group by a, b with cube
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n81
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a, b, sum(c) from T1_n81 group by a, b with cube
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n81
-#### A masked pattern was here ####
-a	b	_c2
-1	1	3.0
-1	NULL	3.0
-2	2	4.0
-2	3	5.0
-2	NULL	9.0
-3	2	8.0
-3	NULL	8.0
-5	2	2.0
-5	NULL	2.0
-8	1	1.0
-8	NULL	1.0
-NULL	1	4.0
-NULL	2	14.0
-NULL	3	5.0
-NULL	NULL	23.0
-PREHOOK: query: CREATE TABLE T2_n50(a STRING, b STRING, c int, d int)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@T2_n50
-POSTHOOK: query: CREATE TABLE T2_n50(a STRING, b STRING, c int, d int)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T2_n50
-PREHOOK: query: INSERT OVERWRITE TABLE T2_n50
-SELECT a, b, c, c from T1_n81
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n81
-PREHOOK: Output: default@t2_n50
-POSTHOOK: query: INSERT OVERWRITE TABLE T2_n50
-SELECT a, b, c, c from T1_n81
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n81
-POSTHOOK: Output: default@t2_n50
-POSTHOOK: Lineage: t2_n50.a SIMPLE [(t1_n81)t1_n81.FieldSchema(name:a, type:string, comment:null), ]
-POSTHOOK: Lineage: t2_n50.b SIMPLE [(t1_n81)t1_n81.FieldSchema(name:b, type:string, comment:null), ]
-POSTHOOK: Lineage: t2_n50.c EXPRESSION [(t1_n81)t1_n81.FieldSchema(name:c, type:string, comment:null), ]
-POSTHOOK: Lineage: t2_n50.d EXPRESSION [(t1_n81)t1_n81.FieldSchema(name:c, type:string, comment:null), ]
-_col0	_col1	_col2	_col3
-PREHOOK: query: EXPLAIN
-SELECT a, b, sum(c+d) from T2_n50 group by a, b with cube
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t2_n50
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT a, b, sum(c+d) from T2_n50 group by a, b with cube
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t2_n50
-#### A masked pattern was here ####
-Explain
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t2_n50
-            Statistics: Num rows: 6 Data size: 1068 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: a (type: string), b (type: string), (c + d) (type: int)
-              outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 6 Data size: 1068 Basic stats: COMPLETE Column stats: COMPLETE
-              Group By Operator
-                aggregations: sum(_col2)
-                keys: _col0 (type: string), _col1 (type: string)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 3 Data size: 534 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string)
-                  null sort order: zz
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                  Statistics: Num rows: 3 Data size: 534 Basic stats: COMPLETE Column stats: COMPLETE
-                  value expressions: _col2 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: sum(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), 0L (type: bigint)
-          mode: partials
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 12 Data size: 2232 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-              null sort order: zzz
-              sort order: +++
-              Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-              Statistics: Num rows: 12 Data size: 2232 Basic stats: COMPLETE Column stats: COMPLETE
-              value expressions: _col3 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: sum(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: final
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 12 Data size: 2232 Basic stats: COMPLETE Column stats: COMPLETE
-          pruneGroupingSetId: true
-          Select Operator
-            expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 12 Data size: 2136 Basic stats: COMPLETE Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 12 Data size: 2136 Basic stats: COMPLETE Column stats: COMPLETE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT a, b, sum(c+d) from T2_n50 group by a, b with cube
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t2_n50
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a, b, sum(c+d) from T2_n50 group by a, b with cube
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t2_n50
-#### A masked pattern was here ####
-a	b	_c2
-1	1	6
-1	NULL	6
-2	2	8
-2	3	10
-2	NULL	18
-3	2	16
-3	NULL	16
-5	2	4
-5	NULL	4
-8	1	2
-8	NULL	2
-NULL	1	8
-NULL	2	28
-NULL	3	10
-NULL	NULL	46
diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out
deleted file mode 100644
index a08dd02..0000000
--- a/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out
+++ /dev/null
@@ -1,298 +0,0 @@
-PREHOOK: query: CREATE TABLE T1_n118(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@T1_n118
-POSTHOOK: query: CREATE TABLE T1_n118(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1_n118
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets1.txt' INTO TABLE T1_n118
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@t1_n118
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets1.txt' INTO TABLE T1_n118
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@t1_n118
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets2.txt' INTO TABLE T1_n118
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@t1_n118
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets2.txt' INTO TABLE T1_n118
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@t1_n118
-PREHOOK: query: EXPLAIN
-SELECT a, b, avg(c), count(*) from T1_n118 group by a, b with cube
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n118
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT a, b, avg(c), count(*) from T1_n118 group by a, b with cube
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n118
-#### A masked pattern was here ####
-Explain
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n118
-            Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: a (type: string), b (type: string), c (type: string)
-              outputColumnNames: a, b, c
-              Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: sum(c), count(c), count()
-                keys: a (type: string), b (type: string), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                Statistics: Num rows: 4 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  Statistics: Num rows: 4 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col3 (type: double), _col4 (type: bigint), _col5 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: sum(VALUE._col0), count(VALUE._col1), count(VALUE._col2)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col3, _col4, _col5
-          Statistics: Num rows: 2 Data size: 1104 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          Select Operator
-            expressions: _col0 (type: string), _col1 (type: string), (_col3 / _col4) (type: double), _col5 (type: bigint)
-            outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 2 Data size: 1104 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 2 Data size: 1104 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: EXPLAIN
-SELECT a, b, avg(c), count(*) from T1_n118 group by cube(a, b)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n118
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT a, b, avg(c), count(*) from T1_n118 group by cube(a, b)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n118
-#### A masked pattern was here ####
-Explain
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n118
-            Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: a (type: string), b (type: string), c (type: string)
-              outputColumnNames: a, b, c
-              Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: sum(c), count(c), count()
-                keys: a (type: string), b (type: string), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                Statistics: Num rows: 4 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  Statistics: Num rows: 4 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col3 (type: double), _col4 (type: bigint), _col5 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: sum(VALUE._col0), count(VALUE._col1), count(VALUE._col2)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col3, _col4, _col5
-          Statistics: Num rows: 2 Data size: 1104 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          Select Operator
-            expressions: _col0 (type: string), _col1 (type: string), (_col3 / _col4) (type: double), _col5 (type: bigint)
-            outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 2 Data size: 1104 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 2 Data size: 1104 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT a, b, avg(c), count(*) from T1_n118 group by a, b with cube
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n118
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a, b, avg(c), count(*) from T1_n118 group by a, b with cube
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n118
-#### A masked pattern was here ####
-a	b	_c2	_c3
-1	1	3.0	2
-1	2	2.0	1
-1	NULL	2.6666666666666665	3
-2	2	5.333333333333333	3
-2	3	5.0	2
-2	NULL	5.2	5
-3	2	8.0	1
-3	NULL	8.0	1
-5	1	2.0	1
-5	NULL	2.0	1
-8	1	1.0	2
-8	NULL	1.0	2
-NULL	1	2.0	5
-NULL	2	5.2	5
-NULL	3	5.0	2
-NULL	NULL	3.8333333333333335	12
-PREHOOK: query: EXPLAIN
-SELECT a, b, avg(c), count(*) from T1_n118 group by a, b with cube
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n118
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT a, b, avg(c), count(*) from T1_n118 group by a, b with cube
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n118
-#### A masked pattern was here ####
-Explain
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n118
-            Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: a (type: string), b (type: string), c (type: string)
-              outputColumnNames: a, b, c
-              Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: sum(c), count(c), count()
-                keys: a (type: string), b (type: string)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string)
-                  null sort order: zz
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                  Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col2 (type: double), _col3 (type: bigint), _col4 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: sum(VALUE._col0), count(VALUE._col1), count(VALUE._col2)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), 0L (type: bigint)
-          mode: partials
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-          Statistics: Num rows: 4 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-              null sort order: zzz
-              sort order: +++
-              Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-              Statistics: Num rows: 4 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col3 (type: double), _col4 (type: bigint), _col5 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: sum(VALUE._col0), count(VALUE._col1), count(VALUE._col2)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: final
-          outputColumnNames: _col0, _col1, _col3, _col4, _col5
-          Statistics: Num rows: 2 Data size: 1104 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          Select Operator
-            expressions: _col0 (type: string), _col1 (type: string), (_col3 / _col4) (type: double), _col5 (type: bigint)
-            outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 2 Data size: 1104 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 2 Data size: 1104 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT a, b, avg(c), count(*) from T1_n118 group by a, b with cube
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n118
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a, b, avg(c), count(*) from T1_n118 group by a, b with cube
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n118
-#### A masked pattern was here ####
-a	b	_c2	_c3
-1	1	3.0	2
-1	2	2.0	1
-1	NULL	2.6666666666666665	3
-2	2	5.333333333333333	3
-2	3	5.0	2
-2	NULL	5.2	5
-3	2	8.0	1
-3	NULL	8.0	1
-5	1	2.0	1
-5	NULL	2.0	1
-8	1	1.0	2
-8	NULL	1.0	2
-NULL	1	2.0	5
-NULL	2	5.2	5
-NULL	3	5.0	2
-NULL	NULL	3.8333333333333335	12
diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets4.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets4.q.out
deleted file mode 100644
index b61aba9..0000000
--- a/ql/src/test/results/clientpositive/groupby_grouping_sets4.q.out
+++ /dev/null
@@ -1,603 +0,0 @@
-PREHOOK: query: CREATE TABLE T1_n143(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@T1_n143
-POSTHOOK: query: CREATE TABLE T1_n143(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1_n143
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_n143
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@t1_n143
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_n143
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@t1_n143
-PREHOOK: query: EXPLAIN
-SELECT * FROM
-(SELECT a, b, count(*) from T1_n143 where a < 3 group by a, b with cube) subq1
-join
-(SELECT a, b, count(*) from T1_n143 where a < 3 group by a, b with cube) subq2
-on subq1.a = subq2.a
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n143
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT * FROM
-(SELECT a, b, count(*) from T1_n143 where a < 3 group by a, b with cube) subq1
-join
-(SELECT a, b, count(*) from T1_n143 where a < 3 group by a, b with cube) subq2
-on subq1.a = subq2.a
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n143
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-3
-  Stage-3 is a root stage
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n143
-            filterExpr: (UDFToDouble(a) < 3.0D) (type: boolean)
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (UDFToDouble(a) < 3.0D) (type: boolean)
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count()
-                keys: a (type: string), b (type: string), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col3 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          Filter Operator
-            predicate: _col0 is not null (type: boolean)
-            Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
-              outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              null sort order: z
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col1 (type: string), _col2 (type: bigint)
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              null sort order: z
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col1 (type: string), _col2 (type: bigint)
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 _col0 (type: string)
-            1 _col0 (type: string)
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-          Statistics: Num rows: 2 Data size: 809 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 2 Data size: 809 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n143
-            filterExpr: (UDFToDouble(a) < 3.0D) (type: boolean)
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (UDFToDouble(a) < 3.0D) (type: boolean)
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count()
-                keys: a (type: string), b (type: string), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col3 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          Filter Operator
-            predicate: _col0 is not null (type: boolean)
-            Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
-              outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: EXPLAIN
-SELECT * FROM
-(SELECT a, b, count(*) from T1_n143 where a < 3 group by cube(a, b) ) subq1
-join
-(SELECT a, b, count(*) from T1_n143 where a < 3 group by cube(a, b) ) subq2
-on subq1.a = subq2.a
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n143
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT * FROM
-(SELECT a, b, count(*) from T1_n143 where a < 3 group by cube(a, b) ) subq1
-join
-(SELECT a, b, count(*) from T1_n143 where a < 3 group by cube(a, b) ) subq2
-on subq1.a = subq2.a
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n143
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-3
-  Stage-3 is a root stage
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n143
-            filterExpr: (UDFToDouble(a) < 3.0D) (type: boolean)
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (UDFToDouble(a) < 3.0D) (type: boolean)
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count()
-                keys: a (type: string), b (type: string), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col3 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          Filter Operator
-            predicate: _col0 is not null (type: boolean)
-            Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
-              outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              null sort order: z
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col1 (type: string), _col2 (type: bigint)
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              null sort order: z
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col1 (type: string), _col2 (type: bigint)
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 _col0 (type: string)
-            1 _col0 (type: string)
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-          Statistics: Num rows: 2 Data size: 809 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 2 Data size: 809 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n143
-            filterExpr: (UDFToDouble(a) < 3.0D) (type: boolean)
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (UDFToDouble(a) < 3.0D) (type: boolean)
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count()
-                keys: a (type: string), b (type: string), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col3 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          Filter Operator
-            predicate: _col0 is not null (type: boolean)
-            Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
-              outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT * FROM
-(SELECT a, b, count(*) from T1_n143 where a < 3 group by a, b with cube) subq1
-join
-(SELECT a, b, count(*) from T1_n143 where a < 3 group by a, b with cube) subq2
-on subq1.a = subq2.a
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n143
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM
-(SELECT a, b, count(*) from T1_n143 where a < 3 group by a, b with cube) subq1
-join
-(SELECT a, b, count(*) from T1_n143 where a < 3 group by a, b with cube) subq2
-on subq1.a = subq2.a
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n143
-#### A masked pattern was here ####
-1	1	1	1	1	1
-1	1	1	1	NULL	1
-1	NULL	1	1	1	1
-1	NULL	1	1	NULL	1
-2	2	1	2	2	1
-2	2	1	2	3	1
-2	2	1	2	NULL	2
-2	3	1	2	2	1
-2	3	1	2	3	1
-2	3	1	2	NULL	2
-2	NULL	2	2	2	1
-2	NULL	2	2	3	1
-2	NULL	2	2	NULL	2
-PREHOOK: query: EXPLAIN
-SELECT * FROM
-(SELECT a, b, count(*) from T1_n143 where a < 3 group by a, b with cube) subq1
-join
-(SELECT a, b, count(*) from T1_n143 where a < 3 group by a, b with cube) subq2
-on subq1.a = subq2.a
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n143
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT * FROM
-(SELECT a, b, count(*) from T1_n143 where a < 3 group by a, b with cube) subq1
-join
-(SELECT a, b, count(*) from T1_n143 where a < 3 group by a, b with cube) subq2
-on subq1.a = subq2.a
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n143
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-3 depends on stages: Stage-2, Stage-5
-  Stage-4 is a root stage
-  Stage-5 depends on stages: Stage-4
-  Stage-0 depends on stages: Stage-3
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n143
-            filterExpr: (UDFToDouble(a) < 3.0D) (type: boolean)
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (UDFToDouble(a) < 3.0D) (type: boolean)
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count()
-                keys: a (type: string), b (type: string)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string)
-                  null sort order: zz
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                  Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col2 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), 0L (type: bigint)
-          mode: partials
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-              null sort order: zzz
-              sort order: +++
-              Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-              Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col3 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: final
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          Filter Operator
-            predicate: _col0 is not null (type: boolean)
-            Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
-              outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              null sort order: z
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col1 (type: string), _col2 (type: bigint)
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              null sort order: z
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col1 (type: string), _col2 (type: bigint)
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 _col0 (type: string)
-            1 _col0 (type: string)
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-          Statistics: Num rows: 2 Data size: 809 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 2 Data size: 809 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n143
-            filterExpr: (UDFToDouble(a) < 3.0D) (type: boolean)
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (UDFToDouble(a) < 3.0D) (type: boolean)
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count()
-                keys: a (type: string), b (type: string)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string)
-                  null sort order: zz
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                  Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col2 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), 0L (type: bigint)
-          mode: partials
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-5
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-              null sort order: zzz
-              sort order: +++
-              Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-              Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col3 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: final
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          Filter Operator
-            predicate: _col0 is not null (type: boolean)
-            Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
-              outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT * FROM
-(SELECT a, b, count(*) from T1_n143 where a < 3 group by a, b with cube) subq1
-join
-(SELECT a, b, count(*) from T1_n143 where a < 3 group by a, b with cube) subq2
-on subq1.a = subq2.a
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n143
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM
-(SELECT a, b, count(*) from T1_n143 where a < 3 group by a, b with cube) subq1
-join
-(SELECT a, b, count(*) from T1_n143 where a < 3 group by a, b with cube) subq2
-on subq1.a = subq2.a
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n143
-#### A masked pattern was here ####
-1	1	1	1	1	1
-1	1	1	1	NULL	1
-1	NULL	1	1	1	1
-1	NULL	1	1	NULL	1
-2	2	1	2	2	1
-2	2	1	2	3	1
-2	2	1	2	NULL	2
-2	3	1	2	2	1
-2	3	1	2	3	1
-2	3	1	2	NULL	2
-2	NULL	2	2	2	1
-2	NULL	2	2	3	1
-2	NULL	2	2	NULL	2
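
For reference, the WITH CUBE self-join output above can be reproduced outside the q-test harness. The sketch below is illustrative only: it uses a hypothetical scratch table name (t1_cube_demo) instead of the generated test tables, and assumes an ordinary Hive session with INSERT ... VALUES available.

    -- Hypothetical scratch table mirroring the (a STRING, b STRING, c STRING) layout of the q tests.
    CREATE TABLE t1_cube_demo (a STRING, b STRING, c STRING);
    INSERT INTO t1_cube_demo VALUES ('1', '1', '1'), ('2', '2', '1'), ('2', '3', '1');

    -- Each subquery expands GROUP BY a, b WITH CUBE into the grouping sets
    -- {(a, b), (a), (b), ()}; the join on a then pairs every cube row of
    -- subq1 with every cube row of subq2 sharing the same non-NULL a, which
    -- is why a = 2 appears with b = 2, b = 3 and b = NULL on either side.
    SELECT * FROM
      (SELECT a, b, count(*) AS cnt FROM t1_cube_demo WHERE a < 3 GROUP BY a, b WITH CUBE) subq1
    JOIN
      (SELECT a, b, count(*) AS cnt FROM t1_cube_demo WHERE a < 3 GROUP BY a, b WITH CUBE) subq2
    ON subq1.a = subq2.a;
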
diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out
deleted file mode 100644
index b6b4dcb..0000000
--- a/ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out
+++ /dev/null
@@ -1,383 +0,0 @@
-PREHOOK: query: CREATE TABLE T1_n24(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@T1_n24
-POSTHOOK: query: CREATE TABLE T1_n24(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1_n24
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_n24
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@t1_n24
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_n24
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@t1_n24
-PREHOOK: query: EXPLAIN
-SELECT a, b, count(*) FROM
-(SELECT a, b, count(1) from T1_n24 group by a, b) subq1 group by a, b with cube
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n24
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT a, b, count(*) FROM
-(SELECT a, b, count(1) from T1_n24 group by a, b) subq1 group by a, b with cube
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n24
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n24
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: a (type: string), b (type: string)
-              outputColumnNames: a, b
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                keys: a (type: string), b (type: string)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string)
-                  null sort order: zz
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                  Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: string), KEY._col1 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-          Group By Operator
-            aggregations: count()
-            keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint)
-            minReductionHashAggr: 0.99
-            mode: hash
-            outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-              null sort order: zzz
-              sort order: +++
-              Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-              Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col3 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          Select Operator
-            expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: EXPLAIN
-SELECT a, b, count(*) FROM
-(SELECT a, b, count(1) from T1_n24 group by a, b) subq1 group by cube(a, b)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n24
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT a, b, count(*) FROM
-(SELECT a, b, count(1) from T1_n24 group by a, b) subq1 group by cube(a, b)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n24
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n24
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: a (type: string), b (type: string)
-              outputColumnNames: a, b
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                keys: a (type: string), b (type: string)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string)
-                  null sort order: zz
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                  Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: string), KEY._col1 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-          Group By Operator
-            aggregations: count()
-            keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint)
-            minReductionHashAggr: 0.99
-            mode: hash
-            outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-              null sort order: zzz
-              sort order: +++
-              Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-              Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col3 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          Select Operator
-            expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT a, b, count(*) FROM
-(SELECT a, b, count(1) from T1_n24 group by a, b) subq1 group by a, b with cube
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n24
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a, b, count(*) FROM
-(SELECT a, b, count(1) from T1_n24 group by a, b) subq1 group by a, b with cube
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n24
-#### A masked pattern was here ####
-1	1	1
-1	NULL	1
-2	2	1
-2	3	1
-2	NULL	2
-3	2	1
-3	NULL	1
-5	2	1
-5	NULL	1
-8	1	1
-8	NULL	1
-NULL	1	2
-NULL	2	3
-NULL	3	1
-NULL	NULL	6
-PREHOOK: query: EXPLAIN
-SELECT a, b, count(*) FROM
-(SELECT a, b, count(1) from T1_n24 group by a, b) subq1 group by a, b with cube
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n24
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT a, b, count(*) FROM
-(SELECT a, b, count(1) from T1_n24 group by a, b) subq1 group by a, b with cube
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n24
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-3 depends on stages: Stage-2
-  Stage-0 depends on stages: Stage-3
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n24
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: a (type: string), b (type: string)
-              outputColumnNames: a, b
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                keys: a (type: string), b (type: string)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string)
-                  null sort order: zz
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                  Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: string), KEY._col1 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-          Group By Operator
-            aggregations: count()
-            keys: _col0 (type: string), _col1 (type: string)
-            minReductionHashAggr: 0.99
-            mode: hash
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string)
-              null sort order: zz
-              sort order: ++
-              Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col2 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), 0L (type: bigint)
-          mode: partials
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-              null sort order: zzz
-              sort order: +++
-              Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-              Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col3 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: final
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          Select Operator
-            expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT a, b, count(*) FROM
-(SELECT a, b, count(1) from T1_n24 group by a, b) subq1 group by a, b with cube
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n24
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a, b, count(*) FROM
-(SELECT a, b, count(1) from T1_n24 group by a, b) subq1 group by a, b with cube
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n24
-#### A masked pattern was here ####
-1	1	1
-1	NULL	1
-2	2	1
-2	3	1
-2	NULL	2
-3	2	1
-3	NULL	1
-5	2	1
-5	NULL	1
-8	1	1
-8	NULL	1
-NULL	1	2
-NULL	2	3
-NULL	3	1
-NULL	NULL	6
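
The two EXPLAIN outputs in the file above differ only in surface syntax: GROUP BY a, b WITH CUBE and GROUP BY CUBE(a, b) compile to the same operator tree. A minimal sketch of that equivalence, reusing the hypothetical t1_cube_demo table introduced earlier:

    -- Both statements expand to the grouping sets {(a, b), (a), (b), ()}
    -- over the distinct (a, b) pairs produced by the inner GROUP BY, so
    -- their result rows (including the NULL "all" rows) match exactly.
    SELECT a, b, count(*) FROM
      (SELECT a, b, count(1) AS cnt FROM t1_cube_demo GROUP BY a, b) subq1
    GROUP BY a, b WITH CUBE;

    SELECT a, b, count(*) FROM
      (SELECT a, b, count(1) AS cnt FROM t1_cube_demo GROUP BY a, b) subq1
    GROUP BY CUBE(a, b);
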
diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets6.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets6.q.out
deleted file mode 100644
index f6571b4..0000000
--- a/ql/src/test/results/clientpositive/groupby_grouping_sets6.q.out
+++ /dev/null
@@ -1,166 +0,0 @@
-PREHOOK: query: CREATE TABLE T1_n75(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@T1_n75
-POSTHOOK: query: CREATE TABLE T1_n75(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1_n75
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_n75
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@t1_n75
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_n75
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@t1_n75
-PREHOOK: query: EXPLAIN
-SELECT a, b FROM
-(SELECT a, b from T1_n75 group by a, b grouping sets ( (a,b),a )) res
-WHERE res.a=5
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n75
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT a, b FROM
-(SELECT a, b from T1_n75 group by a, b grouping sets ( (a,b),a )) res
-WHERE res.a=5
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n75
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n75
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (UDFToDouble(a) = 5.0D) (type: boolean)
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                keys: a (type: string), b (type: string), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT a, b FROM
-(SELECT a, b from T1_n75 group by a, b grouping sets ( (a,b),a )) res
-WHERE res.a=5
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n75
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a, b FROM
-(SELECT a, b from T1_n75 group by a, b grouping sets ( (a,b),a )) res
-WHERE res.a=5
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n75
-#### A masked pattern was here ####
-5	2
-5	NULL
-PREHOOK: query: EXPLAIN
-SELECT a, b FROM
-(SELECT a, b from T1_n75 group by a, b grouping sets ( (a,b),a )) res
-WHERE res.a=5
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n75
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT a, b FROM
-(SELECT a, b from T1_n75 group by a, b grouping sets ( (a,b),a )) res
-WHERE res.a=5
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n75
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n75
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (UDFToDouble(a) = 5.0D) (type: boolean)
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                keys: a (type: string), b (type: string), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT a, b FROM
-(SELECT a, b from T1_n75 group by a, b grouping sets ( (a,b),a )) res
-WHERE res.a=5
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n75
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a, b FROM
-(SELECT a, b from T1_n75 group by a, b grouping sets ( (a,b),a )) res
-WHERE res.a=5
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n75
-#### A masked pattern was here ####
-5	2
-5	NULL
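
The plans in the file above also show the outer predicate res.a = 5 being evaluated in the TableScan's Filter Operator, below the grouping-sets aggregation; that push-down is safe because a participates in every grouping set of GROUPING SETS ((a, b), a). A small sketch of the same shape, again over the hypothetical t1_cube_demo table:

    -- GROUPING SETS ((a, b), a) emits one row per (a, b) pair plus one row
    -- per a with b NULL-ed out; filtering on a can be applied before the
    -- aggregation because a is present in every grouping set.
    SELECT a, b FROM
      (SELECT a, b FROM t1_cube_demo GROUP BY a, b GROUPING SETS ((a, b), a)) res
    WHERE res.a = 5;
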
diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets_grouping.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets_grouping.q.out
deleted file mode 100644
index 93e081b..0000000
--- a/ql/src/test/results/clientpositive/groupby_grouping_sets_grouping.q.out
+++ /dev/null
@@ -1,1408 +0,0 @@
-PREHOOK: query: CREATE TABLE T1_n64(key INT, value INT) STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@T1_n64
-POSTHOOK: query: CREATE TABLE T1_n64(key INT, value INT) STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1_n64
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1_n64
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@t1_n64
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1_n64
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@t1_n64
-PREHOOK: query: explain
-select key, value, `grouping__id`, grouping(key), grouping(value)
-from T1_n64
-group by rollup(key, value)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-POSTHOOK: query: explain
-select key, value, `grouping__id`, grouping(key), grouping(value)
-from T1_n64
-group by rollup(key, value)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n64
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: int), value (type: int)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                keys: _col0 (type: int), _col1 (type: int), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: bigint)
-                  Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), grouping(_col2, 1L) (type: bigint), grouping(_col2, 0L) (type: bigint)
-            outputColumnNames: _col0, _col1, _col2, _col3, _col4
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select key, value, `grouping__id`, grouping(key), grouping(value)
-from T1_n64
-group by rollup(key, value)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-POSTHOOK: query: select key, value, `grouping__id`, grouping(key), grouping(value)
-from T1_n64
-group by rollup(key, value)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-1	1	0	0	0
-1	NULL	0	0	0
-1	NULL	1	0	1
-2	2	0	0	0
-2	NULL	1	0	1
-3	3	0	0	0
-3	NULL	0	0	0
-3	NULL	1	0	1
-4	5	0	0	0
-4	NULL	1	0	1
-NULL	NULL	3	1	1
-PREHOOK: query: explain
-select key, value, `grouping__id`, grouping(key), grouping(value)
-from T1_n64
-group by cube(key, value)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-POSTHOOK: query: explain
-select key, value, `grouping__id`, grouping(key), grouping(value)
-from T1_n64
-group by cube(key, value)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n64
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: int), value (type: int)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                keys: _col0 (type: int), _col1 (type: int), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: bigint)
-                  Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), grouping(_col2, 1L) (type: bigint), grouping(_col2, 0L) (type: bigint)
-            outputColumnNames: _col0, _col1, _col2, _col3, _col4
-            Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select key, value, `grouping__id`, grouping(key), grouping(value)
-from T1_n64
-group by cube(key, value)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-POSTHOOK: query: select key, value, `grouping__id`, grouping(key), grouping(value)
-from T1_n64
-group by cube(key, value)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-1	1	0	0	0
-1	NULL	0	0	0
-1	NULL	1	0	1
-2	2	0	0	0
-2	NULL	1	0	1
-3	3	0	0	0
-3	NULL	0	0	0
-3	NULL	1	0	1
-4	5	0	0	0
-4	NULL	1	0	1
-NULL	1	2	1	0
-NULL	2	2	1	0
-NULL	3	2	1	0
-NULL	5	2	1	0
-NULL	NULL	2	1	0
-NULL	NULL	3	1	1
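
In the rollup and cube results above, `grouping__id` is the integer formed from the per-column grouping() bits, with the first GROUP BY column as the most significant bit: grouping__id = grouping(key) * 2 + grouping(value), so the grand-total row carries id 3. A minimal sketch over the hypothetical t1_cube_demo table, with columns a and b standing in for key and value:

    -- grouping(col) is 1 when col is aggregated away in a row's grouping
    -- set and 0 otherwise; grouping__id packs those bits into an integer.
    SELECT a, b, `grouping__id`, grouping(a), grouping(b)
    FROM t1_cube_demo
    GROUP BY CUBE(a, b);
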
-PREHOOK: query: explain
-select key, value
-from T1_n64
-group by cube(key, value)
-having grouping(key) = 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-POSTHOOK: query: explain
-select key, value
-from T1_n64
-group by cube(key, value)
-having grouping(key) = 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n64
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: int), value (type: int)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                keys: _col0 (type: int), _col1 (type: int), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: bigint)
-                  Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            predicate: (grouping(_col2, 1L) = 1L) (type: boolean)
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: _col0 (type: int), _col1 (type: int)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select key, value
-from T1_n64
-group by cube(key, value)
-having grouping(key) = 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-POSTHOOK: query: select key, value
-from T1_n64
-group by cube(key, value)
-having grouping(key) = 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-NULL	1
-NULL	2
-NULL	3
-NULL	5
-NULL	NULL
-NULL	NULL
-PREHOOK: query: explain
-select key, value, grouping(key)+grouping(value) as x
-from T1_n64
-group by cube(key, value)
-having grouping(key) = 1 OR grouping(value) = 1
-order by x desc, case when x = 1 then key end
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-POSTHOOK: query: explain
-select key, value, grouping(key)+grouping(value) as x
-from T1_n64
-group by cube(key, value)
-having grouping(key) = 1 OR grouping(value) = 1
-order by x desc, case when x = 1 then key end
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n64
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: int), value (type: int)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                keys: _col0 (type: int), _col1 (type: int), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: bigint)
-                  Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            predicate: ((grouping(_col2, 1L) = 1L) or (grouping(_col2, 0L) = 1L)) (type: boolean)
-            Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: _col0 (type: int), _col1 (type: int), (grouping(_col2, 1L) + grouping(_col2, 0L)) (type: bigint), CASE WHEN (((grouping(_col2, 1L) + grouping(_col2, 0L)) = 1L)) THEN (_col0) ELSE (null) END (type: int)
-              outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col2 (type: bigint), _col3 (type: int)
-              null sort order: zz
-              sort order: -+
-              Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: int), _col1 (type: int)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: VALUE._col0 (type: int), VALUE._col1 (type: int), KEY.reducesinkkey0 (type: bigint)
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select key, value, grouping(key)+grouping(value) as x
-from T1_n64
-group by cube(key, value)
-having grouping(key) = 1 OR grouping(value) = 1
-order by x desc, case when x = 1 then key end
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-POSTHOOK: query: select key, value, grouping(key)+grouping(value) as x
-from T1_n64
-group by cube(key, value)
-having grouping(key) = 1 OR grouping(value) = 1
-order by x desc, case when x = 1 then key end
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-1	NULL	1
-2	NULL	1
-3	NULL	1
-4	NULL	1
-NULL	1	1
-NULL	2	1
-NULL	3	1
-NULL	5	1
-NULL	NULL	1
-NULL	NULL	2
-PREHOOK: query: explain
-select key, value, `grouping__id`, grouping(key), grouping(value)
-from T1_n64
-group by rollup(key, value)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-POSTHOOK: query: explain
-select key, value, `grouping__id`, grouping(key), grouping(value)
-from T1_n64
-group by rollup(key, value)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n64
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: int), value (type: int)
-              outputColumnNames: key, value
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                keys: key (type: int), value (type: int), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: bigint)
-                  Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), grouping(_col2, 1L) (type: bigint), grouping(_col2, 0L) (type: bigint)
-            outputColumnNames: _col0, _col1, _col2, _col3, _col4
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select key, value, `grouping__id`, grouping(key), grouping(value)
-from T1_n64
-group by rollup(key, value)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-POSTHOOK: query: select key, value, `grouping__id`, grouping(key), grouping(value)
-from T1_n64
-group by rollup(key, value)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-1	1	0	0	0
-1	NULL	0	0	0
-1	NULL	1	0	1
-2	2	0	0	0
-2	NULL	1	0	1
-3	3	0	0	0
-3	NULL	0	0	0
-3	NULL	1	0	1
-4	5	0	0	0
-4	NULL	1	0	1
-NULL	NULL	3	1	1
-PREHOOK: query: explain
-select key, value, `grouping__id`, grouping(key), grouping(value)
-from T1_n64
-group by cube(key, value)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-POSTHOOK: query: explain
-select key, value, `grouping__id`, grouping(key), grouping(value)
-from T1_n64
-group by cube(key, value)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n64
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: int), value (type: int)
-              outputColumnNames: key, value
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                keys: key (type: int), value (type: int), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: bigint)
-                  Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), grouping(_col2, 1L) (type: bigint), grouping(_col2, 0L) (type: bigint)
-            outputColumnNames: _col0, _col1, _col2, _col3, _col4
-            Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select key, value, `grouping__id`, grouping(key), grouping(value)
-from T1_n64
-group by cube(key, value)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-POSTHOOK: query: select key, value, `grouping__id`, grouping(key), grouping(value)
-from T1_n64
-group by cube(key, value)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-1	1	0	0	0
-1	NULL	0	0	0
-1	NULL	1	0	1
-2	2	0	0	0
-2	NULL	1	0	1
-3	3	0	0	0
-3	NULL	0	0	0
-3	NULL	1	0	1
-4	5	0	0	0
-4	NULL	1	0	1
-NULL	1	2	1	0
-NULL	2	2	1	0
-NULL	3	2	1	0
-NULL	5	2	1	0
-NULL	NULL	2	1	0
-NULL	NULL	3	1	1
-PREHOOK: query: explain
-select key, value
-from T1_n64
-group by cube(key, value)
-having grouping(key) = 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-POSTHOOK: query: explain
-select key, value
-from T1_n64
-group by cube(key, value)
-having grouping(key) = 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n64
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: int), value (type: int)
-              outputColumnNames: key, value
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                keys: key (type: int), value (type: int), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: NONE
-                Filter Operator
-                  predicate: (grouping(_col2, 1L) = 1) (type: boolean)
-                  Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint)
-                    null sort order: zzz
-                    sort order: +++
-                    Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: bigint)
-                    Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select key, value
-from T1_n64
-group by cube(key, value)
-having grouping(key) = 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-POSTHOOK: query: select key, value
-from T1_n64
-group by cube(key, value)
-having grouping(key) = 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-NULL	1
-NULL	2
-NULL	3
-NULL	5
-NULL	NULL
-NULL	NULL
-PREHOOK: query: explain
-select key, value, grouping(key)+grouping(value) as x
-from T1_n64
-group by cube(key, value)
-having grouping(key) = 1 OR grouping(value) = 1
-order by x desc, case when x = 1 then key end
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-POSTHOOK: query: explain
-select key, value, grouping(key)+grouping(value) as x
-from T1_n64
-group by cube(key, value)
-having grouping(key) = 1 OR grouping(value) = 1
-order by x desc, case when x = 1 then key end
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n64
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: int), value (type: int)
-              outputColumnNames: key, value
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                keys: key (type: int), value (type: int), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: NONE
-                Filter Operator
-                  predicate: ((grouping(_col2, 1L) = 1) or (grouping(_col2, 0L) = 1)) (type: boolean)
-                  Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint)
-                    null sort order: zzz
-                    sort order: +++
-                    Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: bigint)
-                    Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: int), _col1 (type: int), (grouping(_col2, 1L) + grouping(_col2, 0L)) (type: bigint)
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col2 (type: bigint), CASE WHEN ((_col2 = 1L)) THEN (_col0) END (type: int)
-              null sort order: zz
-              sort order: -+
-              Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: int), _col1 (type: int)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: VALUE._col0 (type: int), VALUE._col1 (type: int), KEY.reducesinkkey0 (type: bigint)
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select key, value, grouping(key)+grouping(value) as x
-from T1_n64
-group by cube(key, value)
-having grouping(key) = 1 OR grouping(value) = 1
-order by x desc, case when x = 1 then key end
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-POSTHOOK: query: select key, value, grouping(key)+grouping(value) as x
-from T1_n64
-group by cube(key, value)
-having grouping(key) = 1 OR grouping(value) = 1
-order by x desc, case when x = 1 then key end
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-1	NULL	1
-2	NULL	1
-3	NULL	1
-4	NULL	1
-NULL	1	1
-NULL	2	1
-NULL	3	1
-NULL	5	1
-NULL	NULL	1
-NULL	NULL	2
-PREHOOK: query: explain
-select key, value, grouping(key), grouping(value)
-from T1_n64
-group by key, value
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-POSTHOOK: query: explain
-select key, value, grouping(key), grouping(value)
-from T1_n64
-group by key, value
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n64
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: int), value (type: int)
-              outputColumnNames: key, value
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                keys: key (type: int), value (type: int)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int), _col1 (type: int)
-                  null sort order: zz
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: int), KEY._col1 (type: int)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: int), _col1 (type: int), 0L (type: bigint), 0L (type: bigint)
-            outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select key, value, grouping(key), grouping(value)
-from T1_n64
-group by key, value
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-POSTHOOK: query: select key, value, grouping(key), grouping(value)
-from T1_n64
-group by key, value
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-1	1	0	0
-1	NULL	0	0
-2	2	0	0
-3	3	0	0
-3	NULL	0	0
-4	5	0	0
-PREHOOK: query: explain
-select key, value, grouping(value)
-from T1_n64
-group by key, value
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-POSTHOOK: query: explain
-select key, value, grouping(value)
-from T1_n64
-group by key, value
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n64
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: int), value (type: int)
-              outputColumnNames: key, value
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                keys: key (type: int), value (type: int)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int), _col1 (type: int)
-                  null sort order: zz
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: int), KEY._col1 (type: int)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: int), _col1 (type: int), 0L (type: bigint)
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select key, value, grouping(value)
-from T1_n64
-group by key, value
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-POSTHOOK: query: select key, value, grouping(value)
-from T1_n64
-group by key, value
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-1	1	0
-1	NULL	0
-2	2	0
-3	3	0
-3	NULL	0
-4	5	0
-PREHOOK: query: explain
-select key, value
-from T1_n64
-group by key, value
-having grouping(key) = 0
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-POSTHOOK: query: explain
-select key, value
-from T1_n64
-group by key, value
-having grouping(key) = 0
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n64
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: int), value (type: int)
-              outputColumnNames: key, value
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                keys: key (type: int), value (type: int)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int), _col1 (type: int)
-                  null sort order: zz
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: int), KEY._col1 (type: int)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select key, value
-from T1_n64
-group by key, value
-having grouping(key) = 0
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-POSTHOOK: query: select key, value
-from T1_n64
-group by key, value
-having grouping(key) = 0
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-1	1
-1	NULL
-2	2
-3	3
-3	NULL
-4	5
-PREHOOK: query: explain
-select key, value, `grouping__id`, grouping(key, value)
-from T1_n64
-group by cube(key, value)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-POSTHOOK: query: explain
-select key, value, `grouping__id`, grouping(key, value)
-from T1_n64
-group by cube(key, value)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n64
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: int), value (type: int)
-              outputColumnNames: key, value
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                keys: key (type: int), value (type: int), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: bigint)
-                  Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), grouping(_col2, 1L, 0L) (type: bigint)
-            outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select key, value, `grouping__id`, grouping(key, value)
-from T1_n64
-group by cube(key, value)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-POSTHOOK: query: select key, value, `grouping__id`, grouping(key, value)
-from T1_n64
-group by cube(key, value)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-1	1	0	0
-1	NULL	0	0
-1	NULL	1	1
-2	2	0	0
-2	NULL	1	1
-3	3	0	0
-3	NULL	0	0
-3	NULL	1	1
-4	5	0	0
-4	NULL	1	1
-NULL	1	2	2
-NULL	2	2	2
-NULL	3	2	2
-NULL	5	2	2
-NULL	NULL	2	2
-NULL	NULL	3	3
-PREHOOK: query: explain
-select key, value, `grouping__id`, grouping(value, key)
-from T1_n64
-group by cube(key, value)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-POSTHOOK: query: explain
-select key, value, `grouping__id`, grouping(value, key)
-from T1_n64
-group by cube(key, value)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n64
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: int), value (type: int)
-              outputColumnNames: key, value
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                keys: key (type: int), value (type: int), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: bigint)
-                  Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), grouping(_col2, 0L, 1L) (type: bigint)
-            outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select key, value, `grouping__id`, grouping(value, key)
-from T1_n64
-group by cube(key, value)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-POSTHOOK: query: select key, value, `grouping__id`, grouping(value, key)
-from T1_n64
-group by cube(key, value)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-1	1	0	0
-1	NULL	0	0
-1	NULL	1	2
-2	2	0	0
-2	NULL	1	2
-3	3	0	0
-3	NULL	0	0
-3	NULL	1	2
-4	5	0	0
-4	NULL	1	2
-NULL	1	2	1
-NULL	2	2	1
-NULL	3	2	1
-NULL	5	2	1
-NULL	NULL	2	1
-NULL	NULL	3	3
-PREHOOK: query: explain
-select key, value, `grouping__id`, grouping(key, value)
-from T1_n64
-group by rollup(key, value)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-POSTHOOK: query: explain
-select key, value, `grouping__id`, grouping(key, value)
-from T1_n64
-group by rollup(key, value)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n64
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: int), value (type: int)
-              outputColumnNames: key, value
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                keys: key (type: int), value (type: int), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: bigint)
-                  Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), grouping(_col2, 1L, 0L) (type: bigint)
-            outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select key, value, `grouping__id`, grouping(key, value)
-from T1_n64
-group by rollup(key, value)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-POSTHOOK: query: select key, value, `grouping__id`, grouping(key, value)
-from T1_n64
-group by rollup(key, value)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-1	1	0	0
-1	NULL	0	0
-1	NULL	1	1
-2	2	0	0
-2	NULL	1	1
-3	3	0	0
-3	NULL	0	0
-3	NULL	1	1
-4	5	0	0
-4	NULL	1	1
-NULL	NULL	3	3
-PREHOOK: query: explain
-select key, value, `grouping__id`, grouping(value, key)
-from T1_n64
-group by rollup(key, value)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-POSTHOOK: query: explain
-select key, value, `grouping__id`, grouping(value, key)
-from T1_n64
-group by rollup(key, value)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n64
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: int), value (type: int)
-              outputColumnNames: key, value
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                keys: key (type: int), value (type: int), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: bigint)
-                  Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), grouping(_col2, 0L, 1L) (type: bigint)
-            outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select key, value, `grouping__id`, grouping(value, key)
-from T1_n64
-group by rollup(key, value)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-POSTHOOK: query: select key, value, `grouping__id`, grouping(value, key)
-from T1_n64
-group by rollup(key, value)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n64
-#### A masked pattern was here ####
-1	1	0	0
-1	NULL	0	0
-1	NULL	1	2
-2	2	0	0
-2	NULL	1	2
-3	3	0	0
-3	NULL	0	0
-3	NULL	1	2
-4	5	0	0
-4	NULL	1	2
-NULL	NULL	3	3
diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets_limit.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets_limit.q.out
deleted file mode 100644
index b4aa6d1..0000000
--- a/ql/src/test/results/clientpositive/groupby_grouping_sets_limit.q.out
+++ /dev/null
@@ -1,518 +0,0 @@
-PREHOOK: query: CREATE TABLE T1_n141(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@T1_n141
-POSTHOOK: query: CREATE TABLE T1_n141(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@T1_n141
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_n141
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@t1_n141
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1_n141
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@t1_n141
-PREHOOK: query: EXPLAIN
-SELECT a, b, count(*) from T1_n141 group by a, b with cube LIMIT 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n141
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT a, b, count(*) from T1_n141 group by a, b with cube LIMIT 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n141
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n141
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: a (type: string), b (type: string)
-              outputColumnNames: a, b
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count()
-                keys: a (type: string), b (type: string), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-                  TopN Hash Memory Usage: 0.1
-                  value expressions: _col3 (type: bigint)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          Select Operator
-            expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-            Limit
-              Number of rows: 10
-              Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 10
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT a, b, count(*) from T1_n141 group by a, b with cube LIMIT 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n141
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a, b, count(*) from T1_n141 group by a, b with cube LIMIT 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n141
-#### A masked pattern was here ####
-1	1	1
-1	NULL	1
-2	2	1
-2	3	1
-2	NULL	2
-3	2	1
-3	NULL	1
-5	2	1
-5	NULL	1
-8	1	1
-PREHOOK: query: EXPLAIN
-SELECT a, b, count(*) FROM T1_n141 GROUP BY a, b  GROUPING SETS (a, (a, b), b, ()) LIMIT 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n141
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT a, b, count(*) FROM T1_n141 GROUP BY a, b  GROUPING SETS (a, (a, b), b, ()) LIMIT 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n141
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n141
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: a (type: string), b (type: string)
-              outputColumnNames: a, b
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count()
-                keys: a (type: string), b (type: string), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
-                  TopN Hash Memory Usage: 0.1
-                  value expressions: _col3 (type: bigint)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          Select Operator
-            expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-            Limit
-              Number of rows: 10
-              Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 10
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT a, b, count(*) FROM T1_n141 GROUP BY a, b  GROUPING SETS (a, (a, b), b, ()) LIMIT 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n141
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a, b, count(*) FROM T1_n141 GROUP BY a, b  GROUPING SETS (a, (a, b), b, ()) LIMIT 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n141
-#### A masked pattern was here ####
-1	1	1
-1	NULL	1
-2	2	1
-2	3	1
-2	NULL	2
-3	2	1
-3	NULL	1
-5	2	1
-5	NULL	1
-8	1	1
-PREHOOK: query: EXPLAIN
-SELECT a, b, count(*) FROM T1_n141 GROUP BY a, b GROUPING SETS (a, (a, b)) LIMIT 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n141
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT a, b, count(*) FROM T1_n141 GROUP BY a, b GROUPING SETS (a, (a, b)) LIMIT 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n141
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n141
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: a (type: string), b (type: string)
-              outputColumnNames: a, b
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count()
-                keys: a (type: string), b (type: string), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  null sort order: zzz
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-                  Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
-                  TopN Hash Memory Usage: 0.1
-                  value expressions: _col3 (type: bigint)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          Select Operator
-            expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            Limit
-              Number of rows: 10
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 10
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT a, b, count(*) FROM T1_n141 GROUP BY a, b GROUPING SETS (a, (a, b)) LIMIT 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n141
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a, b, count(*) FROM T1_n141 GROUP BY a, b GROUPING SETS (a, (a, b)) LIMIT 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n141
-#### A masked pattern was here ####
-1	1	1
-1	NULL	1
-2	2	1
-2	3	1
-2	NULL	2
-3	2	1
-3	NULL	1
-5	2	1
-5	NULL	1
-8	1	1
-PREHOOK: query: EXPLAIN
-SELECT a FROM T1_n141 GROUP BY a, b, c GROUPING SETS (a, b, c) LIMIT 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n141
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT a FROM T1_n141 GROUP BY a, b, c GROUPING SETS (a, b, c) LIMIT 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n141
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n141
-            Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: a (type: string), b (type: string), c (type: string)
-              outputColumnNames: a, b, c
-              Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                keys: a (type: string), b (type: string), c (type: string), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 3 Data size: 1656 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: bigint)
-                  null sort order: zzzz
-                  sort order: ++++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: bigint)
-                  Statistics: Num rows: 3 Data size: 1656 Basic stats: COMPLETE Column stats: NONE
-                  TopN Hash Memory Usage: 0.1
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string), KEY._col3 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: NONE
-          pruneGroupingSetId: true
-          Select Operator
-            expressions: _col0 (type: string)
-            outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: NONE
-            Limit
-              Number of rows: 10
-              Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 10
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT a FROM T1_n141 GROUP BY a, b, c GROUPING SETS (a, b, c) LIMIT 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n141
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a FROM T1_n141 GROUP BY a, b, c GROUPING SETS (a, b, c) LIMIT 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n141
-#### A masked pattern was here ####
-1
-2
-3
-5
-8
-NULL
-NULL
-NULL
-NULL
-NULL
-PREHOOK: query: EXPLAIN
-SELECT a FROM T1_n141 GROUP BY a GROUPING SETS ((a), (a)) LIMIT 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n141
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT a FROM T1_n141 GROUP BY a GROUPING SETS ((a), (a)) LIMIT 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n141
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n141
-            Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: a (type: string)
-              outputColumnNames: a
-              Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                keys: a (type: string)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  null sort order: z
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
-                  TopN Hash Memory Usage: 0.1
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
-          Limit
-            Number of rows: 10
-            Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 10
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT a FROM T1_n141 GROUP BY a GROUPING SETS ((a), (a)) LIMIT 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n141
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a FROM T1_n141 GROUP BY a GROUPING SETS ((a), (a)) LIMIT 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n141
-#### A masked pattern was here ####
-1
-2
-3
-5
-8
-PREHOOK: query: EXPLAIN
-SELECT a + b, count(*) FROM T1_n141 GROUP BY a + b GROUPING SETS (a+b) LIMIT 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n141
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT a + b, count(*) FROM T1_n141 GROUP BY a + b GROUPING SETS (a+b) LIMIT 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n141
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1_n141
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: (UDFToDouble(a) + UDFToDouble(b)) (type: double)
-              outputColumnNames: _col0
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count()
-                keys: _col0 (type: double)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: double)
-                  null sort order: z
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: double)
-                  Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-                  TopN Hash Memory Usage: 0.1
-                  value expressions: _col1 (type: bigint)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: double)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-          Limit
-            Number of rows: 10
-            Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 10
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT a + b, count(*) FROM T1_n141 GROUP BY a + b GROUPING SETS (a+b) LIMIT 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t1_n141
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a + b, count(*) FROM T1_n141 GROUP BY a + b GROUPING SETS (a+b) LIMIT 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t1_n141
-#### A masked pattern was here ####
-2.0	1
-4.0	1
-5.0	2
-7.0	1
-9.0	1
diff --git a/ql/src/test/results/clientpositive/groupby_grouping_window.q.out b/ql/src/test/results/clientpositive/groupby_grouping_window.q.out
deleted file mode 100644
index 21d9256..0000000
--- a/ql/src/test/results/clientpositive/groupby_grouping_window.q.out
+++ /dev/null
@@ -1,210 +0,0 @@
-PREHOOK: query: create table t_n33(category int, live int, comments int)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@t_n33
-POSTHOOK: query: create table t_n33(category int, live int, comments int)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t_n33
-PREHOOK: query: insert into table t_n33 select key, 0, 2 from src tablesample(3 rows)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@t_n33
-POSTHOOK: query: insert into table t_n33 select key, 0, 2 from src tablesample(3 rows)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@t_n33
-POSTHOOK: Lineage: t_n33.category EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: t_n33.comments SIMPLE []
-POSTHOOK: Lineage: t_n33.live SIMPLE []
-PREHOOK: query: explain
-select category, max(live) live, max(comments) comments, rank() OVER (PARTITION BY category ORDER BY comments) rank1
-FROM t_n33
-GROUP BY category
-GROUPING SETS ((), (category))
-HAVING max(comments) > 0
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t_n33
-#### A masked pattern was here ####
-POSTHOOK: query: explain
-select category, max(live) live, max(comments) comments, rank() OVER (PARTITION BY category ORDER BY comments) rank1
-FROM t_n33
-GROUP BY category
-GROUPING SETS ((), (category))
-HAVING max(comments) > 0
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t_n33
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t_n33
-            Statistics: Num rows: 3 Data size: 36 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: category (type: int), live (type: int), comments (type: int)
-              outputColumnNames: category, live, comments
-              Statistics: Num rows: 3 Data size: 36 Basic stats: COMPLETE Column stats: COMPLETE
-              Group By Operator
-                aggregations: max(live), max(comments)
-                keys: category (type: int), 0L (type: bigint)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 3 Data size: 60 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int), _col1 (type: bigint)
-                  null sort order: zz
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: int), _col1 (type: bigint)
-                  Statistics: Num rows: 3 Data size: 60 Basic stats: COMPLETE Column stats: COMPLETE
-                  value expressions: _col2 (type: int), _col3 (type: int)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: max(VALUE._col0), max(VALUE._col1)
-          keys: KEY._col0 (type: int), KEY._col1 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col2, _col3
-          Statistics: Num rows: 3 Data size: 60 Basic stats: COMPLETE Column stats: COMPLETE
-          pruneGroupingSetId: true
-          Filter Operator
-            predicate: (_col3 > 0) (type: boolean)
-            Statistics: Num rows: 3 Data size: 60 Basic stats: COMPLETE Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: int), _col3 (type: int)
-              null sort order: az
-              sort order: ++
-              Map-reduce partition columns: _col0 (type: int)
-              Statistics: Num rows: 3 Data size: 60 Basic stats: COMPLETE Column stats: COMPLETE
-              value expressions: _col2 (type: int)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: int), VALUE._col1 (type: int), KEY.reducesinkkey1 (type: int)
-          outputColumnNames: _col0, _col2, _col3
-          Statistics: Num rows: 3 Data size: 60 Basic stats: COMPLETE Column stats: COMPLETE
-          PTF Operator
-            Function definitions:
-                Input definition
-                  input alias: ptf_0
-                  output shape: _col0: int, _col2: int, _col3: int
-                  type: WINDOWING
-                Windowing table definition
-                  input alias: ptf_1
-                  name: windowingtablefunction
-                  order by: _col3 ASC NULLS LAST
-                  partition by: _col0
-                  raw input shape:
-                  window functions:
-                      window function definition
-                        alias: rank_window_0
-                        arguments: _col3
-                        name: rank
-                        window function: GenericUDAFRankEvaluator
-                        window frame: ROWS PRECEDING(MAX)~FOLLOWING(MAX)
-                        isPivotResult: true
-            Statistics: Num rows: 3 Data size: 60 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: _col0 (type: int), _col2 (type: int), _col3 (type: int), rank_window_0 (type: int)
-              outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select category, max(live) live, max(comments) comments, rank() OVER (PARTITION BY category ORDER BY comments) rank1
-FROM t_n33
-GROUP BY category
-GROUPING SETS ((), (category))
-HAVING max(comments) > 0
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t_n33
-#### A masked pattern was here ####
-POSTHOOK: query: select category, max(live) live, max(comments) comments, rank() OVER (PARTITION BY category ORDER BY comments) rank1
-FROM t_n33
-GROUP BY category
-GROUPING SETS ((), (category))
-HAVING max(comments) > 0
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t_n33
-#### A masked pattern was here ####
-NULL	0	2	1
-86	0	2	1
-238	0	2	1
-311	0	2	1
-PREHOOK: query: SELECT grouping(category), lead(live) over(partition by grouping(category))
-FROM t_n33
-GROUP BY category, live
-GROUPING SETS ((), (category))
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t_n33
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT grouping(category), lead(live) over(partition by grouping(category))
-FROM t_n33
-GROUP BY category, live
-GROUPING SETS ((), (category))
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t_n33
-#### A masked pattern was here ####
-0	NULL
-0	NULL
-0	NULL
-1	NULL
-PREHOOK: query: SELECT grouping(category), lead(live) over(partition by grouping(category))
-FROM t_n33
-GROUP BY category, live
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t_n33
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT grouping(category), lead(live) over(partition by grouping(category))
-FROM t_n33
-GROUP BY category, live
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t_n33
-#### A masked pattern was here ####
-0	0
-0	0
-0	NULL
-PREHOOK: query: SELECT grouping(category), lag(live) over(partition by grouping(category))
-FROM t_n33
-GROUP BY category, live
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t_n33
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT grouping(category), lag(live) over(partition by grouping(category))
-FROM t_n33
-GROUP BY category, live
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t_n33
-#### A masked pattern was here ####
-0	NULL
-0	0
-0	0
diff --git a/ql/src/test/results/clientpositive/groupby_join_pushdown.q.out b/ql/src/test/results/clientpositive/groupby_join_pushdown.q.out
deleted file mode 100644
index 2138eae..0000000
--- a/ql/src/test/results/clientpositive/groupby_join_pushdown.q.out
+++ /dev/null
@@ -1,1847 +0,0 @@
-PREHOOK: query: EXPLAIN
-SELECT f.key, g.key, count(g.key)
-FROM src f JOIN src g ON(f.key = g.key)
-GROUP BY f.key, g.key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT f.key, g.key, count(g.key)
-FROM src f JOIN src g ON(f.key = g.key)
-GROUP BY f.key, g.key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-3
-  Stage-3 is a root stage
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: f
-            filterExpr: key is not null (type: boolean)
-            Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: key is not null (type: boolean)
-              Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: key (type: string)
-                outputColumnNames: _col0
-                Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-                Group By Operator
-                  aggregations: count()
-                  keys: _col0 (type: string)
-                  minReductionHashAggr: 0.99
-                  mode: hash
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    null sort order: z
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
-                    value expressions: _col1 (type: bigint)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              null sort order: z
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
-              value expressions: _col1 (type: bigint)
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              null sort order: z
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
-              value expressions: _col1 (type: bigint)
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 _col0 (type: string)
-            1 _col0 (type: string)
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 250 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE
-          Select Operator
-            expressions: _col0 (type: string), _col2 (type: string), (_col1 * _col3) (type: bigint)
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 250 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 250 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: g
-            filterExpr: key is not null (type: boolean)
-            Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: key is not null (type: boolean)
-              Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: key (type: string)
-                outputColumnNames: _col0
-                Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-                Group By Operator
-                  aggregations: count(_col0)
-                  keys: _col0 (type: string)
-                  minReductionHashAggr: 0.99
-                  mode: hash
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    null sort order: z
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
-                    value expressions: _col1 (type: bigint)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: EXPLAIN
-SELECT f.key, g.key
-FROM src f JOIN src g ON(f.key = g.key)
-GROUP BY f.key, g.key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT f.key, g.key
-FROM src f JOIN src g ON(f.key = g.key)
-GROUP BY f.key, g.key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-3
-  Stage-3 is a root stage
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: f
-            filterExpr: key is not null (type: boolean)
-            Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: key is not null (type: boolean)
-              Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: key (type: string)
-                outputColumnNames: _col0
-                Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-                Group By Operator
-                  keys: _col0 (type: string)
-                  minReductionHashAggr: 0.99
-                  mode: hash
-                  outputColumnNames: _col0
-                  Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    null sort order: z
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              null sort order: z
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              null sort order: z
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 _col0 (type: string)
-            1 _col0 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 250 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 250 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: g
-            filterExpr: key is not null (type: boolean)
-            Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: key is not null (type: boolean)
-              Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: key (type: string)
-                outputColumnNames: _col0
-                Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-                Group By Operator
-                  keys: _col0 (type: string)
-                  minReductionHashAggr: 0.99
-                  mode: hash
-                  outputColumnNames: _col0
-                  Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    null sort order: z
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: EXPLAIN
-SELECT DISTINCT f.value, g.value
-FROM src f JOIN src g ON(f.value = g.value)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT DISTINCT f.value, g.value
-FROM src f JOIN src g ON(f.value = g.value)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-3
-  Stage-3 is a root stage
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: f
-            filterExpr: value is not null (type: boolean)
-            Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: value is not null (type: boolean)
-              Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: value (type: string)
-                outputColumnNames: _col0
-                Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
-                Group By Operator
-                  keys: _col0 (type: string)
-                  minReductionHashAggr: 0.99
-                  mode: hash
-                  outputColumnNames: _col0
-                  Statistics: Num rows: 250 Data size: 22750 Basic stats: COMPLETE Column stats: COMPLETE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    null sort order: z
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 250 Data size: 22750 Basic stats: COMPLETE Column stats: COMPLETE
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 250 Data size: 22750 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              null sort order: z
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 250 Data size: 22750 Basic stats: COMPLETE Column stats: COMPLETE
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              null sort order: z
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 250 Data size: 22750 Basic stats: COMPLETE Column stats: COMPLETE
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 _col0 (type: string)
-            1 _col0 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 250 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 250 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: g
-            filterExpr: value is not null (type: boolean)
-            Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: value is not null (type: boolean)
-              Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: value (type: string)
-                outputColumnNames: _col0
-                Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
-                Group By Operator
-                  keys: _col0 (type: string)
-                  minReductionHashAggr: 0.99
-                  mode: hash
-                  outputColumnNames: _col0
-                  Statistics: Num rows: 250 Data size: 22750 Basic stats: COMPLETE Column stats: COMPLETE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    null sort order: z
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 250 Data size: 22750 Basic stats: COMPLETE Column stats: COMPLETE
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 250 Data size: 22750 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: EXPLAIN
-SELECT f.key, g.key, COUNT(*)
-FROM src f JOIN src g ON(f.key = g.key)
-GROUP BY f.key, g.key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT f.key, g.key, COUNT(*)
-FROM src f JOIN src g ON(f.key = g.key)
-GROUP BY f.key, g.key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-3
-  Stage-3 is a root stage
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: f
-            filterExpr: key is not null (type: boolean)
-            Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: key is not null (type: boolean)
-              Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: key (type: string)
-                outputColumnNames: _col0
-                Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-                Group By Operator
-                  aggregations: count()
-                  keys: _col0 (type: string)
-                  minReductionHashAggr: 0.99
-                  mode: hash
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    null sort order: z
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
-                    value expressions: _col1 (type: bigint)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              null sort order: z
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
-              value expressions: _col1 (type: bigint)
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              null sort order: z
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
-              value expressions: _col1 (type: bigint)
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 _col0 (type: string)
-            1 _col0 (type: string)
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 250 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE
-          Select Operator
-            expressions: _col0 (type: string), _col2 (type: string), (_col1 * _col3) (type: bigint)
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 250 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 250 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: g
-            filterExpr: key is not null (type: boolean)
-            Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: key is not null (type: boolean)
-              Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: key (type: string)
-                outputColumnNames: _col0
-                Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-                Group By Operator
-                  aggregations: count()
-                  keys: _col0 (type: string)
-                  minReductionHashAggr: 0.99
-                  mode: hash
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    null sort order: z
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
-                    value expressions: _col1 (type: bigint)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: EXPLAIN
-SELECT  f.ctinyint, g.ctinyint, SUM(f.cbigint)              
-FROM alltypesorc f JOIN alltypesorc g ON(f.cint = g.cint)
-GROUP BY f.ctinyint, g.ctinyint
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT  f.ctinyint, g.ctinyint, SUM(f.cbigint)              
-FROM alltypesorc f JOIN alltypesorc g ON(f.cint = g.cint)
-GROUP BY f.ctinyint, g.ctinyint
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: f
-            filterExpr: cint is not null (type: boolean)
-            Statistics: Num rows: 12288 Data size: 146784 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: cint is not null (type: boolean)
-              Statistics: Num rows: 9173 Data size: 109584 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: ctinyint (type: tinyint), cint (type: int), cbigint (type: bigint)
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 9173 Data size: 109584 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  key expressions: _col1 (type: int)
-                  null sort order: z
-                  sort order: +
-                  Map-reduce partition columns: _col1 (type: int)
-                  Statistics: Num rows: 9173 Data size: 109584 Basic stats: COMPLETE Column stats: COMPLETE
-                  value expressions: _col0 (type: tinyint), _col2 (type: bigint)
-          TableScan
-            alias: g
-            filterExpr: cint is not null (type: boolean)
-            Statistics: Num rows: 12288 Data size: 73392 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: cint is not null (type: boolean)
-              Statistics: Num rows: 9173 Data size: 54792 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: ctinyint (type: tinyint), cint (type: int)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 9173 Data size: 54792 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  key expressions: _col1 (type: int)
-                  null sort order: z
-                  sort order: +
-                  Map-reduce partition columns: _col1 (type: int)
-                  Statistics: Num rows: 9173 Data size: 54792 Basic stats: COMPLETE Column stats: COMPLETE
-                  value expressions: _col0 (type: tinyint)
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 _col1 (type: int)
-            1 _col1 (type: int)
-          outputColumnNames: _col0, _col2, _col3
-          Statistics: Num rows: 13785 Data size: 183376 Basic stats: COMPLETE Column stats: COMPLETE
-          Group By Operator
-            aggregations: sum(_col2)
-            keys: _col0 (type: tinyint), _col3 (type: tinyint)
-            minReductionHashAggr: 0.99
-            mode: hash
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 6892 Data size: 100984 Basic stats: COMPLETE Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: tinyint), _col1 (type: tinyint)
-              null sort order: zz
-              sort order: ++
-              Map-reduce partition columns: _col0 (type: tinyint), _col1 (type: tinyint)
-              Statistics: Num rows: 6892 Data size: 100984 Basic stats: COMPLETE Column stats: COMPLETE
-              value expressions: _col2 (type: bigint)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: sum(VALUE._col0)
-          keys: KEY._col0 (type: tinyint), KEY._col1 (type: tinyint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 6892 Data size: 100984 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 6892 Data size: 100984 Basic stats: COMPLETE Column stats: COMPLETE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: EXPLAIN
-SELECT  f.cbigint, g.cbigint, MAX(f.cint)              
-FROM alltypesorc f JOIN alltypesorc g ON(f.cbigint = g.cbigint)
-GROUP BY f.cbigint, g.cbigint
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN
-SELECT  f.cbigint, g.cbigint, MAX(f.cint)              
-FROM alltypesorc f JOIN alltypesorc g ON(f.cbigint = g.cbigint)
-GROUP BY f.cbigint, g.cbigint
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-3
-  Stage-3 is a root stage
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: f
-            filterExpr: cbigint is not null (type: boolean)
-            Statistics: Num rows: 12288 Data size: 110088 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: cbigint is not null (type: boolean)
-              Statistics: Num rows: 9173 Data size: 82188 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: cint (type: int), cbigint (type: bigint)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 9173 Data size: 82188 Basic stats: COMPLETE Column stats: COMPLETE
-                Group By Operator
-                  aggregations: max(_col0)
-                  keys: _col1 (type: bigint)
-                  minReductionHashAggr: 0.99
-                  mode: hash
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 4586 Data size: 45744 Basic stats: COMPLETE Column stats: COMPLETE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: bigint)
-                    null sort order: z
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: bigint)
-                    Statistics: Num rows: 4586 Data size: 45744 Basic stats: COMPLETE Column stats: COMPLETE
-                    value expressions: _col1 (type: int)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: max(VALUE._col0)
-          keys: KEY._col0 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 4586 Data size: 45744 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: bigint)
-              null sort order: z
-              sort order: +
-              Map-reduce partition columns: _col0 (type: bigint)
-              Statistics: Num rows: 4586 Data size: 45744 Basic stats: COMPLETE Column stats: COMPLETE
-              value expressions: _col1 (type: int)
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: bigint)
-              null sort order: z
-              sort order: +
-              Map-reduce partition columns: _col0 (type: bigint)
-              Statistics: Num rows: 4586 Data size: 27400 Basic stats: COMPLETE Column stats: COMPLETE
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 _col0 (type: bigint)
-            1 _col0 (type: bigint)
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 4586 Data size: 73144 Basic stats: COMPLETE Column stats: COMPLETE
-          Select Operator
-            expressions: _col0 (type: bigint), _col2 (type: bigint), _col1 (type: int)
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 4586 Data size: 73144 Basic stats: COMPLETE Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 4586 Data size: 73144 Basic stats: COMPLETE Column stats: COMPLETE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: g
-            filterExpr: cbigint is not null (type: boolean)
-            Statistics: Num rows: 12288 Data size: 73392 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: cbigint is not null (type: boolean)
-              Statistics: Num rows: 9173 Data size: 54792 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: cbigint (type: bigint)
-                outputColumnNames: _col0
-                Statistics: Num rows: 9173 Data size: 54792 Basic stats: COMPLETE Column stats: COMPLETE
-                Group By Operator
-                  keys: _col0 (type: bigint)
-                  minReductionHashAggr: 0.99
-                  mode: hash
-                  outputColumnNames: _col0
-                  Statistics: Num rows: 4586 Data size: 27400 Basic stats: COMPLETE Column stats: COMPLETE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: bigint)
-                    null sort order: z
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: bigint)
-                    Statistics: Num rows: 4586 Data size: 27400 Basic stats: COMPLETE Column stats: COMPLETE
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: bigint)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 4586 Data size: 27400 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: explain
-SELECT  f.ctinyint, g.ctinyint, MIN(f.ctinyint)              
-FROM alltypesorc f JOIN alltypesorc g ON(f.ctinyint = g.ctinyint)
-GROUP BY f.ctinyint, g.ctinyint
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: explain
-SELECT  f.ctinyint, g.ctinyint, MIN(f.ctinyint)              
-FROM alltypesorc f JOIN alltypesorc g ON(f.ctinyint = g.ctinyint)
-GROUP BY f.ctinyint, g.ctinyint
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-3
-  Stage-3 is a root stage
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: f
-            filterExpr: ctinyint is not null (type: boolean)
-            Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: ctinyint is not null (type: boolean)
-              Statistics: Num rows: 9173 Data size: 27396 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: ctinyint (type: tinyint)
-                outputColumnNames: _col0
-                Statistics: Num rows: 9173 Data size: 27396 Basic stats: COMPLETE Column stats: COMPLETE
-                Group By Operator
-                  aggregations: min(_col0)
-                  keys: _col0 (type: tinyint)
-                  minReductionHashAggr: 0.99
-                  mode: hash
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 131 Data size: 920 Basic stats: COMPLETE Column stats: COMPLETE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: tinyint)
-                    null sort order: z
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: tinyint)
-                    Statistics: Num rows: 131 Data size: 920 Basic stats: COMPLETE Column stats: COMPLETE
-                    value expressions: _col1 (type: tinyint)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: min(VALUE._col0)
-          keys: KEY._col0 (type: tinyint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 131 Data size: 920 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: tinyint)
-              null sort order: z
-              sort order: +
-              Map-reduce partition columns: _col0 (type: tinyint)
-              Statistics: Num rows: 131 Data size: 920 Basic stats: COMPLETE Column stats: COMPLETE
-              value expressions: _col1 (type: tinyint)
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: tinyint)
-              null sort order: z
-              sort order: +
-              Map-reduce partition columns: _col0 (type: tinyint)
-              Statistics: Num rows: 131 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 _col0 (type: tinyint)
-            1 _col0 (type: tinyint)
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 132 Data size: 1328 Basic stats: COMPLETE Column stats: COMPLETE
-          Select Operator
-            expressions: _col0 (type: tinyint), _col2 (type: tinyint), _col1 (type: tinyint)
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 132 Data size: 1328 Basic stats: COMPLETE Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 132 Data size: 1328 Basic stats: COMPLETE Column stats: COMPLETE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: g
-            filterExpr: ctinyint is not null (type: boolean)
-            Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: ctinyint is not null (type: boolean)
-              Statistics: Num rows: 9173 Data size: 27396 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: ctinyint (type: tinyint)
-                outputColumnNames: _col0
-                Statistics: Num rows: 9173 Data size: 27396 Basic stats: COMPLETE Column stats: COMPLETE
-                Group By Operator
-                  keys: _col0 (type: tinyint)
-                  minReductionHashAggr: 0.99
-                  mode: hash
-                  outputColumnNames: _col0
-                  Statistics: Num rows: 131 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: tinyint)
-                    null sort order: z
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: tinyint)
-                    Statistics: Num rows: 131 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: tinyint)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 131 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: explain
-SELECT   MIN(f.cint)     
-FROM alltypesorc f JOIN alltypesorc g ON(f.ctinyint = g.ctinyint)
-GROUP BY f.ctinyint, g.ctinyint
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: explain
-SELECT   MIN(f.cint)     
-FROM alltypesorc f JOIN alltypesorc g ON(f.ctinyint = g.ctinyint)
-GROUP BY f.ctinyint, g.ctinyint
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-3
-  Stage-3 is a root stage
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: f
-            filterExpr: ctinyint is not null (type: boolean)
-            Statistics: Num rows: 12288 Data size: 73392 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: ctinyint is not null (type: boolean)
-              Statistics: Num rows: 9173 Data size: 54792 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: ctinyint (type: tinyint), cint (type: int)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 9173 Data size: 54792 Basic stats: COMPLETE Column stats: COMPLETE
-                Group By Operator
-                  aggregations: min(_col1)
-                  keys: _col0 (type: tinyint)
-                  minReductionHashAggr: 0.99
-                  mode: hash
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 131 Data size: 920 Basic stats: COMPLETE Column stats: COMPLETE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: tinyint)
-                    null sort order: z
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: tinyint)
-                    Statistics: Num rows: 131 Data size: 920 Basic stats: COMPLETE Column stats: COMPLETE
-                    value expressions: _col1 (type: int)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: min(VALUE._col0)
-          keys: KEY._col0 (type: tinyint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 131 Data size: 920 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: tinyint)
-              null sort order: z
-              sort order: +
-              Map-reduce partition columns: _col0 (type: tinyint)
-              Statistics: Num rows: 131 Data size: 920 Basic stats: COMPLETE Column stats: COMPLETE
-              value expressions: _col1 (type: int)
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: tinyint)
-              null sort order: z
-              sort order: +
-              Map-reduce partition columns: _col0 (type: tinyint)
-              Statistics: Num rows: 131 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 _col0 (type: tinyint)
-            1 _col0 (type: tinyint)
-          outputColumnNames: _col1
-          Statistics: Num rows: 132 Data size: 528 Basic stats: COMPLETE Column stats: COMPLETE
-          Select Operator
-            expressions: _col1 (type: int)
-            outputColumnNames: _col0
-            Statistics: Num rows: 132 Data size: 528 Basic stats: COMPLETE Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 132 Data size: 528 Basic stats: COMPLETE Column stats: COMPLETE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: g
-            filterExpr: ctinyint is not null (type: boolean)
-            Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: ctinyint is not null (type: boolean)
-              Statistics: Num rows: 9173 Data size: 27396 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: ctinyint (type: tinyint)
-                outputColumnNames: _col0
-                Statistics: Num rows: 9173 Data size: 27396 Basic stats: COMPLETE Column stats: COMPLETE
-                Group By Operator
-                  keys: _col0 (type: tinyint)
-                  minReductionHashAggr: 0.99
-                  mode: hash
-                  outputColumnNames: _col0
-                  Statistics: Num rows: 131 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: tinyint)
-                    null sort order: z
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: tinyint)
-                    Statistics: Num rows: 131 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: tinyint)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 131 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: explain
-SELECT   count(f.ctinyint)              
-FROM alltypesorc f JOIN alltypesorc g ON(f.ctinyint = g.ctinyint)
-GROUP BY f.ctinyint, g.ctinyint
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: explain
-SELECT   count(f.ctinyint)              
-FROM alltypesorc f JOIN alltypesorc g ON(f.ctinyint = g.ctinyint)
-GROUP BY f.ctinyint, g.ctinyint
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-3
-  Stage-3 is a root stage
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: f
-            filterExpr: ctinyint is not null (type: boolean)
-            Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: ctinyint is not null (type: boolean)
-              Statistics: Num rows: 9173 Data size: 27396 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: ctinyint (type: tinyint)
-                outputColumnNames: _col0
-                Statistics: Num rows: 9173 Data size: 27396 Basic stats: COMPLETE Column stats: COMPLETE
-                Group By Operator
-                  aggregations: count(_col0)
-                  keys: _col0 (type: tinyint)
-                  minReductionHashAggr: 0.99
-                  mode: hash
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 131 Data size: 1444 Basic stats: COMPLETE Column stats: COMPLETE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: tinyint)
-                    null sort order: z
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: tinyint)
-                    Statistics: Num rows: 131 Data size: 1444 Basic stats: COMPLETE Column stats: COMPLETE
-                    value expressions: _col1 (type: bigint)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: tinyint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 131 Data size: 1444 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: tinyint)
-              null sort order: z
-              sort order: +
-              Map-reduce partition columns: _col0 (type: tinyint)
-              Statistics: Num rows: 131 Data size: 1444 Basic stats: COMPLETE Column stats: COMPLETE
-              value expressions: _col1 (type: bigint)
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: tinyint)
-              null sort order: z
-              sort order: +
-              Map-reduce partition columns: _col0 (type: tinyint)
-              Statistics: Num rows: 131 Data size: 1444 Basic stats: COMPLETE Column stats: COMPLETE
-              value expressions: _col1 (type: bigint)
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 _col0 (type: tinyint)
-            1 _col0 (type: tinyint)
-          outputColumnNames: _col1, _col3
-          Statistics: Num rows: 132 Data size: 2112 Basic stats: COMPLETE Column stats: COMPLETE
-          Select Operator
-            expressions: (_col1 * _col3) (type: bigint)
-            outputColumnNames: _col0
-            Statistics: Num rows: 132 Data size: 1056 Basic stats: COMPLETE Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 132 Data size: 1056 Basic stats: COMPLETE Column stats: COMPLETE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: g
-            filterExpr: ctinyint is not null (type: boolean)
-            Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: ctinyint is not null (type: boolean)
-              Statistics: Num rows: 9173 Data size: 27396 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: ctinyint (type: tinyint)
-                outputColumnNames: _col0
-                Statistics: Num rows: 9173 Data size: 27396 Basic stats: COMPLETE Column stats: COMPLETE
-                Group By Operator
-                  aggregations: count()
-                  keys: _col0 (type: tinyint)
-                  minReductionHashAggr: 0.99
-                  mode: hash
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 131 Data size: 1444 Basic stats: COMPLETE Column stats: COMPLETE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: tinyint)
-                    null sort order: z
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: tinyint)
-                    Statistics: Num rows: 131 Data size: 1444 Basic stats: COMPLETE Column stats: COMPLETE
-                    value expressions: _col1 (type: bigint)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: tinyint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 131 Data size: 1444 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: explain
-SELECT   count(f.cint), f.ctinyint              
-FROM alltypesorc f JOIN alltypesorc g ON(f.ctinyint = g.ctinyint)
-GROUP BY f.ctinyint, g.ctinyint
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: explain
-SELECT   count(f.cint), f.ctinyint              
-FROM alltypesorc f JOIN alltypesorc g ON(f.ctinyint = g.ctinyint)
-GROUP BY f.ctinyint, g.ctinyint
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-3
-  Stage-3 is a root stage
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: f
-            filterExpr: ctinyint is not null (type: boolean)
-            Statistics: Num rows: 12288 Data size: 73392 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: ctinyint is not null (type: boolean)
-              Statistics: Num rows: 9173 Data size: 54792 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: ctinyint (type: tinyint), cint (type: int)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 9173 Data size: 54792 Basic stats: COMPLETE Column stats: COMPLETE
-                Group By Operator
-                  aggregations: count(_col1)
-                  keys: _col0 (type: tinyint)
-                  minReductionHashAggr: 0.99
-                  mode: hash
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 131 Data size: 1444 Basic stats: COMPLETE Column stats: COMPLETE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: tinyint)
-                    null sort order: z
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: tinyint)
-                    Statistics: Num rows: 131 Data size: 1444 Basic stats: COMPLETE Column stats: COMPLETE
-                    value expressions: _col1 (type: bigint)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: tinyint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 131 Data size: 1444 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: tinyint)
-              null sort order: z
-              sort order: +
-              Map-reduce partition columns: _col0 (type: tinyint)
-              Statistics: Num rows: 131 Data size: 1444 Basic stats: COMPLETE Column stats: COMPLETE
-              value expressions: _col1 (type: bigint)
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: tinyint)
-              null sort order: z
-              sort order: +
-              Map-reduce partition columns: _col0 (type: tinyint)
-              Statistics: Num rows: 131 Data size: 1444 Basic stats: COMPLETE Column stats: COMPLETE
-              value expressions: _col1 (type: bigint)
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 _col0 (type: tinyint)
-            1 _col0 (type: tinyint)
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 132 Data size: 2512 Basic stats: COMPLETE Column stats: COMPLETE
-          Select Operator
-            expressions: (_col1 * _col3) (type: bigint), _col0 (type: tinyint)
-            outputColumnNames: _col0, _col1
-            Statistics: Num rows: 132 Data size: 1456 Basic stats: COMPLETE Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 132 Data size: 1456 Basic stats: COMPLETE Column stats: COMPLETE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: g
-            filterExpr: ctinyint is not null (type: boolean)
-            Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: ctinyint is not null (type: boolean)
-              Statistics: Num rows: 9173 Data size: 27396 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: ctinyint (type: tinyint)
-                outputColumnNames: _col0
-                Statistics: Num rows: 9173 Data size: 27396 Basic stats: COMPLETE Column stats: COMPLETE
-                Group By Operator
-                  aggregations: count()
-                  keys: _col0 (type: tinyint)
-                  minReductionHashAggr: 0.99
-                  mode: hash
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 131 Data size: 1444 Basic stats: COMPLETE Column stats: COMPLETE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: tinyint)
-                    null sort order: z
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: tinyint)
-                    Statistics: Num rows: 131 Data size: 1444 Basic stats: COMPLETE Column stats: COMPLETE
-                    value expressions: _col1 (type: bigint)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: tinyint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 131 Data size: 1444 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: explain
-SELECT   sum(f.cint), f.ctinyint            
-FROM alltypesorc f JOIN alltypesorc g ON(f.ctinyint = g.ctinyint)
-GROUP BY f.ctinyint, g.ctinyint
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: explain
-SELECT   sum(f.cint), f.ctinyint            
-FROM alltypesorc f JOIN alltypesorc g ON(f.ctinyint = g.ctinyint)
-GROUP BY f.ctinyint, g.ctinyint
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-3
-  Stage-3 is a root stage
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: f
-            filterExpr: ctinyint is not null (type: boolean)
-            Statistics: Num rows: 12288 Data size: 73392 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: ctinyint is not null (type: boolean)
-              Statistics: Num rows: 9173 Data size: 54792 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: ctinyint (type: tinyint), cint (type: int)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 9173 Data size: 54792 Basic stats: COMPLETE Column stats: COMPLETE
-                Group By Operator
-                  aggregations: sum(_col1)
-                  keys: _col0 (type: tinyint)
-                  minReductionHashAggr: 0.99
-                  mode: hash
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 131 Data size: 1444 Basic stats: COMPLETE Column stats: COMPLETE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: tinyint)
-                    null sort order: z
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: tinyint)
-                    Statistics: Num rows: 131 Data size: 1444 Basic stats: COMPLETE Column stats: COMPLETE
-                    value expressions: _col1 (type: bigint)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: sum(VALUE._col0)
-          keys: KEY._col0 (type: tinyint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 131 Data size: 1444 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: tinyint)
-              null sort order: z
-              sort order: +
-              Map-reduce partition columns: _col0 (type: tinyint)
-              Statistics: Num rows: 131 Data size: 1444 Basic stats: COMPLETE Column stats: COMPLETE
-              value expressions: _col1 (type: bigint)
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: tinyint)
-              null sort order: z
-              sort order: +
-              Map-reduce partition columns: _col0 (type: tinyint)
-              Statistics: Num rows: 131 Data size: 1444 Basic stats: COMPLETE Column stats: COMPLETE
-              value expressions: _col1 (type: bigint)
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 _col0 (type: tinyint)
-            1 _col0 (type: tinyint)
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 132 Data size: 2512 Basic stats: COMPLETE Column stats: COMPLETE
-          Select Operator
-            expressions: (_col1 * _col3) (type: bigint), _col0 (type: tinyint)
-            outputColumnNames: _col0, _col1
-            Statistics: Num rows: 132 Data size: 1456 Basic stats: COMPLETE Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 132 Data size: 1456 Basic stats: COMPLETE Column stats: COMPLETE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: g
-            filterExpr: ctinyint is not null (type: boolean)
-            Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: ctinyint is not null (type: boolean)
-              Statistics: Num rows: 9173 Data size: 27396 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: ctinyint (type: tinyint)
-                outputColumnNames: _col0
-                Statistics: Num rows: 9173 Data size: 27396 Basic stats: COMPLETE Column stats: COMPLETE
-                Group By Operator
-                  aggregations: count()
-                  keys: _col0 (type: tinyint)
-                  minReductionHashAggr: 0.99
-                  mode: hash
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 131 Data size: 1444 Basic stats: COMPLETE Column stats: COMPLETE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: tinyint)
-                    null sort order: z
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: tinyint)
-                    Statistics: Num rows: 131 Data size: 1444 Basic stats: COMPLETE Column stats: COMPLETE
-                    value expressions: _col1 (type: bigint)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: tinyint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 131 Data size: 1444 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: ALTER TABLE alltypesorc ADD CONSTRAINT pk_alltypesorc_1 PRIMARY KEY (ctinyint) DISABLE RELY
-PREHOOK: type: ALTERTABLE_ADDCONSTRAINT
-POSTHOOK: query: ALTER TABLE alltypesorc ADD CONSTRAINT pk_alltypesorc_1 PRIMARY KEY (ctinyint) DISABLE RELY
-POSTHOOK: type: ALTERTABLE_ADDCONSTRAINT
-PREHOOK: query: explain
-SELECT sum(f.cint), f.ctinyint            
-FROM alltypesorc f JOIN alltypesorc g ON(f.ctinyint = g.ctinyint)
-GROUP BY f.ctinyint, g.ctinyint
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: explain
-SELECT sum(f.cint), f.ctinyint            
-FROM alltypesorc f JOIN alltypesorc g ON(f.ctinyint = g.ctinyint)
-GROUP BY f.ctinyint, g.ctinyint
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: f
-            Statistics: Num rows: 12288 Data size: 73392 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: ctinyint (type: tinyint), cint (type: int)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 12288 Data size: 73392 Basic stats: COMPLETE Column stats: COMPLETE
-              Reduce Output Operator
-                key expressions: _col0 (type: tinyint)
-                null sort order: z
-                sort order: +
-                Map-reduce partition columns: _col0 (type: tinyint)
-                Statistics: Num rows: 12288 Data size: 73392 Basic stats: COMPLETE Column stats: COMPLETE
-                value expressions: _col1 (type: int)
-          TableScan
-            alias: g
-            Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: ctinyint (type: tinyint)
-              outputColumnNames: _col0
-              Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
-              Reduce Output Operator
-                key expressions: _col0 (type: tinyint)
-                null sort order: z
-                sort order: +
-                Map-reduce partition columns: _col0 (type: tinyint)
-                Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 _col0 (type: tinyint)
-            1 _col0 (type: tinyint)
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 1161499 Data size: 13900620 Basic stats: COMPLETE Column stats: COMPLETE
-          Group By Operator
-            aggregations: sum(_col1)
-            keys: _col0 (type: tinyint), _col2 (type: tinyint)
-            minReductionHashAggr: 0.99
-            mode: hash
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 17161 Data size: 274216 Basic stats: COMPLETE Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: tinyint), _col1 (type: tinyint)
-              null sort order: zz
-              sort order: ++
-              Map-reduce partition columns: _col0 (type: tinyint), _col1 (type: tinyint)
-              Statistics: Num rows: 17161 Data size: 274216 Basic stats: COMPLETE Column stats: COMPLETE
-              value expressions: _col2 (type: bigint)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: sum(VALUE._col0)
-          keys: KEY._col0 (type: tinyint), KEY._col1 (type: tinyint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 17161 Data size: 274216 Basic stats: COMPLETE Column stats: COMPLETE
-          Select Operator
-            expressions: _col2 (type: bigint), _col0 (type: tinyint)
-            outputColumnNames: _col0, _col1
-            Statistics: Num rows: 17161 Data size: 205752 Basic stats: COMPLETE Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 17161 Data size: 205752 Basic stats: COMPLETE Column stats: COMPLETE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: ALTER TABLE alltypesorc DROP CONSTRAINT pk_alltypesorc_1
-PREHOOK: type: ALTERTABLE_DROPCONSTRAINT
-POSTHOOK: query: ALTER TABLE alltypesorc DROP CONSTRAINT pk_alltypesorc_1
-POSTHOOK: type: ALTERTABLE_DROPCONSTRAINT
-PREHOOK: query: ALTER TABLE alltypesorc ADD CONSTRAINT uk_alltypesorc_1 UNIQUE (ctinyint) DISABLE RELY
-PREHOOK: type: ALTERTABLE_ADDCONSTRAINT
-POSTHOOK: query: ALTER TABLE alltypesorc ADD CONSTRAINT uk_alltypesorc_1 UNIQUE (ctinyint) DISABLE RELY
-POSTHOOK: type: ALTERTABLE_ADDCONSTRAINT
-PREHOOK: query: explain
-SELECT sum(f.cint), f.ctinyint            
-FROM alltypesorc f JOIN alltypesorc g ON(f.ctinyint = g.ctinyint)
-GROUP BY f.ctinyint, g.ctinyint
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-POSTHOOK: query: explain
-SELECT sum(f.cint), f.ctinyint            
-FROM alltypesorc f JOIN alltypesorc g ON(f.ctinyint = g.ctinyint)
-GROUP BY f.ctinyint, g.ctinyint
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-#### A masked pattern was here ####
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: f
-            filterExpr: ctinyint is not null (type: boolean)
-            Statistics: Num rows: 12288 Data size: 73392 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: ctinyint is not null (type: boolean)
-              Statistics: Num rows: 9173 Data size: 54792 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: ctinyint (type: tinyint), cint (type: int)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 9173 Data size: 54792 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  key expressions: _col0 (type: tinyint)
-                  null sort order: z
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: tinyint)
-                  Statistics: Num rows: 9173 Data size: 54792 Basic stats: COMPLETE Column stats: COMPLETE
-                  value expressions: _col1 (type: int)
-          TableScan
-            alias: g
-            filterExpr: ctinyint is not null (type: boolean)
-            Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: ctinyint is not null (type: boolean)
-              Statistics: Num rows: 9173 Data size: 27396 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: ctinyint (type: tinyint)
-                outputColumnNames: _col0
-                Statistics: Num rows: 9173 Data size: 27396 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  key expressions: _col0 (type: tinyint)
-                  null sort order: z
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: tinyint)
-                  Statistics: Num rows: 9173 Data size: 27396 Basic stats: COMPLETE Column stats: COMPLETE
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 _col0 (type: tinyint)
-            1 _col0 (type: tinyint)
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 647260 Data size: 7739232 Basic stats: COMPLETE Column stats: COMPLETE
-          Group By Operator
-            aggregations: sum(_col1)
-            keys: _col0 (type: tinyint), _col2 (type: tinyint)
-            minReductionHashAggr: 0.99
-            mode: hash
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 17161 Data size: 274088 Basic stats: COMPLETE Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: tinyint), _col1 (type: tinyint)
-              null sort order: zz
-              sort order: ++
-              Map-reduce partition columns: _col0 (type: tinyint), _col1 (type: tinyint)
-              Statistics: Num rows: 17161 Data size: 274088 Basic stats: COMPLETE Column stats: COMPLETE
-              value expressions: _col2 (type: bigint)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: sum(VALUE._col0)
-          keys: KEY._col0 (type: tinyint), KEY._col1 (type: tinyint)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 17161 Data size: 274088 Basic stats: COMPLETE Column stats: COMPLETE
-          Select Operator
-            expressions: _col2 (type: bigint), _col0 (type: tinyint)
-            outputColumnNames: _col0, _col1
-            Statistics: Num rows: 17161 Data size: 205688 Basic stats: COMPLETE Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 17161 Data size: 205688 Basic stats: COMPLETE Column stats: COMPLETE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
diff --git a/ql/src/test/results/clientpositive/groupby_map_ppr.q.out b/ql/src/test/results/clientpositive/groupby_map_ppr.q.out
deleted file mode 100644
index 621a80a..0000000
--- a/ql/src/test/results/clientpositive/groupby_map_ppr.q.out
+++ /dev/null
@@ -1,405 +0,0 @@
-PREHOOK: query: CREATE TABLE dest1_n144(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@dest1_n144
-POSTHOOK: query: CREATE TABLE dest1_n144(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@dest1_n144
-PREHOOK: query: EXPLAIN EXTENDED
-FROM srcpart src
-INSERT OVERWRITE TABLE dest1_n144 
-SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) 
-WHERE src.ds = '2008-04-08'
-GROUP BY substr(src.key,1,1)
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@dest1_n144
-POSTHOOK: query: EXPLAIN EXTENDED
-FROM srcpart src
-INSERT OVERWRITE TABLE dest1_n144 
-SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) 
-WHERE src.ds = '2008-04-08'
-GROUP BY substr(src.key,1,1)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
... 218452 lines suppressed ...