Posted to commits@hive.apache.org by mg...@apache.org on 2020/05/18 10:10:51 UTC

[hive] branch master updated: HIVE-23440 Move q tests to TestMiniLlapLocal from TestCliDriver where the output is different, batch 4 (Miklos Gergely, reviewed by Jesus Camacho Rodriguez)

This is an automated email from the ASF dual-hosted git repository.

mgergely pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new 105e33a  HIVE-23440 Move q tests to TestMiniLlapLocal from TestCliDriver where the output is different, batch 4 (Miklos Gergely, reviewed by Jesus Camacho Rodriguez)
105e33a is described below

commit 105e33a1de301b876bd4c79a16a38d42f72db831
Author: miklosgergely <mg...@cloudera.com>
AuthorDate: Mon May 18 12:04:29 2020 +0200

    HIVE-23440 Move q tests to TestMiniLlapLocal from TestCliDriver where the output is different, batch 4 (Miklos Gergely, reviewed by Jesus Camacho Rodriguez)
---
 .../test/resources/testconfiguration.properties    |  235 +-
 .../queries/clientpositive/temp_table_options1.q   |   11 +-
 .../temp_table_parquet_mixed_partition_formats2.q  |    8 +-
 .../temp_table_partition_multilevels.q             |   60 +-
 .../queries/clientpositive/udf_concat_insert1.q    |    2 +
 ql/src/test/queries/clientpositive/udf_explode.q   |   24 +-
 .../queries/clientpositive/udf_sort_array_by.q     |    2 +
 .../test/queries/clientpositive/udf_width_bucket.q |    2 +
 ql/src/test/queries/clientpositive/udtf_explode.q  |   24 +-
 .../queries/clientpositive/udtf_parse_url_tuple.q  |    2 +
 ql/src/test/queries/clientpositive/union.q         |    5 +-
 ql/src/test/queries/clientpositive/union13.q       |    2 +-
 ql/src/test/queries/clientpositive/union15.q       |    3 +-
 ql/src/test/queries/clientpositive/union35.q       |    2 +
 .../test/queries/clientpositive/union_pos_alias.q  |    4 +-
 ql/src/test/queries/clientpositive/varchar_serde.q |   24 +-
 .../vectorization_parquet_ppd_decimal.q            |    7 +-
 ql/src/test/queries/clientpositive/view_alias.q    |   16 +-
 .../queries/clientpositive/windowing_expressions.q |   13 +-
 .../clientpositive/windowing_multipartitioning.q   |    4 +-
 .../clientpositive/windowing_range_multiorder.q    |   22 +-
 .../test/queries/clientpositive/windowing_udaf.q   |   15 +-
 .../queries/clientpositive/windowing_windowspec3.q |    2 +
 .../temp_table_merge_dynamic_partition.q.out       |  368 ++-
 .../temp_table_merge_dynamic_partition2.q.out      |  122 +-
 .../temp_table_merge_dynamic_partition3.q.out      |  122 +-
 .../temp_table_merge_dynamic_partition4.q.out      |  112 +-
 .../temp_table_merge_dynamic_partition5.q.out      |  112 +-
 .../{ => llap}/temp_table_options1.q.out           |  228 +-
 ...mp_table_parquet_mixed_partition_formats2.q.out |   18 +-
 .../llap/temp_table_partition_boolexpr.q.out       |  317 +++
 .../temp_table_partition_condition_remover.q.out   |    2 -
 .../{ => llap}/temp_table_partition_ctas.q.out     |  129 +-
 .../temp_table_partition_multilevels.q.out         |  274 +-
 .../{ => llap}/temp_table_partition_pruning.q.out  |  219 +-
 .../temp_table_windowing_expressions.q.out         |   12 +-
 .../{ => llap}/test_teradatabinaryfile.q.out       |    4 +-
 .../results/clientpositive/llap/timestamp.q.out    |  415 +++
 .../{ => llap}/timestamp_comparison3.q.out         |  136 +-
 .../{ => llap}/timestamp_ints_casts.q.out          |   72 +-
 .../{ => llap}/timestamp_literal.q.out             |    6 -
 .../clientpositive/{ => llap}/timestamptz.q.out    |    8 -
 .../{ => llap}/truncate_column_buckets.q.out       |    0
 .../{ => llap}/truncate_column_list_bucket.q.out   |  142 +-
 .../clientpositive/{ => llap}/type_cast_1.q.out    |    3 -
 .../clientpositive/{ => llap}/type_widening.q.out  |  109 +-
 .../{ => llap}/udaf_binarysetfunctions.q.out       |  128 +-
 .../udaf_binarysetfunctions_no_cbo.q.out           |  112 +-
 .../clientpositive/llap/udaf_number_format.q.out   |   95 +
 .../{ => llap}/udaf_percentile_cont.q.out          |   80 +-
 .../{ => llap}/udaf_percentile_disc.q.out          |   80 +-
 ql/src/test/results/clientpositive/llap/udf1.q.out |  176 ++
 .../results/clientpositive/{ => llap}/udf2.q.out   |   29 +-
 ql/src/test/results/clientpositive/llap/udf3.q.out |  136 +
 .../results/clientpositive/{ => llap}/udf4.q.out   |   29 +-
 .../results/clientpositive/{ => llap}/udf5.q.out   |    4 -
 .../results/clientpositive/{ => llap}/udf6.q.out   |    4 -
 .../results/clientpositive/{ => llap}/udf7.q.out   |    2 -
 ql/src/test/results/clientpositive/llap/udf8.q.out |  105 +
 .../results/clientpositive/{ => llap}/udf9.q.out   |   36 +-
 .../results/clientpositive/llap/udf_10_trims.q.out |  128 +
 .../results/clientpositive/{ => llap}/udf_E.q.out  |    4 -
 .../results/clientpositive/{ => llap}/udf_PI.q.out |    4 -
 .../clientpositive/{ => llap}/udf_abs.q.out        |    4 -
 .../clientpositive/{ => llap}/udf_add_months.q.out |    2 -
 .../{ => llap}/udf_aes_decrypt.q.out               |    2 -
 .../{ => llap}/udf_aes_encrypt.q.out               |    2 -
 .../clientpositive/{ => llap}/udf_array.q.out      |    2 -
 .../clientpositive/{ => llap}/udf_ascii.q.out      |    2 -
 .../clientpositive/{ => llap}/udf_between.q.out    |   23 -
 .../{ => llap}/udf_bitwise_shiftleft.q.out         |    2 -
 .../{ => llap}/udf_bitwise_shiftright.q.out        |    2 -
 .../udf_bitwise_shiftrightunsigned.q.out           |    2 -
 .../clientpositive/{ => llap}/udf_case.q.out       |    2 -
 .../llap/udf_case_column_pruning.q.out             |  163 ++
 .../{ => llap}/udf_case_thrift.q.out               |    2 -
 .../clientpositive/{ => llap}/udf_cbrt.q.out       |    2 -
 .../{ => llap}/udf_character_length.q.out          |  156 +-
 .../{ => llap}/udf_concat_insert1.q.out            |    0
 .../clientpositive/{ => llap}/udf_concat_ws.q.out  |    4 -
 .../clientpositive/{ => llap}/udf_crc32.q.out      |    2 -
 .../{ => llap}/udf_current_database.q.out          |    8 -
 .../{ => llap}/udf_date_format.q.out               |    2 -
 .../udf_datetime_legacy_hybrid_calendar.q.out      |   38 +-
 .../clientpositive/{ => llap}/udf_decode.q.out     |    2 -
 .../clientpositive/{ => llap}/udf_degrees.q.out    |    4 -
 .../clientpositive/{ => llap}/udf_elt.q.out        |    2 -
 .../{ => llap}/udf_example_add.q.out               |    3 -
 .../results/clientpositive/llap/udf_explode.q.out  |  655 +++++
 .../clientpositive/{ => llap}/udf_factorial.q.out  |    2 -
 .../{ => llap}/udf_find_in_set.q.out               |    2 -
 .../clientpositive/llap/udf_folder_constants.q.out |  148 ++
 .../{ => llap}/udf_format_number.q.out             |    2 -
 .../{ => llap}/udf_from_utc_timestamp.q.out        |    2 -
 .../{ => llap}/udf_get_json_object.q.out           |    2 -
 .../clientpositive/{ => llap}/udf_greatest.q.out   |    2 -
 .../clientpositive/{ => llap}/udf_hash.q.out       |    2 -
 .../clientpositive/{ => llap}/udf_hour.q.out       |    3 -
 .../results/clientpositive/{ => llap}/udf_if.q.out |    4 -
 .../clientpositive/{ => llap}/udf_in_file.q.out    |   34 +-
 .../clientpositive/{ => llap}/udf_inline.q.out     |    5 -
 .../clientpositive/{ => llap}/udf_instr.q.out      |    2 -
 .../{ => llap}/udf_isnull_isnotnull.q.out          |    7 -
 .../clientpositive/llap/udf_isops_simplify.q.out   |  422 +++
 .../{ => llap}/udf_java_method.q.out               |    2 -
 .../clientpositive/{ => llap}/udf_last_day.q.out   |    2 -
 .../clientpositive/{ => llap}/udf_least.q.out      |    2 -
 .../clientpositive/{ => llap}/udf_length.q.out     |  154 +-
 .../{ => llap}/udf_levenshtein.q.out               |    2 -
 .../clientpositive/{ => llap}/udf_like.q.out       |    3 -
 .../clientpositive/{ => llap}/udf_locate.q.out     |    2 -
 .../clientpositive/{ => llap}/udf_lower.q.out      |   36 +-
 .../clientpositive/{ => llap}/udf_lpad.q.out       |    2 -
 .../clientpositive/{ => llap}/udf_map.q.out        |    2 -
 .../clientpositive/{ => llap}/udf_mask.q.out       |    2 -
 .../{ => llap}/udf_mask_first_n.q.out              |    2 -
 .../clientpositive/{ => llap}/udf_mask_hash.q.out  |    2 -
 .../{ => llap}/udf_mask_last_n.q.out               |    2 -
 .../{ => llap}/udf_mask_show_first_n.q.out         |    2 -
 .../{ => llap}/udf_mask_show_last_n.q.out          |    2 -
 .../clientpositive/{ => llap}/udf_md5.q.out        |    2 -
 .../clientpositive/{ => llap}/udf_minute.q.out     |   36 +-
 .../{ => llap}/udf_months_between.q.out            |    2 -
 .../{ => llap}/udf_named_struct.q.out              |    2 -
 .../clientpositive/{ => llap}/udf_next_day.q.out   |    2 -
 .../clientpositive/{ => llap}/udf_notequal.q.out   |    6 -
 .../clientpositive/{ => llap}/udf_nullif.q.out     |    6 -
 .../clientpositive/{ => llap}/udf_nvl.q.out        |    2 -
 .../{ => llap}/udf_octet_length.q.out              |  154 +-
 .../clientpositive/{ => llap}/udf_parse_url.q.out  |   36 +-
 .../clientpositive/{ => llap}/udf_position.q.out   |    2 -
 .../clientpositive/{ => llap}/udf_quarter.q.out    |    2 -
 .../clientpositive/{ => llap}/udf_radians.q.out    |    4 -
 .../clientpositive/{ => llap}/udf_reflect.q.out    |    2 -
 .../clientpositive/{ => llap}/udf_reflect2.q.out   |    3 -
 .../clientpositive/{ => llap}/udf_repeat.q.out     |    2 -
 .../clientpositive/{ => llap}/udf_reverse.q.out    |  150 +-
 .../clientpositive/{ => llap}/udf_rpad.q.out       |    2 -
 .../clientpositive/{ => llap}/udf_second.q.out     |    3 -
 .../clientpositive/{ => llap}/udf_sha1.q.out       |    2 -
 .../clientpositive/{ => llap}/udf_sha2.q.out       |    2 -
 .../clientpositive/{ => llap}/udf_sign.q.out       |    4 -
 .../clientpositive/{ => llap}/udf_size.q.out       |    4 -
 .../clientpositive/{ => llap}/udf_sort_array.q.out |    2 -
 .../{ => llap}/udf_sort_array_by.q.out             |   52 +-
 .../clientpositive/{ => llap}/udf_soundex.q.out    |    2 -
 .../clientpositive/{ => llap}/udf_space.q.out      |    2 -
 .../clientpositive/{ => llap}/udf_split.q.out      |    2 -
 .../clientpositive/{ => llap}/udf_struct.q.out     |    2 -
 .../{ => llap}/udf_substring_index.q.out           |    2 -
 .../{ => llap}/udf_to_unix_timestamp.q.out         |    6 -
 .../{ => llap}/udf_to_utc_timestamp.q.out          |    2 -
 .../clientpositive/{ => llap}/udf_trunc.q.out      |   24 -
 .../{ => llap}/udf_trunc_number.q.out              |   12 -
 .../clientpositive/{ => llap}/udf_union.q.out      |    2 -
 .../clientpositive/{ => llap}/udf_when.q.out       |    2 -
 .../{ => llap}/udf_width_bucket.q.out              |    4 +-
 .../results/clientpositive/llap/udtf_explode.q.out |  546 ++++
 .../{ => llap}/udtf_get_sql_schema.q.out           |    8 -
 .../clientpositive/llap/udtf_json_tuple.q.out      |  501 ++++
 .../clientpositive/llap/udtf_parse_url_tuple.q.out |  468 ++++
 .../results/clientpositive/llap/udtf_stack.q.out   |  149 ++
 .../{ => llap}/unicode_comments.q.out              |    0
 .../clientpositive/{ => llap}/unicode_data.q.out   |   40 +-
 .../results/clientpositive/{ => llap}/union.q.out  | 1062 ++++----
 .../test/results/clientpositive/llap/union10.q.out |  292 +++
 .../test/results/clientpositive/llap/union11.q.out |  221 ++
 .../test/results/clientpositive/llap/union12.q.out |  300 +++
 .../clientpositive/{ => llap}/union13.q.out        | 1911 +++++++-------
 .../test/results/clientpositive/llap/union14.q.out |  163 ++
 .../test/results/clientpositive/llap/union15.q.out |  193 ++
 .../test/results/clientpositive/llap/union16.q.out |  746 ++++++
 .../clientpositive/{ => llap}/union17.q.out        |  429 ++--
 .../clientpositive/{ => llap}/union18.q.out        |  400 ++-
 .../clientpositive/{ => llap}/union19.q.out        |  386 ++-
 .../test/results/clientpositive/llap/union20.q.out |  223 ++
 .../test/results/clientpositive/llap/union21.q.out |  773 ++++++
 .../clientpositive/{ => llap}/union22.q.out        |  828 +++---
 .../test/results/clientpositive/llap/union24.q.out | 1407 ++++++++++
 .../test/results/clientpositive/llap/union25.q.out |  271 ++
 .../clientpositive/{ => llap}/union26.q.out        |  294 +--
 .../test/results/clientpositive/llap/union27.q.out |  178 ++
 .../test/results/clientpositive/llap/union28.q.out |  306 +++
 .../test/results/clientpositive/llap/union29.q.out |  266 ++
 .../test/results/clientpositive/llap/union30.q.out |  373 +++
 .../test/results/clientpositive/llap/union31.q.out | 1134 +++++++++
 .../test/results/clientpositive/llap/union32.q.out |  822 ++++++
 .../test/results/clientpositive/llap/union33.q.out |  460 ++++
 .../test/results/clientpositive/llap/union34.q.out |  446 ++++
 .../clientpositive/{ => llap}/union35.q.out        |   16 +-
 .../test/results/clientpositive/llap/union37.q.out |  590 +++++
 .../clientpositive/llap/union_lateralview.q.out    |  341 +++
 .../results/clientpositive/llap/union_offcbo.q.out | 1858 ++++++++++++++
 .../clientpositive/{ => llap}/union_paren.q.out    |   79 +-
 .../clientpositive/llap/union_pos_alias.q.out      | 2125 ++++++++++++++++
 .../results/clientpositive/llap/union_ppr.q.out    |  661 +++++
 .../clientpositive/{ => llap}/union_remove_1.q.out |  137 +-
 .../{ => llap}/union_remove_10.q.out               |  193 +-
 .../{ => llap}/union_remove_11.q.out               |  186 +-
 .../{ => llap}/union_remove_12.q.out               |  195 +-
 .../{ => llap}/union_remove_13.q.out               |  236 +-
 .../{ => llap}/union_remove_14.q.out               |  195 +-
 .../{ => llap}/union_remove_15.q.out               |  155 +-
 .../{ => llap}/union_remove_16.q.out               |  187 +-
 .../{ => llap}/union_remove_17.q.out               |  128 +-
 .../{ => llap}/union_remove_18.q.out               |  147 +-
 .../clientpositive/llap/union_remove_19.q.out      |  475 ++++
 .../clientpositive/{ => llap}/union_remove_2.q.out |  153 +-
 .../{ => llap}/union_remove_20.q.out               |  149 +-
 .../{ => llap}/union_remove_21.q.out               |  128 +-
 .../{ => llap}/union_remove_22.q.out               |  296 +--
 .../clientpositive/llap/union_remove_23.q.out      |  269 ++
 .../{ => llap}/union_remove_24.q.out               |  153 +-
 .../{ => llap}/union_remove_25.q.out               |  421 ++-
 .../clientpositive/{ => llap}/union_remove_3.q.out |  152 +-
 .../clientpositive/{ => llap}/union_remove_4.q.out |  179 +-
 .../clientpositive/{ => llap}/union_remove_5.q.out |  195 +-
 .../clientpositive/{ => llap}/union_remove_6.q.out |  223 +-
 .../clientpositive/llap/union_remove_6_subq.q.out  | 1243 +++++++++
 .../clientpositive/{ => llap}/union_remove_7.q.out |  139 +-
 .../clientpositive/{ => llap}/union_remove_8.q.out |  155 +-
 .../clientpositive/{ => llap}/union_remove_9.q.out |  218 +-
 .../clientpositive/llap/union_remove_plan.q.out    |  120 +
 .../results/clientpositive/llap/union_view.q.out   | 1092 ++++++++
 .../{ => llap}/unionall_lateralview.q.out          |    2 +-
 .../{ => llap}/unionall_unbalancedppd.q.out        |  135 +-
 .../{ => llap}/updateBasicStats.q.out              |   24 -
 .../clientpositive/{ => llap}/varchar_serde.q.out  |  108 +-
 .../clientpositive/{ => llap}/vector_const.q.out   |   38 +-
 .../llap/vector_decimal_col_scalar_division.q.out  |  166 ++
 .../llap/vector_decimal_partition.q.out            |  178 ++
 .../llap/vector_delete_orig_table.q.out            |  195 ++
 .../clientpositive/llap/vector_empty_where.q.out   |  684 +++++
 .../{ => llap}/vector_gather_stats.q.out           |   30 +-
 .../llap/vector_non_constant_in_expr.q.out         |   59 +
 .../llap/vector_outer_join_no_keys.q.out           |  430 ++++
 .../llap/vector_tablesample_rows.q.out             |  423 +++
 .../llap/vector_windowing_row_number.q.out         |  831 ++++++
 .../llap/vectorization_multi_value.q.out           |  677 +++++
 .../llap/vectorization_numeric_overflows.q.out     | 1366 ++++++++++
 .../llap/vectorization_offset_limit.q.out          |  208 ++
 .../vectorization_parquet_ppd_decimal.q.out        |   74 +-
 .../llap/vectorization_parquet_projection.q.out    |  722 ++++++
 .../{ => llap}/vectorization_sum_if_when.q.out     |    6 +-
 .../clientpositive/llap/vectorized_join46_mr.q.out | 2689 ++++++++++++++++++++
 .../clientpositive/llap/vectorized_mapjoin2.q.out  |  226 ++
 .../clientpositive/{ => llap}/view_alias.q.out     |   88 +-
 .../clientpositive/{ => llap}/view_cbo.q.out       |  433 ++--
 .../{ => llap}/windowing_expressions.q.out         |  840 +++---
 .../clientpositive/llap/windowing_gby2.q.out       |  665 +++++
 .../{ => llap}/windowing_multipartitioning.q.out   |  250 +-
 .../{ => llap}/windowing_navfn.q.out               |  103 +-
 .../{ => llap}/windowing_range_multiorder.q.out    | 1390 +++++-----
 .../{ => llap}/windowing_streaming.q.out           |  346 +--
 .../clientpositive/llap/windowing_udaf.q.out       |  600 +++++
 .../{ => llap}/windowing_windowspec3.q.out         |   36 +-
 .../temp_table_partition_boolexpr.q.out            |  289 ---
 ql/src/test/results/clientpositive/timestamp.q.out |  336 ---
 .../clientpositive/udaf_number_format.q.out        |   85 -
 ql/src/test/results/clientpositive/udf1.q.out      |  206 --
 ql/src/test/results/clientpositive/udf3.q.out      |  127 -
 ql/src/test/results/clientpositive/udf8.q.out      |   95 -
 .../test/results/clientpositive/udf_10_trims.q.out |  158 --
 .../clientpositive/udf_case_column_pruning.q.out   |  153 --
 .../test/results/clientpositive/udf_explode.q.out  |  415 ---
 .../clientpositive/udf_folder_constants.q.out      |  134 -
 .../clientpositive/udf_isops_simplify.q.out        |  368 ---
 .../test/results/clientpositive/udtf_explode.q.out |  575 -----
 .../results/clientpositive/udtf_json_tuple.q.out   |  471 ----
 .../clientpositive/udtf_parse_url_tuple.q.out      |  428 ----
 .../test/results/clientpositive/udtf_stack.q.out   |  223 --
 ql/src/test/results/clientpositive/union10.q.out   |  348 ---
 ql/src/test/results/clientpositive/union11.q.out   |  237 --
 ql/src/test/results/clientpositive/union12.q.out   |  356 ---
 ql/src/test/results/clientpositive/union14.q.out   |  160 --
 ql/src/test/results/clientpositive/union15.q.out   |  187 --
 ql/src/test/results/clientpositive/union16.q.out   |  663 -----
 ql/src/test/results/clientpositive/union20.q.out   |  240 --
 ql/src/test/results/clientpositive/union21.q.out   |  750 ------
 ql/src/test/results/clientpositive/union24.q.out   | 1521 -----------
 ql/src/test/results/clientpositive/union25.q.out   |  269 --
 ql/src/test/results/clientpositive/union27.q.out   |  160 --
 ql/src/test/results/clientpositive/union28.q.out   |  351 ---
 ql/src/test/results/clientpositive/union29.q.out   |  289 ---
 ql/src/test/results/clientpositive/union30.q.out   |  415 ---
 ql/src/test/results/clientpositive/union31.q.out   | 1182 ---------
 ql/src/test/results/clientpositive/union32.q.out   |  791 ------
 ql/src/test/results/clientpositive/union33.q.out   |  554 ----
 ql/src/test/results/clientpositive/union34.q.out   |  428 ----
 ql/src/test/results/clientpositive/union37.q.out   |  546 ----
 .../results/clientpositive/union_lateralview.q.out |  329 ---
 .../test/results/clientpositive/union_offcbo.q.out | 1863 --------------
 .../results/clientpositive/union_pos_alias.q.out   |  658 -----
 ql/src/test/results/clientpositive/union_ppr.q.out |  567 -----
 .../results/clientpositive/union_remove_19.q.out   |  542 ----
 .../results/clientpositive/union_remove_23.q.out   |  268 --
 .../clientpositive/union_remove_6_subq.q.out       | 1263 ---------
 .../results/clientpositive/union_remove_plan.q.out |  142 --
 .../test/results/clientpositive/union_view.q.out   | 1056 --------
 .../vector_decimal_col_scalar_division.q.out       |  138 -
 .../clientpositive/vector_decimal_partition.q.out  |  153 --
 .../clientpositive/vector_delete_orig_table.q.out  |  167 --
 .../clientpositive/vector_empty_where.q.out        |  624 -----
 .../vector_non_constant_in_expr.q.out              |   55 -
 .../clientpositive/vector_outer_join_no_keys.q.out |  333 ---
 .../clientpositive/vector_tablesample_rows.q.out   |  451 ----
 .../vector_windowing_row_number.q.out              |  926 -------
 .../clientpositive/vectorization_multi_value.q.out |  653 -----
 .../vectorization_numeric_overflows.q.out          | 1210 ---------
 .../vectorization_offset_limit.q.out               |  174 --
 .../vectorization_parquet_projection.q.out         |  661 -----
 .../clientpositive/vectorized_join46_mr.q.out      | 2224 ----------------
 .../clientpositive/vectorized_mapjoin2.q.out       |  187 --
 .../results/clientpositive/windowing_gby2.q.out    |  694 -----
 .../results/clientpositive/windowing_udaf.q.out    |  598 -----
 315 files changed, 40128 insertions(+), 39188 deletions(-)
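
The changes below follow one pattern: query files whose output differs between TestCliDriver and TestMiniLlapLocal are registered under minillaplocal.query.files, and the queries themselves are made deterministic so the golden .q.out files stay stable. Two devices recur in the hunks: a -- SORT_QUERY_RESULTS header, which tells the test driver to sort result rows before diffing, and an ORDER BY placed ahead of each LIMIT, which pins which rows survive the limit regardless of execution engine. A minimal sketch of the idea (the table and column names are illustrative, not taken from this patch):

    -- SORT_QUERY_RESULTS
    -- Nondeterministic: any 5 rows satisfy the query, so the golden
    -- output depends on the engine and on task scheduling.
    select * from t limit 5;

    -- Deterministic: the ORDER BY fixes which 5 rows the LIMIT keeps,
    -- so TestCliDriver and TestMiniLlapLocalCliDriver agree.
    select * from t order by key, value limit 5;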

diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 2ad66a6..2b98dc3 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -2730,7 +2730,240 @@ minillaplocal.query.files=\
   temp_table_insert1_overwrite_partitions.q,\
   temp_table_insert2_overwrite_partitions.q,\
   temp_table_join1.q,\
-  temp_table_load_dyn_part1.q
+  temp_table_load_dyn_part1.q,\
+  temp_table_merge_dynamic_partition.q,\
+  temp_table_merge_dynamic_partition2.q,\
+  temp_table_merge_dynamic_partition3.q,\
+  temp_table_merge_dynamic_partition4.q,\
+  temp_table_merge_dynamic_partition5.q,\
+  temp_table_options1.q,\
+  temp_table_parquet_mixed_partition_formats2.q,\
+  temp_table_partition_boolexpr.q,\
+  temp_table_partition_condition_remover.q,\
+  temp_table_partition_ctas.q,\
+  temp_table_partition_multilevels.q,\
+  temp_table_partition_pruning.q,\
+  temp_table_windowing_expressions.q,\
+  test_teradatabinaryfile.q,\
+  timestamp.q,\
+  timestamp_comparison3.q,\
+  timestamp_ints_casts.q,\
+  timestamp_literal.q,\
+  timestamptz.q,\
+  truncate_column_buckets.q,\
+  truncate_column_list_bucket.q,\
+  type_cast_1.q,\
+  type_widening.q,\
+  udaf_binarysetfunctions.q,\
+  udaf_binarysetfunctions_no_cbo.q,\
+  udaf_number_format.q,\
+  udaf_percentile_cont.q,\
+  udaf_percentile_disc.q,\
+  udf1.q,\
+  udf2.q,\
+  udf3.q,\
+  udf4.q,\
+  udf5.q,\
+  udf6.q,\
+  udf7.q,\
+  udf8.q,\
+  udf9.q,\
+  udf_10_trims.q,\
+  udf_E.q,\
+  udf_PI.q,\
+  udf_abs.q,\
+  udf_add_months.q,\
+  udf_aes_decrypt.q,\
+  udf_aes_encrypt.q,\
+  udf_array.q,\
+  udf_ascii.q,\
+  udf_between.q,\
+  udf_bitwise_shiftleft.q,\
+  udf_bitwise_shiftright.q,\
+  udf_bitwise_shiftrightunsigned.q,\
+  udf_case.q,\
+  udf_case_column_pruning.q,\
+  udf_case_thrift.q,\
+  udf_cbrt.q,\
+  udf_character_length.q,\
+  udf_concat_insert1.q,\
+  udf_concat_ws.q,\
+  udf_crc32.q,\
+  udf_current_database.q,\
+  udf_date_format.q,\
+  udf_datetime_legacy_hybrid_calendar.q,\
+  udf_decode.q,\
+  udf_degrees.q,\
+  udf_elt.q,\
+  udf_example_add.q,\
+  udf_explode.q,\
+  udf_factorial.q,\
+  udf_find_in_set.q,\
+  udf_folder_constants.q,\
+  udf_format_number.q,\
+  udf_from_utc_timestamp.q,\
+  udf_get_json_object.q,\
+  udf_greatest.q,\
+  udf_hash.q,\
+  udf_hour.q,\
+  udf_if.q,\
+  udf_in_file.q,\
+  udf_inline.q,\
+  udf_instr.q,\
+  udf_isnull_isnotnull.q,\
+  udf_isops_simplify.q,\
+  udf_java_method.q,\
+  udf_last_day.q,\
+  udf_least.q,\
+  udf_length.q,\
+  udf_levenshtein.q,\
+  udf_like.q,\
+  udf_locate.q,\
+  udf_lower.q,\
+  udf_lpad.q,\
+  udf_map.q,\
+  udf_mask.q,\
+  udf_mask_first_n.q,\
+  udf_mask_hash.q,\
+  udf_mask_last_n.q,\
+  udf_mask_show_first_n.q,\
+  udf_mask_show_last_n.q,\
+  udf_md5.q,\
+  udf_minute.q,\
+  udf_months_between.q,\
+  udf_named_struct.q,\
+  udf_next_day.q,\
+  udf_notequal.q,\
+  udf_nullif.q,\
+  udf_nvl.q,\
+  udf_octet_length.q,\
+  udf_parse_url.q,\
+  udf_position.q,\
+  udf_quarter.q,\
+  udf_radians.q,\
+  udf_reflect.q,\
+  udf_reflect2.q,\
+  udf_repeat.q,\
+  udf_reverse.q,\
+  udf_rpad.q,\
+  udf_second.q,\
+  udf_sha1.q,\
+  udf_sha2.q,\
+  udf_sign.q,\
+  udf_size.q,\
+  udf_sort_array.q,\
+  udf_sort_array_by.q,\
+  udf_soundex.q,\
+  udf_space.q,\
+  udf_split.q,\
+  udf_struct.q,\
+  udf_substring_index.q,\
+  udf_to_unix_timestamp.q,\
+  udf_to_utc_timestamp.q,\
+  udf_trunc.q,\
+  udf_trunc_number.q,\
+  udf_union.q,\
+  udf_when.q,\
+  udf_width_bucket.q,\
+  udtf_explode.q,\
+  udtf_get_sql_schema.q,\
+  udtf_json_tuple.q,\
+  udtf_parse_url_tuple.q,\
+  udtf_stack.q,\
+  unicode_comments.q,\
+  unicode_data.q,\
+  union.q,\
+  union10.q,\
+  union11.q,\
+  union12.q,\
+  union13.q,\
+  union14.q,\
+  union15.q,\
+  union16.q,\
+  union17.q,\
+  union18.q,\
+  union19.q,\
+  union20.q,\
+  union21.q,\
+  union22.q,\
+  union24.q,\
+  union25.q,\
+  union26.q,\
+  union27.q,\
+  union28.q,\
+  union29.q,\
+  union30.q,\
+  union31.q,\
+  union32.q,\
+  union33.q,\
+  union34.q,\
+  union35.q,\
+  union37.q,\
+  union_lateralview.q,\
+  union_offcbo.q,\
+  union_paren.q,\
+  union_pos_alias.q,\
+  union_ppr.q,\
+  union_remove_1.q,\
+  union_remove_10.q,\
+  union_remove_11.q,\
+  union_remove_12.q,\
+  union_remove_13.q,\
+  union_remove_14.q,\
+  union_remove_15.q,\
+  union_remove_16.q,\
+  union_remove_17.q,\
+  union_remove_18.q,\
+  union_remove_19.q,\
+  union_remove_2.q,\
+  union_remove_20.q,\
+  union_remove_21.q,\
+  union_remove_22.q,\
+  union_remove_23.q,\
+  union_remove_24.q,\
+  union_remove_25.q,\
+  union_remove_3.q,\
+  union_remove_4.q,\
+  union_remove_5.q,\
+  union_remove_6.q,\
+  union_remove_6_subq.q,\
+  union_remove_7.q,\
+  union_remove_8.q,\
+  union_remove_9.q,\
+  union_remove_plan.q,\
+  union_view.q,\
+  unionall_lateralview.q,\
+  unionall_unbalancedppd.q,\
+  updateBasicStats.q,\
+  varchar_serde.q,\
+  vector_const.q,\
+  vector_decimal_col_scalar_division.q,\
+  vector_decimal_partition.q,\
+  vector_delete_orig_table.q,\
+  vector_empty_where.q,\
+  vector_gather_stats.q,\
+  vector_non_constant_in_expr.q,\
+  vector_outer_join_no_keys.q,\
+  vector_tablesample_rows.q,\
+  vector_windowing_row_number.q,\
+  vectorization_multi_value.q,\
+  vectorization_numeric_overflows.q,\
+  vectorization_offset_limit.q,\
+  vectorization_parquet_ppd_decimal.q,\
+  vectorization_parquet_projection.q,\
+  vectorization_sum_if_when.q,\
+  vectorized_join46_mr.q,\
+  vectorized_mapjoin2.q,\
+  view_alias.q,\
+  view_cbo.q,\
+  windowing_expressions.q,\
+  windowing_gby2.q,\
+  windowing_multipartitioning.q,\
+  windowing_navfn.q,\
+  windowing_range_multiorder.q,\
+  windowing_streaming.q,\
+  windowing_udaf.q,\
+  windowing_windowspec3.q
 
 encrypted.query.files=encryption_join_unencrypted_tbl.q,\
   encryption_insert_partition_static.q,\
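
For orientation (a summary of the harness behavior, not text from this commit): a q file listed under minillaplocal.query.files is run by TestMiniLlapLocalCliDriver, which compares results against golden files under ql/src/test/results/clientpositive/llap/ rather than ql/src/test/results/clientpositive/ -- hence the { => llap} renames in the file list above. A hypothetical registration entry would look like:

    # itests/src/test/resources/testconfiguration.properties
    # (illustrative entry; listing a file here moves it to the LLAP driver)
    minillaplocal.query.files=\
      my_query_test.q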
diff --git a/ql/src/test/queries/clientpositive/temp_table_options1.q b/ql/src/test/queries/clientpositive/temp_table_options1.q
index b336267..9a5bc64 100644
--- a/ql/src/test/queries/clientpositive/temp_table_options1.q
+++ b/ql/src/test/queries/clientpositive/temp_table_options1.q
@@ -1,3 +1,4 @@
+-- SORT_QUERY_RESULTS
 -- Delimiter test, taken from delimiter.q
 create temporary table impressions (imp string, msg string)
 row format delimited
@@ -46,7 +47,7 @@ create temporary table date_serde_lb (
 alter table date_serde_lb set serde 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe';
 
 insert overwrite table date_serde_lb 
-  select fl_date, fl_num from date_serde_regex limit 1;
+  select fl_date, fl_num from date_serde_regex order by fl_date, fl_num limit 1;
 
 select * from date_serde_lb;
 select c1, sum(c2) from date_serde_lb group by c1;
@@ -61,7 +62,7 @@ create temporary table date_serde_ls (
 alter table date_serde_ls set serde 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe';
 
 insert overwrite table date_serde_ls 
-  select c1, c2 from date_serde_lb limit 1;
+  select c1, c2 from date_serde_lb order by c1, c2 limit 1;
 
 select * from date_serde_ls;
 select c1, sum(c2) from date_serde_ls group by c1;
@@ -76,7 +77,7 @@ create temporary table date_serde_c (
 alter table date_serde_c set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe';
 
 insert overwrite table date_serde_c 
-  select c1, c2 from date_serde_ls limit 1;
+  select c1, c2 from date_serde_ls order by c1, c2 limit 1;
 
 select * from date_serde_c;
 select c1, sum(c2) from date_serde_c group by c1;
@@ -91,7 +92,7 @@ create temporary table date_serde_lbc (
 alter table date_serde_lbc set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';
 
 insert overwrite table date_serde_lbc 
-  select c1, c2 from date_serde_c limit 1;
+  select c1, c2 from date_serde_c order by c1, c2 limit 1;
 
 select * from date_serde_lbc;
 select c1, sum(c2) from date_serde_lbc group by c1;
@@ -106,7 +107,7 @@ create temporary table date_serde_orc (
 alter table date_serde_orc set serde 'org.apache.hadoop.hive.ql.io.orc.OrcSerde';
 
 insert overwrite table date_serde_orc 
-  select c1, c2 from date_serde_lbc limit 1;
+  select c1, c2 from date_serde_lbc order by c1, c2 limit 1;
 
 select * from date_serde_orc;
 select c1, sum(c2) from date_serde_orc group by c1;
diff --git a/ql/src/test/queries/clientpositive/temp_table_parquet_mixed_partition_formats2.q b/ql/src/test/queries/clientpositive/temp_table_parquet_mixed_partition_formats2.q
index 9046460..af83631 100644
--- a/ql/src/test/queries/clientpositive/temp_table_parquet_mixed_partition_formats2.q
+++ b/ql/src/test/queries/clientpositive/temp_table_parquet_mixed_partition_formats2.q
@@ -17,17 +17,17 @@ OUTPUTFORMAT
 
 LOAD DATA LOCAL INPATH '../../data/files/sample2.json' INTO TABLE parquet_table_json_partition_temp PARTITION(ts='20150101');
 
-SELECT * FROM parquet_table_json_partition_temp LIMIT 100;
+SELECT * FROM parquet_table_json_partition_temp ORDER BY id, address, reports LIMIT 100;
 
 ALTER TABLE parquet_table_json_partition_temp
 SET FILEFORMAT INPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat'
 OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'
 SERDE 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe';
 
-SELECT * FROM parquet_table_json_partition_temp LIMIT 100;
+SELECT * FROM parquet_table_json_partition_temp ORDER BY id, address, reports LIMIT 100;
 
-CREATE TEMPORARY TABLE new_table_temp AS SELECT * FROM parquet_table_json_partition_temp LIMIT 100;
+CREATE TEMPORARY TABLE new_table_temp AS SELECT * FROM parquet_table_json_partition_temp ORDER BY id, address, reports LIMIT 100;
 
-SELECT * FROM new_table_temp;
+SELECT * FROM new_table_temp ORDER BY id, address, reports;
 
 
diff --git a/ql/src/test/queries/clientpositive/temp_table_partition_multilevels.q b/ql/src/test/queries/clientpositive/temp_table_partition_multilevels.q
index 6232146..50ba7c4 100644
--- a/ql/src/test/queries/clientpositive/temp_table_partition_multilevels.q
+++ b/ql/src/test/queries/clientpositive/temp_table_partition_multilevels.q
@@ -1,36 +1,38 @@
 --! qt:dataset:srcpart
+-- SORT_QUERY_RESULTS
+
 set hive.mapred.mode=nonstrict;
 create temporary table partition_test_multilevel_temp (key string, value string) partitioned by (level1 string, level2 string, level3 string);
 
-insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='111', level3='11') select key, value from srcpart tablesample (11 rows);
-insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='111', level3='22') select key, value from srcpart tablesample (12 rows);
-insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='111', level3='33') select key, value from srcpart tablesample (13 rows);
-insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='111', level3='44') select key, value from srcpart tablesample (14 rows);
-
-insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='222', level3='11') select key, value from srcpart tablesample (15 rows);
-insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='222', level3='22') select key, value from srcpart tablesample (16 rows);
-insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='222', level3='33') select key, value from srcpart tablesample (17 rows);
-insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='222', level3='44') select key, value from srcpart tablesample (18 rows);
-
-insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='333', level3='11') select key, value from srcpart tablesample (19 rows);
-insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='333', level3='22') select key, value from srcpart tablesample (20 rows);
-insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='333', level3='33') select key, value from srcpart tablesample (21 rows);
-insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='333', level3='44') select key, value from srcpart tablesample (22 rows);
-
-insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='111', level3='11') select key, value from srcpart tablesample (11 rows);
-insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='111', level3='22') select key, value from srcpart tablesample (12 rows);
-insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='111', level3='33') select key, value from srcpart tablesample (13 rows);
-insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='111', level3='44') select key, value from srcpart tablesample (14 rows);
-
-insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='222', level3='11') select key, value from srcpart tablesample (15 rows);
-insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='222', level3='22') select key, value from srcpart tablesample (16 rows);
-insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='222', level3='33') select key, value from srcpart tablesample (17 rows);
-insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='222', level3='44') select key, value from srcpart tablesample (18 rows);
-
-insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='333', level3='11') select key, value from srcpart tablesample (19 rows);
-insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='333', level3='22') select key, value from srcpart tablesample (20 rows);
-insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='333', level3='33') select key, value from srcpart tablesample (21 rows);
-insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='333', level3='44') select key, value from srcpart tablesample (22 rows);
+insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='111', level3='11') select key, value from srcpart tablesample (11 rows) order by key, value;
+insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='111', level3='22') select key, value from srcpart tablesample (12 rows) order by key, value;
+insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='111', level3='33') select key, value from srcpart tablesample (13 rows) order by key, value;
+insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='111', level3='44') select key, value from srcpart tablesample (14 rows) order by key, value;
+
+insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='222', level3='11') select key, value from srcpart tablesample (15 rows) order by key, value;
+insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='222', level3='22') select key, value from srcpart tablesample (16 rows) order by key, value;
+insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='222', level3='33') select key, value from srcpart tablesample (17 rows) order by key, value;
+insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='222', level3='44') select key, value from srcpart tablesample (18 rows) order by key, value;
+
+insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='333', level3='11') select key, value from srcpart tablesample (19 rows) order by key, value;
+insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='333', level3='22') select key, value from srcpart tablesample (20 rows) order by key, value;
+insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='333', level3='33') select key, value from srcpart tablesample (21 rows) order by key, value;
+insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='333', level3='44') select key, value from srcpart tablesample (22 rows) order by key, value;
+
+insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='111', level3='11') select key, value from srcpart tablesample (11 rows) order by key, value;
+insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='111', level3='22') select key, value from srcpart tablesample (12 rows) order by key, value;
+insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='111', level3='33') select key, value from srcpart tablesample (13 rows) order by key, value;
+insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='111', level3='44') select key, value from srcpart tablesample (14 rows) order by key, value;
+
+insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='222', level3='11') select key, value from srcpart tablesample (15 rows) order by key, value;
+insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='222', level3='22') select key, value from srcpart tablesample (16 rows) order by key, value;
+insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='222', level3='33') select key, value from srcpart tablesample (17 rows) order by key, value;
+insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='222', level3='44') select key, value from srcpart tablesample (18 rows) order by key, value;
+
+insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='333', level3='11') select key, value from srcpart tablesample (19 rows) order by key, value;
+insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='333', level3='22') select key, value from srcpart tablesample (20 rows) order by key, value;
+insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='333', level3='33') select key, value from srcpart tablesample (21 rows) order by key, value;
+insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='333', level3='44') select key, value from srcpart tablesample (22 rows) order by key, value;
 
 set metaconf:hive.metastore.try.direct.sql=false;
 
diff --git a/ql/src/test/queries/clientpositive/udf_concat_insert1.q b/ql/src/test/queries/clientpositive/udf_concat_insert1.q
index f735da6..eeef058 100644
--- a/ql/src/test/queries/clientpositive/udf_concat_insert1.q
+++ b/ql/src/test/queries/clientpositive/udf_concat_insert1.q
@@ -1,4 +1,6 @@
 --! qt:dataset:src
+-- SORT_QUERY_RESULTS
+
 CREATE TABLE dest1_n139(key INT, value STRING) STORED AS TEXTFILE;
 
 FROM src
diff --git a/ql/src/test/queries/clientpositive/udf_explode.q b/ql/src/test/queries/clientpositive/udf_explode.q
index ec3f6da..7825eca 100644
--- a/ql/src/test/queries/clientpositive/udf_explode.q
+++ b/ql/src/test/queries/clientpositive/udf_explode.q
@@ -4,22 +4,22 @@ set hive.fetch.task.conversion=more;
 DESCRIBE FUNCTION explode;
 DESCRIBE FUNCTION EXTENDED explode;
 
-EXPLAIN EXTENDED SELECT explode(array(1,2,3)) AS myCol FROM src tablesample (1 rows);
-EXPLAIN EXTENDED SELECT a.myCol, count(1) FROM (SELECT explode(array(1,2,3)) AS myCol FROM src tablesample (1 rows)) a GROUP BY a.myCol;
+EXPLAIN EXTENDED SELECT explode(array(1, 2, 3)) AS myCol FROM src tablesample (1 rows) ORDER BY myCol;
+EXPLAIN EXTENDED SELECT a.myCol, count(1) FROM (SELECT explode(array(1, 2, 3)) AS myCol FROM src tablesample (1 rows)) a GROUP BY a.myCol ORDER BY a.myCol;
 
-SELECT explode(array(1,2,3)) AS myCol FROM src tablesample (1 rows);
-SELECT explode(array(1,2,3)) AS (myCol) FROM src tablesample (1 rows);
-SELECT a.myCol, count(1) FROM (SELECT explode(array(1,2,3)) AS myCol FROM src tablesample (1 rows)) a GROUP BY a.myCol;
+SELECT explode(array(1, 2, 3)) AS myCol FROM src tablesample (1 rows) ORDER BY myCol;
+SELECT explode(array(1, 2, 3)) AS (myCol) FROM src tablesample (1 rows) ORDER BY myCol;
+SELECT a.myCol, count(1) FROM (SELECT explode(array(1, 2, 3)) AS myCol FROM src tablesample (1 rows)) a GROUP BY a.myCol ORDER BY a.myCol;
 
-EXPLAIN EXTENDED SELECT explode(map(1,'one',2,'two',3,'three')) AS (key,val) FROM src tablesample (1 rows);
-EXPLAIN EXTENDED SELECT a.key, a.val, count(1) FROM (SELECT explode(map(1,'one',2,'two',3,'three')) AS (key,val) FROM src tablesample (1 rows)) a GROUP BY a.key, a.val;
+EXPLAIN EXTENDED SELECT explode(map(1, 'one', 2, 'two', 3, 'three')) AS (key, val) FROM src tablesample (1 rows) ORDER BY key, val;
+EXPLAIN EXTENDED SELECT a.key, a.val, count(1) FROM (SELECT explode(map(1, 'one', 2, 'two', 3, 'three')) AS (key, val) FROM src tablesample (1 rows) ORDER BY key, val) a GROUP BY a.key, a.val ORDER BY a.key, a.val;
 
-SELECT explode(map(1,'one',2,'two',3,'three')) AS (key,val) FROM src tablesample (1 rows);
-SELECT a.key, a.val, count(1) FROM (SELECT explode(map(1,'one',2,'two',3,'three')) AS (key,val) FROM src tablesample (1 rows)) a GROUP BY a.key, a.val;
+SELECT explode(map(1, 'one', 2, 'two', 3, 'three')) AS (key, val) FROM src tablesample (1 rows) ORDER BY key, val;
+SELECT a.key, a.val, count(1) FROM (SELECT explode(map(1, 'one', 2, 'two', 3, 'three')) AS (key, val) FROM src tablesample (1 rows) ORDER BY key, val) a GROUP BY a.key, a.val ORDER BY a.key, a.val;
 
 drop table lazy_array_map;
 create table lazy_array_map (map_col map<int,string>, array_col array<string>);
-INSERT OVERWRITE TABLE lazy_array_map select map(1,'one',2,'two',3,'three'), array('100','200','300') FROM src tablesample (1 rows);
+INSERT OVERWRITE TABLE lazy_array_map select map(1, 'one', 2, 'two', 3, 'three'), array('100', '200', '300') FROM src tablesample (1 rows);
 
-SELECT array_col, myCol from lazy_array_map lateral view explode(array_col) X AS myCol;
-SELECT map_col, myKey, myValue from lazy_array_map lateral view explode(map_col) X AS myKey, myValue;
\ No newline at end of file
+SELECT array_col, myCol FROM lazy_array_map lateral view explode(array_col) X AS myCol ORDER BY array_col, myCol;
+SELECT map_col, myKey, myValue FROM lazy_array_map lateral view explode(map_col) X AS myKey, myValue ORDER BY map_col, myKey, myValue;
diff --git a/ql/src/test/queries/clientpositive/udf_sort_array_by.q b/ql/src/test/queries/clientpositive/udf_sort_array_by.q
index 4c8c878..d2ceb6e 100644
--- a/ql/src/test/queries/clientpositive/udf_sort_array_by.q
+++ b/ql/src/test/queries/clientpositive/udf_sort_array_by.q
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
 use default;
 -- Test sort_array_by() UDF
 
diff --git a/ql/src/test/queries/clientpositive/udf_width_bucket.q b/ql/src/test/queries/clientpositive/udf_width_bucket.q
index 9fce6fc..324d996 100644
--- a/ql/src/test/queries/clientpositive/udf_width_bucket.q
+++ b/ql/src/test/queries/clientpositive/udf_width_bucket.q
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
 describe function width_bucket;
 desc function extended width_bucket;
 
diff --git a/ql/src/test/queries/clientpositive/udtf_explode.q b/ql/src/test/queries/clientpositive/udtf_explode.q
index 5661d6c..39d429d 100644
--- a/ql/src/test/queries/clientpositive/udtf_explode.q
+++ b/ql/src/test/queries/clientpositive/udtf_explode.q
@@ -5,21 +5,21 @@ set hive.fetch.task.conversion=more;
 DESCRIBE FUNCTION explode;
 DESCRIBE FUNCTION EXTENDED explode;
 
-EXPLAIN EXTENDED SELECT explode(array(1,2,3)) AS myCol FROM src LIMIT 3;
-EXPLAIN EXTENDED SELECT a.myCol, count(1) FROM (SELECT explode(array(1,2,3)) AS myCol FROM src LIMIT 3) a GROUP BY a.myCol;
+EXPLAIN EXTENDED SELECT explode(array(1, 2, 3)) AS myCol FROM src LIMIT 3;
+EXPLAIN EXTENDED SELECT a.myCol, count(1) FROM (SELECT explode(array(1, 2, 3)) AS myCol FROM src LIMIT 3) a GROUP BY a.myCol;
 
-SELECT explode(array(1,2,3)) AS myCol FROM src LIMIT 3;
-SELECT explode(array(1,2,3)) AS (myCol) FROM src LIMIT 3;
-SELECT a.myCol, count(1) FROM (SELECT explode(array(1,2,3)) AS myCol FROM src LIMIT 3) a GROUP BY a.myCol;
+SELECT explode(array(1, 2, 3)) AS myCol FROM src ORDER BY myCol LIMIT 3;
+SELECT explode(array(1, 2, 3)) AS (myCol) FROM src ORDER BY myCol LIMIT 3;
+SELECT a.myCol, count(1) FROM (SELECT explode(array(1, 2, 3)) AS myCol FROM src LIMIT 3) a GROUP BY a.myCol ORDER BY a.myCol;
 
-EXPLAIN SELECT explode(map(1,'one',2,'two',3,'three')) as (myKey,myVal) FROM src LIMIT 3;
-EXPLAIN EXTENDED SELECT a.myKey, a.myVal, count(1) FROM (SELECT explode(map(1,'one',2,'two',3,'three')) as (myKey,myVal) FROM src LIMIT 3) a GROUP BY a.myKey, a.myVal;
+EXPLAIN SELECT explode(map(1, 'one', 2, 'two', 3, 'three')) as (myKey, myVal) FROM src ORDER BY myKey, myVal LIMIT 3;
+EXPLAIN EXTENDED SELECT a.myKey, a.myVal, count(1) FROM (SELECT explode(map(1, 'one', 2, 'two', 3, 'three')) as (myKey, myVal) FROM src LIMIT 3) a GROUP BY a.myKey, a.myVal ORDER BY a.myKey, a.myVal;
 
-SELECT explode(map(1,'one',2,'two',3,'three')) as (myKey,myVal) FROM src LIMIT 3;
-SELECT a.myKey, a.myVal, count(1) FROM (SELECT explode(map(1,'one',2,'two',3,'three')) as (myKey,myVal) FROM src LIMIT 3) a GROUP BY a.myKey, a.myVal;
+SELECT explode(map(1, 'one', 2, 'two', 3, 'three')) as (myKey, myVal) FROM src ORDER BY myKey, myVal LIMIT 3;
+SELECT a.myKey, a.myVal, count(1) FROM (SELECT explode(map(1, 'one', 2, 'two', 3, 'three')) as (myKey, myVal) FROM src LIMIT 3) a GROUP BY a.myKey, a.myVal ORDER BY a.myKey, a.myVal;
 
-SELECT src.key, myCol FROM src lateral view explode(array(1,2,3)) x AS myCol LIMIT 3;
-SELECT src.key, myKey, myVal FROM src lateral view explode(map(1,'one',2,'two',3,'three')) x AS myKey,myVal LIMIT 3;
+SELECT src.key, myCol FROM src lateral view explode(array(1, 2, 3)) x AS myCol ORDER BY src.key, myCol LIMIT 3;
+SELECT src.key, myKey, myVal FROM src lateral view explode(map(1, 'one', 2, 'two', 3, 'three')) x AS myKey, myVal ORDER BY src.key, myKey, myVal LIMIT 3;
 
 -- HIVE-4295
-SELECT BLOCK__OFFSET__INSIDE__FILE, src.key, myKey, myVal FROM src lateral view explode(map(1,'one',2,'two',3,'three')) x AS myKey,myVal LIMIT 3;
+SELECT BLOCK__OFFSET__INSIDE__FILE, src.key, myKey, myVal FROM src lateral view explode(map(1, 'one', 2, 'two', 3, 'three')) x AS myKey, myVal ORDER BY BLOCK__OFFSET__INSIDE__FILE, src.key, myKey, myVal LIMIT 3;
diff --git a/ql/src/test/queries/clientpositive/udtf_parse_url_tuple.q b/ql/src/test/queries/clientpositive/udtf_parse_url_tuple.q
index 5e672b9..66aab52 100644
--- a/ql/src/test/queries/clientpositive/udtf_parse_url_tuple.q
+++ b/ql/src/test/queries/clientpositive/udtf_parse_url_tuple.q
@@ -1,4 +1,6 @@
 --! qt:dataset:src
+-- SORT_QUERY_RESULTS
+
 set hive.mapred.mode=nonstrict;
 create table url_t (key string, fullurl string);
 
diff --git a/ql/src/test/queries/clientpositive/union.q b/ql/src/test/queries/clientpositive/union.q
index 3f40a25..5d50be3 100644
--- a/ql/src/test/queries/clientpositive/union.q
+++ b/ql/src/test/queries/clientpositive/union.q
@@ -1,6 +1,5 @@
 --! qt:dataset:src
 set hive.mapred.mode=nonstrict;
--- SORT_BEFORE_DIFF
 -- union case: both subqueries are map jobs on same input, followed by filesink
 
 EXPLAIN
@@ -9,13 +8,13 @@ FROM (
   UNION ALL
   FROM src SELECT src.* WHERE src.key > 100
 ) unioninput
-INSERT OVERWRITE DIRECTORY 'target/warehouse/union.out' SELECT unioninput.*;
+INSERT OVERWRITE DIRECTORY 'target/warehouse/union.out' SELECT unioninput.* ORDER BY key, value;
 
 FROM (
   FROM src select src.key, src.value WHERE src.key < 100
   UNION ALL
   FROM src SELECT src.* WHERE src.key > 100
 ) unioninput
-INSERT OVERWRITE DIRECTORY 'target/warehouse/union.out' SELECT unioninput.*;
+INSERT OVERWRITE DIRECTORY 'target/warehouse/union.out' SELECT unioninput.* ORDER BY key, value;
 
 dfs -cat ${system:test.warehouse.dir}/union.out/*;
diff --git a/ql/src/test/queries/clientpositive/union13.q b/ql/src/test/queries/clientpositive/union13.q
index 6b8608e..826c96e 100644
--- a/ql/src/test/queries/clientpositive/union13.q
+++ b/ql/src/test/queries/clientpositive/union13.q
@@ -1,6 +1,6 @@
 --! qt:dataset:src
 set hive.mapred.mode=nonstrict;
--- SORT_BEFORE_DIFF
+-- SORT_QUERY_RESULTS
 -- union case: both subqueries are a map-only jobs, same input, followed by filesink
 
 explain 
diff --git a/ql/src/test/queries/clientpositive/union15.q b/ql/src/test/queries/clientpositive/union15.q
index d2590f8..70524f2 100644
--- a/ql/src/test/queries/clientpositive/union15.q
+++ b/ql/src/test/queries/clientpositive/union15.q
@@ -1,8 +1,9 @@
 --! qt:dataset:src1
 --! qt:dataset:src
+-- SORT_QUERY_RESULTS
+
 set hive.mapred.mode=nonstrict;
 set hive.map.aggr = true;
--- SORT_BEFORE_DIFF
 -- union case: 1 subquery is a map-reduce job, different inputs for sub-queries, followed by reducesink
 
 explain 
diff --git a/ql/src/test/queries/clientpositive/union35.q b/ql/src/test/queries/clientpositive/union35.q
index 38e9561..99d271c 100644
--- a/ql/src/test/queries/clientpositive/union35.q
+++ b/ql/src/test/queries/clientpositive/union35.q
@@ -1,4 +1,6 @@
 --! qt:dataset:src
+-- SORT_QUERY_RESULTS
+
 select * from (
      select * from ( select 1 as id , 'foo' as str_1 from src tablesample(5 rows)) f
  union all
diff --git a/ql/src/test/queries/clientpositive/union_pos_alias.q b/ql/src/test/queries/clientpositive/union_pos_alias.q
index fd5c19f..d881466 100644
--- a/ql/src/test/queries/clientpositive/union_pos_alias.q
+++ b/ql/src/test/queries/clientpositive/union_pos_alias.q
@@ -1,4 +1,6 @@
 --! qt:dataset:src
+-- SORT_QUERY_RESULTS
+
 set hive.mapred.mode=nonstrict;
 
 
@@ -14,7 +16,7 @@ select key, value from (select 'tst2' as key, count(1) as value from src s2 UNIO
 order by 1;
 
 drop table src_10;
-create table src_10 as select * from src limit 10;
+create table src_10 as select * from src order by key, value limit 10;
 
 explain 
 select key as value, value as key from src_10
diff --git a/ql/src/test/queries/clientpositive/varchar_serde.q b/ql/src/test/queries/clientpositive/varchar_serde.q
index 7b3d7a9..426b09c 100644
--- a/ql/src/test/queries/clientpositive/varchar_serde.q
+++ b/ql/src/test/queries/clientpositive/varchar_serde.q
@@ -22,8 +22,8 @@ stored as textfile;
 
 load data local inpath '../../data/files/srcbucket0.txt' overwrite into table varchar_serde_regex;
 
-select * from varchar_serde_regex limit 5;
-select value, count(*) from varchar_serde_regex group by value limit 5;
+select * from varchar_serde_regex order by key, value limit 5;
+select value, count(*) from varchar_serde_regex group by value order by value limit 5;
 
 --
 -- LazyBinary
@@ -36,8 +36,8 @@ alter table varchar_serde_lb set serde 'org.apache.hadoop.hive.serde2.lazybinary
 
 insert overwrite table varchar_serde_lb
   select key, value from varchar_serde_regex;
-select * from varchar_serde_lb limit 5;
-select value, count(*) from varchar_serde_lb group by value limit 5;
+select * from varchar_serde_lb order by key, value limit 5;
+select value, count(*) from varchar_serde_lb group by value order by value limit 5;
 
 --
 -- LazySimple
@@ -50,8 +50,8 @@ alter table varchar_serde_ls set serde 'org.apache.hadoop.hive.serde2.lazy.LazyS
 
 insert overwrite table varchar_serde_ls
   select key, value from varchar_serde_lb;
-select * from varchar_serde_ls limit 5;
-select value, count(*) from varchar_serde_ls group by value limit 5;
+select * from varchar_serde_ls order by key, value limit 5;
+select value, count(*) from varchar_serde_ls group by value order by value limit 5;
 
 --
 -- Columnar
@@ -64,8 +64,8 @@ alter table varchar_serde_c set serde 'org.apache.hadoop.hive.serde2.columnar.Co
 
 insert overwrite table varchar_serde_c
   select key, value from varchar_serde_ls;
-select * from varchar_serde_c limit 5;
-select value, count(*) from varchar_serde_c group by value limit 5;
+select * from varchar_serde_c order by key, value limit 5;
+select value, count(*) from varchar_serde_c group by value order by value limit 5;
 
 --
 -- LazyBinaryColumnar
@@ -78,8 +78,8 @@ alter table varchar_serde_lbc set serde 'org.apache.hadoop.hive.serde2.columnar.
 
 insert overwrite table varchar_serde_lbc
   select key, value from varchar_serde_c;
-select * from varchar_serde_lbc limit 5;
-select value, count(*) from varchar_serde_lbc group by value limit 5;
+select * from varchar_serde_lbc order by key, value limit 5;
+select value, count(*) from varchar_serde_lbc group by value order by value limit 5;
 
 --
 -- ORC
@@ -93,8 +93,8 @@ alter table varchar_serde_orc set serde 'org.apache.hadoop.hive.ql.io.orc.OrcSer
 
 insert overwrite table varchar_serde_orc
   select key, value from varchar_serde_lbc;
-select * from varchar_serde_orc limit 5;
-select value, count(*) from varchar_serde_orc group by value limit 5;
+select * from varchar_serde_orc order by key, value limit 5;
+select value, count(*) from varchar_serde_orc group by value order by value limit 5;
 
 drop table if exists varchar_serde_regex;
 drop table if exists varchar_serde_lb;
diff --git a/ql/src/test/queries/clientpositive/vectorization_parquet_ppd_decimal.q b/ql/src/test/queries/clientpositive/vectorization_parquet_ppd_decimal.q
index 02f4739..4e1fcf0 100644
--- a/ql/src/test/queries/clientpositive/vectorization_parquet_ppd_decimal.q
+++ b/ql/src/test/queries/clientpositive/vectorization_parquet_ppd_decimal.q
@@ -10,7 +10,12 @@ set hive.llap.cache.allow.synthetic.fileid=true;
 
 create table newtypestbl_n1(c char(10), v varchar(10), d decimal(5,3), da date) stored as parquet;
 
-insert overwrite table newtypestbl_n1 select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) uniontbl;
+insert overwrite table newtypestbl_n1
+select *
+  from (select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 limit 5) u1
+        union all
+        select * from (select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 5) u2
+       ) uniontbl;
 
 -- decimal data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
 select * from newtypestbl_n1 where d=0.22;
diff --git a/ql/src/test/queries/clientpositive/view_alias.q b/ql/src/test/queries/clientpositive/view_alias.q
index 1f3f3b1..6f5fd36 100644
--- a/ql/src/test/queries/clientpositive/view_alias.q
+++ b/ql/src/test/queries/clientpositive/view_alias.q
@@ -1,8 +1,8 @@
 --! qt:dataset:src
 drop view v_n6;
-create view v_n6 as select key, '12' from src;
+create view v_n6 as select 10 - key, '12' from src;
 desc formatted v_n6;
-select * from v_n6 order by `_c1` limit 5;
+select * from v_n6 order by `_c0`, `_c1` limit 5;
 
 drop view v_n6;
 create view v_n6 as select key as `_c1`, '12' from src;
@@ -12,24 +12,24 @@ select * from v_n6 order by `_c1` limit 5;
 drop view v_n6;
 create view v_n6 as select *, '12' from src;
 desc formatted v_n6;
-select * from v_n6 order by `_c2` limit 5;
+select * from v_n6 order by key, value, `_c2` limit 5;
 
 drop view v_n6;
 create view v_n6 as select *, '12' as `_c121` from src;
 desc formatted v_n6;
-select * from v_n6 order by `_c121` limit 5;
+select * from v_n6 order by key, value, `_c121` limit 5;
 
 drop view v_n6;
 create view v_n6 as select key, count(*) from src group by key;
 desc formatted v_n6;
-select * from v_n6 order by `_c1` limit 5;
+select * from v_n6 order by key, `_c1` limit 5;
 
 
 drop view v_n6;
 create table a_n9 (ca_n9 string, caa_n9 string);
 create table b_n7 (cb_n7 string, cbb_n7 string);
-insert into a_n9 select * from src limit 5;
-insert into b_n7 select * from src limit 5;
+insert into a_n9 select * from src order by key, value limit 5;
+insert into b_n7 select * from src order by key, value limit 5;
 create view v_n6 as select '010', a_n9.*, 121, b_n7.*, 234 from a_n9 join b_n7 on a_n9.ca_n9 = b_n7.cb_n7;
 desc formatted v_n6;
-select * from v_n6 order by `_c3` limit 5;
+select * from v_n6 order by `_c3`, `_c0`, ca_n9, caa_n9, cb_n7, cbb_n7 limit 5;
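
In view_alias.q the ORDER BY has to name the auto-generated columns: when a view
projects an unaliased expression, Hive assigns it a positional name such as `_c1`, and
ordering by every projected column (generated names included) gives a total order, so
the LIMIT prefix is stable. A sketch reusing the pattern from the file above:

    create view v as select key, '12' from src;   -- the literal column becomes `_c1`
    select * from v order by key, `_c1` limit 5;  -- total order => stable prefix
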
diff --git a/ql/src/test/queries/clientpositive/windowing_expressions.q b/ql/src/test/queries/clientpositive/windowing_expressions.q
index 09e759b..855257b 100644
--- a/ql/src/test/queries/clientpositive/windowing_expressions.q
+++ b/ql/src/test/queries/clientpositive/windowing_expressions.q
@@ -31,10 +31,10 @@ sum(p_retailprice) over (distribute by p_mfgr sort by p_retailprice rows between
 from part
 ;
 
-select s, si, f, si - lead(f, 3) over (partition by t order by bo,s,si,f desc) from over10k_n22 limit 100;
-select s, i, i - lead(i, 3, 0) over (partition by si order by i,s) from over10k_n22 limit 100;
-select s, si, d, si - lag(d, 3) over (partition by b order by si,s,d) from over10k_n22 limit 100;
-select s, lag(s, 3, 'fred') over (partition by f order by b) from over10k_n22 limit 100;
+select s, si, f, si - lead(f, 3) over (partition by t order by bo,s,si,f desc) from over10k_n22 order by s, si, f, si - lead(f, 3) over (partition by t order by bo,s,si,f desc) limit 100;
+select s, i, i - lead(i, 3, 0) over (partition by si order by i,s) from over10k_n22 order by s, i, i - lead(i, 3, 0) over (partition by si order by i,s) limit 100;
+select s, si, d, si - lag(d, 3) over (partition by b order by si,s,d) from over10k_n22 order by s, si, d, si - lag(d, 3) over (partition by b order by si,s,d) limit 100;
+select s, lag(s, 3, 'fred') over (partition by f order by b) from over10k_n22 order by s, lag(s, 3, 'fred') over (partition by f order by b) limit 100;
 
 select p_mfgr, avg(p_retailprice) over(partition by p_mfgr, p_type order by p_mfgr) from part;
 
@@ -44,11 +44,12 @@ select p_mfgr, avg(p_retailprice) over(partition by p_mfgr order by p_type,p_mfg
 create table t1_n142 (a1 int, b1 string); 
 create table t2_n83 (a1 int, b1 string);
 from (select sum(i) over (partition by ts order by i), s from over10k_n22) tt insert overwrite table t1_n142 select * insert overwrite table t2_n83 select * ;
-select * from t1_n142 limit 3;
-select * from t2_n83 limit 3;
+select * from t1_n142 order by a1, b1 limit 3;
+select * from t2_n83 order by a1, b1 limit 3;
 
 select p_mfgr, p_retailprice, p_size,
 round(sum(p_retailprice) over w1 , 2) + 50.0 = round(sum(lag(p_retailprice,1,50.0)) over w1 + (last_value(p_retailprice) over w1),2)
 from part
 window w1 as (distribute by p_mfgr sort by p_retailprice)
+order by p_mfgr, p_retailprice, p_size
 limit 11;
diff --git a/ql/src/test/queries/clientpositive/windowing_multipartitioning.q b/ql/src/test/queries/clientpositive/windowing_multipartitioning.q
index 622c244..2a23d56 100644
--- a/ql/src/test/queries/clientpositive/windowing_multipartitioning.q
+++ b/ql/src/test/queries/clientpositive/windowing_multipartitioning.q
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
 drop table over10k_n11;
 
 create table over10k_n11(
@@ -17,7 +19,7 @@ create table over10k_n11(
 
 load data local inpath '../../data/files/over10k' into table over10k_n11;
 
-select s, rank() over (partition by s order by si), sum(b) over (partition by s order by si) from over10k_n11 limit 100;
+select s, rank() over (partition by s order by si), sum(b) over (partition by s order by si) from over10k_n11 order by s, rank() over (partition by s order by si), sum(b) over (partition by s order by si) limit 100;
 
 select s, 
 rank() over (partition by s order by `dec` desc), 
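
windowing_multipartitioning.q takes a different route: the `-- SORT_QUERY_RESULTS`
directive tells the qtest framework to sort a query's output before diffing it against
the golden file, making the whole file order-insensitive without touching each query.
Sorting happens after the rows are chosen, though, so the directive cannot make a bare
LIMIT deterministic by itself; that is why the LIMIT query above still gains an
explicit ORDER BY. A sketch of the directive, assuming the over10k_n11 schema above:

    -- SORT_QUERY_RESULTS

    select s, rank() over (partition by s order by si) from over10k_n11;
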
diff --git a/ql/src/test/queries/clientpositive/windowing_range_multiorder.q b/ql/src/test/queries/clientpositive/windowing_range_multiorder.q
index a09c717..8858466 100644
--- a/ql/src/test/queries/clientpositive/windowing_range_multiorder.q
+++ b/ql/src/test/queries/clientpositive/windowing_range_multiorder.q
@@ -17,24 +17,24 @@ create table over10k_n17(
 
 load data local inpath '../../data/files/over10k' into table over10k_n17;
 
-select first_value(t) over ( partition by si order by i, b ) from over10k_n17 limit 100;
+select first_value(t) over ( partition by si order by i, b ) fv from over10k_n17 order by fv limit 100;
 
-select last_value(i) over (partition by si, bo order by i, f desc range current row) from over10k_n17 limit 100;
+select last_value(i) over (partition by si, bo order by i, f desc range current row) lv from over10k_n17 order by lv limit 100;
 
-select row_number() over (partition by si, bo order by i, f desc range between unbounded preceding and unbounded following) from over10k_n17 limit 100;
+select row_number() over (partition by si, bo order by i, f desc range between unbounded preceding and unbounded following) rn from over10k_n17 order by rn limit 100;
 
-select s, si, i, avg(i) over (partition by s range between unbounded preceding and current row) from over10k_n17 limit 100;
+select s, si, i, avg(i) over (partition by s range between unbounded preceding and current row) a from over10k_n17 order by s, si, i, a limit 100;
 
-select s, si, i, avg(i) over (partition by s order by si, i range between unbounded preceding and current row) from over10k_n17 limit 100;
+select s, si, i, avg(i) over (partition by s order by si, i range between unbounded preceding and current row) a from over10k_n17 order by s, si, i, a limit 100;
 
-select s, si, i, min(i) over (partition by s order by si, i range between unbounded preceding and current row) from over10k_n17 limit 100;
+select s, si, i, min(i) over (partition by s order by si, i range between unbounded preceding and current row) m from over10k_n17 order by s, si, i, m limit 100;
 
-select s, si, i, avg(i) over (partition by s order by si, i desc range between unbounded preceding and current row) from over10k_n17 limit 100;
+select s, si, i, avg(i) over (partition by s order by si, i desc range between unbounded preceding and current row) a from over10k_n17 order by s, si, i, a limit 100;
 
-select si, bo, i, f, max(i) over (partition by si, bo order by i, f desc range between unbounded preceding and current row) from over10k_n17 limit 100;
+select si, bo, i, f, max(i) over (partition by si, bo order by i, f desc range between unbounded preceding and current row) m from over10k_n17 order by si, bo, i, f, m limit 100;
 
-select bo, rank() over (partition by i order by bo nulls first, b nulls last range between unbounded preceding and unbounded following) from over10k_n17 limit 100;
+select bo, rank() over (partition by i order by bo nulls first, b nulls last range between unbounded preceding and unbounded following) r from over10k_n17 order by bo, r limit 100;
 
-select CAST(s as CHAR(12)), rank() over (partition by i order by CAST(s as CHAR(12)) nulls last range between unbounded preceding and unbounded following) from over10k_n17 limit 100;
+select CAST(s as CHAR(12)) s, rank() over (partition by i order by CAST(s as CHAR(12)) nulls last range between unbounded preceding and unbounded following) r from over10k_n17 order by s, r limit 100;
 
-select CAST(s as VARCHAR(12)), rank() over (partition by i order by CAST(s as VARCHAR(12)) nulls last range between unbounded preceding and unbounded following) from over10k_n17 limit 100;
+select CAST(s as VARCHAR(12)) s, rank() over (partition by i order by CAST(s as VARCHAR(12)) nulls last range between unbounded preceding and unbounded following) r from over10k_n17 order by s, r limit 100;
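
windowing_range_multiorder.q also shows the idiom that keeps these rewrites readable:
give the window expression a column alias and let the outer ORDER BY reference the
alias instead of repeating the whole OVER clause. A sketch of the two equivalent
shapes, using a hypothetical table t(s string, i int):

    -- correct but verbose: the OVER clause is spelled out twice
    select s, min(i) over (partition by s) from t
    order by s, min(i) over (partition by s) limit 100;

    -- equivalent and shorter: order by the alias
    select s, min(i) over (partition by s) m from t order by s, m limit 100;
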
diff --git a/ql/src/test/queries/clientpositive/windowing_udaf.q b/ql/src/test/queries/clientpositive/windowing_udaf.q
index 2e7da0d..3aab200 100644
--- a/ql/src/test/queries/clientpositive/windowing_udaf.q
+++ b/ql/src/test/queries/clientpositive/windowing_udaf.q
@@ -18,16 +18,17 @@ create table over10k_n4(
 
 load data local inpath '../../data/files/over10k' into table over10k_n4;
 
-select s, min(i) over (partition by s) from over10k_n4 limit 100;
+select s, min(i) over (partition by s) m from over10k_n4 order by s, m limit 100;
 
-select s, avg(f) over (partition by si order by s) from over10k_n4 limit 100;
+select s, avg(f) over (partition by si order by s) a from over10k_n4 order by s, a limit 100;
 
-select s, avg(i) over (partition by t, b order by s) from over10k_n4 limit 100;
+select s, avg(i) over (partition by t, b order by s) a from over10k_n4 order by s, a limit 100;
 
-select max(i) over w from over10k_n4 window w as (partition by f) limit 100;
+select max(i) over w m from over10k_n4 window w as (partition by f) order by m limit 100;
 
-select s, avg(d) over (partition by t order by f) from over10k_n4 limit 100;
+select s, avg(d) over (partition by t order by f) a from over10k_n4 order by s, a limit 100;
 
 select key, max(value) over
-  (order by key rows between 10 preceding and 20 following)
-from src1 where length(key) > 10;
+  (order by key rows between 10 preceding and 20 following) m
+from src1 where length(key) > 10
+order by key, m;
diff --git a/ql/src/test/queries/clientpositive/windowing_windowspec3.q b/ql/src/test/queries/clientpositive/windowing_windowspec3.q
index debd076..fac52b6 100644
--- a/ql/src/test/queries/clientpositive/windowing_windowspec3.q
+++ b/ql/src/test/queries/clientpositive/windowing_windowspec3.q
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
 -- Test value based windowing spec
 
 drop table if exists emp_n0;
diff --git a/ql/src/test/results/clientpositive/temp_table_merge_dynamic_partition.q.out b/ql/src/test/results/clientpositive/llap/temp_table_merge_dynamic_partition.q.out
similarity index 84%
rename from ql/src/test/results/clientpositive/temp_table_merge_dynamic_partition.q.out
rename to ql/src/test/results/clientpositive/llap/temp_table_merge_dynamic_partition.q.out
index 8b1cfad..abb4f77 100644
--- a/ql/src/test/results/clientpositive/temp_table_merge_dynamic_partition.q.out
+++ b/ql/src/test/results/clientpositive/llap/temp_table_merge_dynamic_partition.q.out
@@ -60,41 +60,55 @@ POSTHOOK: Input: default@srcpart_merge_dp_n1_temp
 POSTHOOK: Input: default@srcpart_merge_dp_n1_temp@ds=2008-04-08/hr=11
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
 
 STAGE PLANS:
   Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart_merge_dp_n1_temp
-            filterExpr: (ds = '2008-04-08') (type: boolean)
-            Statistics: Num rows: 99 Data size: 49864 Basic stats: PARTIAL Column stats: PARTIAL
-            Select Operator
-              expressions: key (type: string), value (type: string), hr (type: string)
-              outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 99 Data size: 49864 Basic stats: PARTIAL Column stats: PARTIAL
-              Reduce Output Operator
-                key expressions: _col2 (type: string)
-                null sort order: a
-                sort order: +
-                Map-reduce partition columns: _col2 (type: string)
-                Statistics: Num rows: 99 Data size: 49864 Basic stats: PARTIAL Column stats: PARTIAL
-                value expressions: _col0 (type: string), _col1 (type: string)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), KEY._col2 (type: string)
-          outputColumnNames: _col0, _col1, _col2
-          File Output Operator
-            compressed: false
-            Dp Sort State: PARTITION_SORTED
-            Statistics: Num rows: 99 Data size: 49864 Basic stats: PARTIAL Column stats: PARTIAL
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.merge_dynamic_part_n1_temp
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: srcpart_merge_dp_n1_temp
+                  filterExpr: (ds = '2008-04-08') (type: boolean)
+                  Statistics: Num rows: 99 Data size: 49864 Basic stats: PARTIAL Column stats: PARTIAL
+                  Select Operator
+                    expressions: key (type: string), value (type: string), hr (type: string)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 99 Data size: 49864 Basic stats: PARTIAL Column stats: PARTIAL
+                    Reduce Output Operator
+                      key expressions: _col2 (type: string)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: _col2 (type: string)
+                      Statistics: Num rows: 99 Data size: 49864 Basic stats: PARTIAL Column stats: PARTIAL
+                      value expressions: _col0 (type: string), _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), KEY._col2 (type: string)
+                outputColumnNames: _col0, _col1, _col2
+                File Output Operator
+                  compressed: false
+                  Dp Sort State: PARTITION_SORTED
+                  Statistics: Num rows: 99 Data size: 49864 Basic stats: PARTIAL Column stats: PARTIAL
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.merge_dynamic_part_n1_temp
+
+  Stage: Stage-2
+    Dependency Collection
 
   Stage: Stage-0
     Move Operator
@@ -109,7 +123,7 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.merge_dynamic_part_n1_temp
 
-  Stage: Stage-2
+  Stage: Stage-3
     Stats Work
       Basic Stats Work:
 
@@ -666,79 +680,79 @@ POSTHOOK: Input: default@srcpart_merge_dp_n1_temp@ds=2008-04-08/hr=11
 POSTHOOK: Output: default@merge_dynamic_part_n1_temp@ds=2008-04-08/hr=11
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
-  Stage-4
-  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
-  Stage-2 depends on stages: Stage-0
-  Stage-3
-  Stage-5
-  Stage-6 depends on stages: Stage-5
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
 
 STAGE PLANS:
   Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart_merge_dp_n1_temp
-            filterExpr: (ds = '2008-04-08') (type: boolean)
-            Statistics: Num rows: 99 Data size: 31648 Basic stats: PARTIAL Column stats: NONE
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 99 Data size: 31648 Basic stats: PARTIAL Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 99 Data size: 31648 Basic stats: PARTIAL Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.merge_dynamic_part_n1_temp
-              Select Operator
-                expressions: _col0 (type: string), _col1 (type: string), '2008-04-08' (type: string), '11' (type: string)
-                outputColumnNames: key, value, ds, hr
-                Statistics: Num rows: 99 Data size: 31648 Basic stats: PARTIAL Column stats: NONE
-                Group By Operator
-                  aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll')
-                  keys: ds (type: string), hr (type: string)
-                  minReductionHashAggr: 0.99
-                  mode: hash
-                  outputColumnNames: _col0, _col1, _col2, _col3
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: srcpart_merge_dp_n1_temp
+                  filterExpr: (ds = '2008-04-08') (type: boolean)
                   Statistics: Num rows: 99 Data size: 31648 Basic stats: PARTIAL Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string), _col1 (type: string)
-                    null sort order: zz
-                    sort order: ++
-                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: _col0, _col1
                     Statistics: Num rows: 99 Data size: 31648 Basic stats: PARTIAL Column stats: NONE
-                    value expressions: _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 49 Data size: 15664 Basic stats: PARTIAL Column stats: NONE
-          Select Operator
-            expressions: _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col3 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: string), _col1 (type: string)
-            outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 49 Data size: 15664 Basic stats: PARTIAL Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 49 Data size: 15664 Basic stats: PARTIAL Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-7
-    Conditional Operator
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 99 Data size: 31648 Basic stats: PARTIAL Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.merge_dynamic_part_n1_temp
+                    Select Operator
+                      expressions: _col0 (type: string), _col1 (type: string), '2008-04-08' (type: string), '11' (type: string)
+                      outputColumnNames: key, value, ds, hr
+                      Statistics: Num rows: 99 Data size: 31648 Basic stats: PARTIAL Column stats: NONE
+                      Group By Operator
+                        aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll')
+                        keys: ds (type: string), hr (type: string)
+                        minReductionHashAggr: 0.99
+                        mode: hash
+                        outputColumnNames: _col0, _col1, _col2, _col3
+                        Statistics: Num rows: 99 Data size: 31648 Basic stats: PARTIAL Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string), _col1 (type: string)
+                          null sort order: zz
+                          sort order: ++
+                          Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                          Statistics: Num rows: 99 Data size: 31648 Basic stats: PARTIAL Column stats: NONE
+                          value expressions: _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+                keys: KEY._col0 (type: string), KEY._col1 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 49 Data size: 15664 Basic stats: PARTIAL Column stats: NONE
+                Select Operator
+                  expressions: _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col3 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: string), _col1 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  Statistics: Num rows: 49 Data size: 15664 Basic stats: PARTIAL Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 49 Data size: 15664 Basic stats: PARTIAL Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
-  Stage: Stage-4
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
+  Stage: Stage-2
+    Dependency Collection
 
   Stage: Stage-0
     Move Operator
@@ -753,7 +767,7 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.merge_dynamic_part_n1_temp
 
-  Stage: Stage-2
+  Stage: Stage-3
     Stats Work
       Basic Stats Work:
       Column Stats Desc:
@@ -761,36 +775,6 @@ STAGE PLANS:
           Column Types: string, string
           Table: default.merge_dynamic_part_n1_temp
 
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.merge_dynamic_part_n1_temp
-
-  Stage: Stage-5
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.merge_dynamic_part_n1_temp
-
-  Stage: Stage-6
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
 PREHOOK: query: insert overwrite table merge_dynamic_part_n1_temp partition (ds='2008-04-08', hr=11) select key, value from srcpart_merge_dp_n1_temp where ds='2008-04-08'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart_merge_dp_n1_temp
@@ -1343,55 +1327,55 @@ POSTHOOK: Input: default@srcpart_merge_dp_n1_temp
 POSTHOOK: Input: default@srcpart_merge_dp_n1_temp@ds=2008-04-08/hr=11
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
-  Stage-4
-  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
-  Stage-2 depends on stages: Stage-0
-  Stage-3
-  Stage-5
-  Stage-6 depends on stages: Stage-5
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
 
 STAGE PLANS:
   Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart_merge_dp_n1_temp
-            filterExpr: ((ds = '2008-04-08') and (11.0D = 11.0D)) (type: boolean)
-            Statistics: Num rows: 99 Data size: 49864 Basic stats: PARTIAL Column stats: PARTIAL
-            Select Operator
-              expressions: key (type: string), value (type: string), '2008-04-08' (type: string), hr (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 99 Data size: 49864 Basic stats: PARTIAL Column stats: PARTIAL
-              Reduce Output Operator
-                key expressions: _col2 (type: string), _col3 (type: string)
-                null sort order: aa
-                sort order: ++
-                Map-reduce partition columns: _col2 (type: string), _col3 (type: string)
-                Statistics: Num rows: 99 Data size: 49864 Basic stats: PARTIAL Column stats: PARTIAL
-                value expressions: _col0 (type: string), _col1 (type: string)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), KEY._col2 (type: string), KEY._col3 (type: string)
-          outputColumnNames: _col0, _col1, _col2, _col3
-          File Output Operator
-            compressed: false
-            Dp Sort State: PARTITION_SORTED
-            Statistics: Num rows: 99 Data size: 49864 Basic stats: PARTIAL Column stats: PARTIAL
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.merge_dynamic_part_n1_temp
-
-  Stage: Stage-7
-    Conditional Operator
-
-  Stage: Stage-4
-    Move Operator
-      files:
-          hdfs directory: true
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: srcpart_merge_dp_n1_temp
+                  filterExpr: ((ds = '2008-04-08') and (11.0D = 11.0D)) (type: boolean)
+                  Statistics: Num rows: 99 Data size: 49864 Basic stats: PARTIAL Column stats: PARTIAL
+                  Select Operator
+                    expressions: key (type: string), value (type: string), '2008-04-08' (type: string), hr (type: string)
+                    outputColumnNames: _col0, _col1, _col2, _col3
+                    Statistics: Num rows: 99 Data size: 49864 Basic stats: PARTIAL Column stats: PARTIAL
+                    Reduce Output Operator
+                      key expressions: _col2 (type: string), _col3 (type: string)
+                      null sort order: aa
+                      sort order: ++
+                      Map-reduce partition columns: _col2 (type: string), _col3 (type: string)
+                      Statistics: Num rows: 99 Data size: 49864 Basic stats: PARTIAL Column stats: PARTIAL
+                      value expressions: _col0 (type: string), _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), KEY._col2 (type: string), KEY._col3 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                File Output Operator
+                  compressed: false
+                  Dp Sort State: PARTITION_SORTED
+                  Statistics: Num rows: 99 Data size: 49864 Basic stats: PARTIAL Column stats: PARTIAL
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.merge_dynamic_part_n1_temp
+
+  Stage: Stage-2
+    Dependency Collection
 
   Stage: Stage-0
     Move Operator
@@ -1406,40 +1390,10 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.merge_dynamic_part_n1_temp
 
-  Stage: Stage-2
+  Stage: Stage-3
     Stats Work
       Basic Stats Work:
 
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.merge_dynamic_part_n1_temp
-
-  Stage: Stage-5
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.merge_dynamic_part_n1_temp
-
-  Stage: Stage-6
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
 PREHOOK: query: insert overwrite table merge_dynamic_part_n1_temp partition (ds, hr) select key, value, ds, hr from srcpart_merge_dp_n1_temp where ds='2008-04-08' and hr=11
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart_merge_dp_n1_temp
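
The golden-file churn in these .q.out diffs is mechanical fallout from moving the
tests to TestMiniLlapLocal: the MapReduce plan's Conditional Operator and its
Stage-3/-5/-6 merge-and-move stages disappear, a single Dependency Collection stage
takes their place, the work is laid out as Tez vertices and edges, and each vertex is
annotated with `Execution mode: llap`. A sketch of the session settings that select
this execution path (assuming otherwise default configuration):

    set hive.execution.engine=tez;
    set hive.llap.execution.mode=all;
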
diff --git a/ql/src/test/results/clientpositive/temp_table_merge_dynamic_partition2.q.out b/ql/src/test/results/clientpositive/llap/temp_table_merge_dynamic_partition2.q.out
similarity index 72%
rename from ql/src/test/results/clientpositive/temp_table_merge_dynamic_partition2.q.out
rename to ql/src/test/results/clientpositive/llap/temp_table_merge_dynamic_partition2.q.out
index 413a3f2..c1fb807 100644
--- a/ql/src/test/results/clientpositive/temp_table_merge_dynamic_partition2.q.out
+++ b/ql/src/test/results/clientpositive/llap/temp_table_merge_dynamic_partition2.q.out
@@ -79,55 +79,55 @@ POSTHOOK: Input: default@srcpart_merge_dp_n0_temp@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart_merge_dp_n0_temp@ds=2008-04-08/hr=12
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
-  Stage-4
-  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
-  Stage-2 depends on stages: Stage-0
-  Stage-3
-  Stage-5
-  Stage-6 depends on stages: Stage-5
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
 
 STAGE PLANS:
   Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart_merge_dp_n0_temp
-            filterExpr: (ds = '2008-04-08') (type: boolean)
-            Statistics: Num rows: 297 Data size: 148488 Basic stats: PARTIAL Column stats: PARTIAL
-            Select Operator
-              expressions: key (type: string), value (type: string), hr (type: string)
-              outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 297 Data size: 148488 Basic stats: PARTIAL Column stats: PARTIAL
-              Reduce Output Operator
-                key expressions: _col2 (type: string)
-                null sort order: a
-                sort order: +
-                Map-reduce partition columns: _col2 (type: string)
-                Statistics: Num rows: 297 Data size: 148488 Basic stats: PARTIAL Column stats: PARTIAL
-                value expressions: _col0 (type: string), _col1 (type: string)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), KEY._col2 (type: string)
-          outputColumnNames: _col0, _col1, _col2
-          File Output Operator
-            compressed: false
-            Dp Sort State: PARTITION_SORTED
-            Statistics: Num rows: 297 Data size: 148488 Basic stats: PARTIAL Column stats: PARTIAL
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.merge_dynamic_part_n0_temp
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: srcpart_merge_dp_n0_temp
+                  filterExpr: (ds = '2008-04-08') (type: boolean)
+                  Statistics: Num rows: 297 Data size: 148488 Basic stats: PARTIAL Column stats: PARTIAL
+                  Select Operator
+                    expressions: key (type: string), value (type: string), hr (type: string)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 297 Data size: 148488 Basic stats: PARTIAL Column stats: PARTIAL
+                    Reduce Output Operator
+                      key expressions: _col2 (type: string)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: _col2 (type: string)
+                      Statistics: Num rows: 297 Data size: 148488 Basic stats: PARTIAL Column stats: PARTIAL
+                      value expressions: _col0 (type: string), _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), KEY._col2 (type: string)
+                outputColumnNames: _col0, _col1, _col2
+                File Output Operator
+                  compressed: false
+                  Dp Sort State: PARTITION_SORTED
+                  Statistics: Num rows: 297 Data size: 148488 Basic stats: PARTIAL Column stats: PARTIAL
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.merge_dynamic_part_n0_temp
 
-  Stage: Stage-7
-    Conditional Operator
-
-  Stage: Stage-4
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
+  Stage: Stage-2
+    Dependency Collection
 
   Stage: Stage-0
     Move Operator
@@ -142,40 +142,10 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.merge_dynamic_part_n0_temp
 
-  Stage: Stage-2
+  Stage: Stage-3
     Stats Work
       Basic Stats Work:
 
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.merge_dynamic_part_n0_temp
-
-  Stage: Stage-5
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.merge_dynamic_part_n0_temp
-
-  Stage: Stage-6
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
 PREHOOK: query: insert overwrite table merge_dynamic_part_n0_temp partition (ds='2008-04-08', hr) select key, value, hr from srcpart_merge_dp_n0_temp where ds='2008-04-08'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart_merge_dp_n0_temp
diff --git a/ql/src/test/results/clientpositive/temp_table_merge_dynamic_partition3.q.out b/ql/src/test/results/clientpositive/llap/temp_table_merge_dynamic_partition3.q.out
similarity index 81%
rename from ql/src/test/results/clientpositive/temp_table_merge_dynamic_partition3.q.out
rename to ql/src/test/results/clientpositive/llap/temp_table_merge_dynamic_partition3.q.out
index 12d5d59..9d6669d 100644
--- a/ql/src/test/results/clientpositive/temp_table_merge_dynamic_partition3.q.out
+++ b/ql/src/test/results/clientpositive/llap/temp_table_merge_dynamic_partition3.q.out
@@ -143,55 +143,55 @@ POSTHOOK: Input: default@srcpart_merge_dp_n2_temp@ds=2008-04-09/hr=11
 POSTHOOK: Input: default@srcpart_merge_dp_n2_temp@ds=2008-04-09/hr=12
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
-  Stage-4
-  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
-  Stage-2 depends on stages: Stage-0
-  Stage-3
-  Stage-5
-  Stage-6 depends on stages: Stage-5
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
 
 STAGE PLANS:
   Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart_merge_dp_n2_temp
-            filterExpr: (ds >= '2008-04-08') (type: boolean)
-            Statistics: Num rows: 594 Data size: 405536 Basic stats: PARTIAL Column stats: PARTIAL
-            Select Operator
-              expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 594 Data size: 405536 Basic stats: PARTIAL Column stats: PARTIAL
-              Reduce Output Operator
-                key expressions: _col2 (type: string), _col3 (type: string)
-                null sort order: aa
-                sort order: ++
-                Map-reduce partition columns: _col2 (type: string), _col3 (type: string)
-                Statistics: Num rows: 594 Data size: 405536 Basic stats: PARTIAL Column stats: PARTIAL
-                value expressions: _col0 (type: string), _col1 (type: string)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), KEY._col2 (type: string), KEY._col3 (type: string)
-          outputColumnNames: _col0, _col1, _col2, _col3
-          File Output Operator
-            compressed: false
-            Dp Sort State: PARTITION_SORTED
-            Statistics: Num rows: 594 Data size: 405536 Basic stats: PARTIAL Column stats: PARTIAL
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.merge_dynamic_part_n2_temp
-
-  Stage: Stage-7
-    Conditional Operator
-
-  Stage: Stage-4
-    Move Operator
-      files:
-          hdfs directory: true
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: srcpart_merge_dp_n2_temp
+                  filterExpr: (ds >= '2008-04-08') (type: boolean)
+                  Statistics: Num rows: 594 Data size: 405536 Basic stats: PARTIAL Column stats: PARTIAL
+                  Select Operator
+                    expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
+                    outputColumnNames: _col0, _col1, _col2, _col3
+                    Statistics: Num rows: 594 Data size: 405536 Basic stats: PARTIAL Column stats: PARTIAL
+                    Reduce Output Operator
+                      key expressions: _col2 (type: string), _col3 (type: string)
+                      null sort order: aa
+                      sort order: ++
+                      Map-reduce partition columns: _col2 (type: string), _col3 (type: string)
+                      Statistics: Num rows: 594 Data size: 405536 Basic stats: PARTIAL Column stats: PARTIAL
+                      value expressions: _col0 (type: string), _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), KEY._col2 (type: string), KEY._col3 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                File Output Operator
+                  compressed: false
+                  Dp Sort State: PARTITION_SORTED
+                  Statistics: Num rows: 594 Data size: 405536 Basic stats: PARTIAL Column stats: PARTIAL
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.merge_dynamic_part_n2_temp
+
+  Stage: Stage-2
+    Dependency Collection
 
   Stage: Stage-0
     Move Operator
@@ -206,40 +206,10 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.merge_dynamic_part_n2_temp
 
-  Stage: Stage-2
+  Stage: Stage-3
     Stats Work
       Basic Stats Work:
 
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.merge_dynamic_part_n2_temp
-
-  Stage: Stage-5
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.merge_dynamic_part_n2_temp
-
-  Stage: Stage-6
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
 PREHOOK: query: insert overwrite table merge_dynamic_part_n2_temp partition (ds, hr) select key, value, ds, hr from srcpart_merge_dp_n2_temp where ds>='2008-04-08'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart_merge_dp_n2_temp
diff --git a/ql/src/test/results/clientpositive/temp_table_merge_dynamic_partition4.q.out b/ql/src/test/results/clientpositive/llap/temp_table_merge_dynamic_partition4.q.out
similarity index 83%
rename from ql/src/test/results/clientpositive/temp_table_merge_dynamic_partition4.q.out
rename to ql/src/test/results/clientpositive/llap/temp_table_merge_dynamic_partition4.q.out
index 8ddbb96..940b065 100644
--- a/ql/src/test/results/clientpositive/temp_table_merge_dynamic_partition4.q.out
+++ b/ql/src/test/results/clientpositive/llap/temp_table_merge_dynamic_partition4.q.out
@@ -140,55 +140,55 @@ POSTHOOK: Input: default@srcpart_merge_dp_rc_n1_temp@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart_merge_dp_rc_n1_temp@ds=2008-04-08/hr=12
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
-  Stage-4
-  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
-  Stage-2 depends on stages: Stage-0
-  Stage-3
-  Stage-5
-  Stage-6 depends on stages: Stage-5
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
 
 STAGE PLANS:
   Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart_merge_dp_rc_n1_temp
-            filterExpr: (ds = '2008-04-08') (type: boolean)
-            Statistics: Num rows: 1000 Data size: 349968 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string), value (type: string), if(((UDFToDouble(key) % 2.0D) = 0.0D), 'a1', 'b1') (type: string)
-              outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 1000 Data size: 349968 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col2 (type: string)
-                null sort order: a
-                sort order: +
-                Map-reduce partition columns: _col2 (type: string)
-                Statistics: Num rows: 1000 Data size: 349968 Basic stats: COMPLETE Column stats: NONE
-                value expressions: _col0 (type: string), _col1 (type: string)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), KEY._col2 (type: string)
-          outputColumnNames: _col0, _col1, _col2
-          File Output Operator
-            compressed: false
-            Dp Sort State: PARTITION_SORTED
-            Statistics: Num rows: 1000 Data size: 349968 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                name: default.merge_dynamic_part_n3_temp
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: srcpart_merge_dp_rc_n1_temp
+                  filterExpr: (ds = '2008-04-08') (type: boolean)
+                  Statistics: Num rows: 1000 Data size: 349968 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: string), value (type: string), if(((UDFToDouble(key) % 2.0D) = 0.0D), 'a1', 'b1') (type: string)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 1000 Data size: 349968 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col2 (type: string)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: _col2 (type: string)
+                      Statistics: Num rows: 1000 Data size: 349968 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: _col0 (type: string), _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), KEY._col2 (type: string)
+                outputColumnNames: _col0, _col1, _col2
+                File Output Operator
+                  compressed: false
+                  Dp Sort State: PARTITION_SORTED
+                  Statistics: Num rows: 1000 Data size: 349968 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+                      name: default.merge_dynamic_part_n3_temp
 
-  Stage: Stage-7
-    Conditional Operator
-
-  Stage: Stage-4
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
+  Stage: Stage-2
+    Dependency Collection
 
   Stage: Stage-0
     Move Operator
@@ -203,30 +203,10 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
               name: default.merge_dynamic_part_n3_temp
 
-  Stage: Stage-2
+  Stage: Stage-3
     Stats Work
       Basic Stats Work:
 
-  Stage: Stage-3
-    Merge File Operator
-      Map Operator Tree:
-          RCFile Merge Operator
-      merge level: block
-      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-
-  Stage: Stage-5
-    Merge File Operator
-      Map Operator Tree:
-          RCFile Merge Operator
-      merge level: block
-      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-
-  Stage: Stage-6
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
 PREHOOK: query: insert overwrite table merge_dynamic_part_n3_temp partition (ds = '2008-04-08', hr)
   select key, value, if(key % 2 == 0, 'a1', 'b1') as hr from srcpart_merge_dp_rc_n1_temp where ds = '2008-04-08'
 PREHOOK: type: QUERY
diff --git a/ql/src/test/results/clientpositive/temp_table_merge_dynamic_partition5.q.out b/ql/src/test/results/clientpositive/llap/temp_table_merge_dynamic_partition5.q.out
similarity index 80%
rename from ql/src/test/results/clientpositive/temp_table_merge_dynamic_partition5.q.out
rename to ql/src/test/results/clientpositive/llap/temp_table_merge_dynamic_partition5.q.out
index 7dbf56c..3718eef 100644
--- a/ql/src/test/results/clientpositive/temp_table_merge_dynamic_partition5.q.out
+++ b/ql/src/test/results/clientpositive/llap/temp_table_merge_dynamic_partition5.q.out
@@ -116,55 +116,55 @@ POSTHOOK: Input: default@srcpart_merge_dp_rc_temp@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart_merge_dp_rc_temp@ds=2008-04-08/hr=12
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
-  Stage-4
-  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
-  Stage-2 depends on stages: Stage-0
-  Stage-3
-  Stage-5
-  Stage-6 depends on stages: Stage-5
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
 
 STAGE PLANS:
   Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart_merge_dp_rc_temp
-            filterExpr: (ds = '2008-04-08') (type: boolean)
-            Statistics: Num rows: 618 Data size: 216752 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string), value (type: string), if(((UDFToDouble(key) % 100.0D) = 0.0D), 'a1', 'b1') (type: string)
-              outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 618 Data size: 216752 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col2 (type: string)
-                null sort order: a
-                sort order: +
-                Map-reduce partition columns: _col2 (type: string)
-                Statistics: Num rows: 618 Data size: 216752 Basic stats: COMPLETE Column stats: NONE
-                value expressions: _col0 (type: string), _col1 (type: string)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), KEY._col2 (type: string)
-          outputColumnNames: _col0, _col1, _col2
-          File Output Operator
-            compressed: false
-            Dp Sort State: PARTITION_SORTED
-            Statistics: Num rows: 618 Data size: 216752 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                name: default.merge_dynamic_part_temp
-
-  Stage: Stage-7
-    Conditional Operator
-
-  Stage: Stage-4
-    Move Operator
-      files:
-          hdfs directory: true
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: srcpart_merge_dp_rc_temp
+                  filterExpr: (ds = '2008-04-08') (type: boolean)
+                  Statistics: Num rows: 618 Data size: 216752 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: string), value (type: string), if(((UDFToDouble(key) % 100.0D) = 0.0D), 'a1', 'b1') (type: string)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 618 Data size: 216752 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col2 (type: string)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: _col2 (type: string)
+                      Statistics: Num rows: 618 Data size: 216752 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: _col0 (type: string), _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), KEY._col2 (type: string)
+                outputColumnNames: _col0, _col1, _col2
+                File Output Operator
+                  compressed: false
+                  Dp Sort State: PARTITION_SORTED
+                  Statistics: Num rows: 618 Data size: 216752 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+                      name: default.merge_dynamic_part_temp
+
+  Stage: Stage-2
+    Dependency Collection
 
   Stage: Stage-0
     Move Operator
@@ -179,30 +179,10 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
               name: default.merge_dynamic_part_temp
 
-  Stage: Stage-2
+  Stage: Stage-3
     Stats Work
       Basic Stats Work:
 
-  Stage: Stage-3
-    Merge File Operator
-      Map Operator Tree:
-          RCFile Merge Operator
-      merge level: block
-      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-
-  Stage: Stage-5
-    Merge File Operator
-      Map Operator Tree:
-          RCFile Merge Operator
-      merge level: block
-      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-
-  Stage: Stage-6
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
 PREHOOK: query: insert overwrite table merge_dynamic_part_temp partition (ds = '2008-04-08', hr)
   select key, value, if(key % 100 == 0, 'a1', 'b1') as hr from srcpart_merge_dp_rc_temp where ds = '2008-04-08'
 PREHOOK: type: QUERY
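
Note on the hunk above: the Tez plan compiles the dynamic-partition insert quoted in the PREHOOK/POSTHOOK lines. ds is a static partition key while hr is computed per row, so the map side shuffles on the derived hr value (_col2) and the reduce side writes each partition as a sorted run (Dp Sort State: PARTITION_SORTED). Under Tez, the MR plan's conditional file-merge machinery (Conditional Operator, the RCFile Merge stages, the extra Move) drops out of the explain output, replaced by a single DAG plus a Dependency Collection stage. Restated as a runnable sketch:

    -- dynamic-partition insert from the q file: ds is a static partition
    -- key; hr is derived per row and becomes the shuffle key (_col2)
    insert overwrite table merge_dynamic_part_temp partition (ds = '2008-04-08', hr)
      select key, value, if(key % 100 == 0, 'a1', 'b1') as hr
      from srcpart_merge_dp_rc_temp
      where ds = '2008-04-08';
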
diff --git a/ql/src/test/results/clientpositive/temp_table_options1.q.out b/ql/src/test/results/clientpositive/llap/temp_table_options1.q.out
similarity index 96%
rename from ql/src/test/results/clientpositive/temp_table_options1.q.out
rename to ql/src/test/results/clientpositive/llap/temp_table_options1.q.out
index be31a5a..3ca126b 100644
--- a/ql/src/test/results/clientpositive/temp_table_options1.q.out
+++ b/ql/src/test/results/clientpositive/llap/temp_table_options1.q.out
@@ -31,8 +31,8 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@impressions
 #### A masked pattern was here ####
 35	40
-48	32
 100100	40
+48	32
 PREHOOK: query: select imp,msg from impressions
 PREHOOK: type: QUERY
 PREHOOK: Input: default@impressions
@@ -42,8 +42,8 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@impressions
 #### A masked pattern was here ####
 35	40
-48	32
 100100	40
+48	32
 PREHOOK: query: drop table impressions
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@impressions
@@ -101,139 +101,139 @@ POSTHOOK: Input: default@date_serde_regex
 Baltimore	New York	2010-10-20	-30.0	1064
 Baltimore	New York	2010-10-20	23.0	1142
 Baltimore	New York	2010-10-20	6.0	1599
-Chicago	New York	2010-10-20	42.0	361
-Chicago	New York	2010-10-20	24.0	897
-Chicago	New York	2010-10-20	15.0	1531
-Chicago	New York	2010-10-20	-6.0	1610
-Chicago	New York	2010-10-20	-2.0	3198
-Baltimore	New York	2010-10-21	17.0	1064
 Baltimore	New York	2010-10-21	105.0	1142
+Baltimore	New York	2010-10-21	17.0	1064
 Baltimore	New York	2010-10-21	28.0	1599
-Chicago	New York	2010-10-21	142.0	361
-Chicago	New York	2010-10-21	77.0	897
-Chicago	New York	2010-10-21	53.0	1531
-Chicago	New York	2010-10-21	-5.0	1610
-Chicago	New York	2010-10-21	51.0	3198
 Baltimore	New York	2010-10-22	-12.0	1064
-Baltimore	New York	2010-10-22	54.0	1142
 Baltimore	New York	2010-10-22	18.0	1599
-Chicago	New York	2010-10-22	2.0	361
-Chicago	New York	2010-10-22	24.0	897
-Chicago	New York	2010-10-22	16.0	1531
-Chicago	New York	2010-10-22	-6.0	1610
-Chicago	New York	2010-10-22	-11.0	3198
-Baltimore	New York	2010-10-23	18.0	272
+Baltimore	New York	2010-10-22	54.0	1142
 Baltimore	New York	2010-10-23	-10.0	1805
+Baltimore	New York	2010-10-23	18.0	272
 Baltimore	New York	2010-10-23	6.0	3171
-Chicago	New York	2010-10-23	3.0	384
-Chicago	New York	2010-10-23	32.0	426
-Chicago	New York	2010-10-23	1.0	650
-Chicago	New York	2010-10-23	11.0	3085
 Baltimore	New York	2010-10-24	12.0	1599
 Baltimore	New York	2010-10-24	20.0	2571
-Chicago	New York	2010-10-24	10.0	361
-Chicago	New York	2010-10-24	113.0	897
-Chicago	New York	2010-10-24	-5.0	1531
-Chicago	New York	2010-10-24	-17.0	1610
-Chicago	New York	2010-10-24	-3.0	3198
 Baltimore	New York	2010-10-25	-25.0	1064
-Baltimore	New York	2010-10-25	92.0	1142
 Baltimore	New York	2010-10-25	106.0	1599
-Chicago	New York	2010-10-25	31.0	361
-Chicago	New York	2010-10-25	-1.0	897
-Chicago	New York	2010-10-25	43.0	1531
-Chicago	New York	2010-10-25	6.0	1610
-Chicago	New York	2010-10-25	-16.0	3198
+Baltimore	New York	2010-10-25	92.0	1142
 Baltimore	New York	2010-10-26	-22.0	1064
 Baltimore	New York	2010-10-26	123.0	1142
 Baltimore	New York	2010-10-26	90.0	1599
-Chicago	New York	2010-10-26	12.0	361
-Chicago	New York	2010-10-26	0.0	897
-Chicago	New York	2010-10-26	29.0	1531
-Chicago	New York	2010-10-26	-17.0	1610
-Chicago	New York	2010-10-26	6.0	3198
 Baltimore	New York	2010-10-27	-18.0	1064
 Baltimore	New York	2010-10-27	49.0	1142
 Baltimore	New York	2010-10-27	92.0	1599
-Chicago	New York	2010-10-27	148.0	361
+Baltimore	New York	2010-10-28	-14.0	1142
+Baltimore	New York	2010-10-28	-14.0	1599
+Baltimore	New York	2010-10-28	-4.0	1064
+Baltimore	New York	2010-10-29	-2.0	1599
+Baltimore	New York	2010-10-29	-24.0	1064
+Baltimore	New York	2010-10-29	21.0	1142
+Baltimore	New York	2010-10-30	-1.0	1805
+Baltimore	New York	2010-10-30	14.0	272
+Baltimore	New York	2010-10-30	5.0	3171
+Baltimore	New York	2010-10-31	-1.0	1599
+Baltimore	New York	2010-10-31	-14.0	2571
+Chicago	New York	2010-10-20	-2.0	3198
+Chicago	New York	2010-10-20	-6.0	1610
+Chicago	New York	2010-10-20	15.0	1531
+Chicago	New York	2010-10-20	24.0	897
+Chicago	New York	2010-10-20	42.0	361
+Chicago	New York	2010-10-21	-5.0	1610
+Chicago	New York	2010-10-21	142.0	361
+Chicago	New York	2010-10-21	51.0	3198
+Chicago	New York	2010-10-21	53.0	1531
+Chicago	New York	2010-10-21	77.0	897
+Chicago	New York	2010-10-22	-11.0	3198
+Chicago	New York	2010-10-22	-6.0	1610
+Chicago	New York	2010-10-22	16.0	1531
+Chicago	New York	2010-10-22	2.0	361
+Chicago	New York	2010-10-22	24.0	897
+Chicago	New York	2010-10-23	1.0	650
+Chicago	New York	2010-10-23	11.0	3085
+Chicago	New York	2010-10-23	3.0	384
+Chicago	New York	2010-10-23	32.0	426
+Chicago	New York	2010-10-24	-17.0	1610
+Chicago	New York	2010-10-24	-3.0	3198
+Chicago	New York	2010-10-24	-5.0	1531
+Chicago	New York	2010-10-24	10.0	361
+Chicago	New York	2010-10-24	113.0	897
+Chicago	New York	2010-10-25	-1.0	897
+Chicago	New York	2010-10-25	-16.0	3198
+Chicago	New York	2010-10-25	31.0	361
+Chicago	New York	2010-10-25	43.0	1531
+Chicago	New York	2010-10-25	6.0	1610
+Chicago	New York	2010-10-26	-17.0	1610
+Chicago	New York	2010-10-26	0.0	897
+Chicago	New York	2010-10-26	12.0	361
+Chicago	New York	2010-10-26	29.0	1531
+Chicago	New York	2010-10-26	6.0	3198
 Chicago	New York	2010-10-27	-11.0	897
+Chicago	New York	2010-10-27	148.0	361
+Chicago	New York	2010-10-27	21.0	3198
 Chicago	New York	2010-10-27	70.0	1531
 Chicago	New York	2010-10-27	8.0	1610
-Chicago	New York	2010-10-27	21.0	3198
-Baltimore	New York	2010-10-28	-4.0	1064
-Baltimore	New York	2010-10-28	-14.0	1142
-Baltimore	New York	2010-10-28	-14.0	1599
+Chicago	New York	2010-10-28	-11.0	1531
+Chicago	New York	2010-10-28	-18.0	3198
 Chicago	New York	2010-10-28	2.0	361
 Chicago	New York	2010-10-28	2.0	897
-Chicago	New York	2010-10-28	-11.0	1531
 Chicago	New York	2010-10-28	3.0	1610
-Chicago	New York	2010-10-28	-18.0	3198
-Baltimore	New York	2010-10-29	-24.0	1064
-Baltimore	New York	2010-10-29	21.0	1142
-Baltimore	New York	2010-10-29	-2.0	1599
-Chicago	New York	2010-10-29	-12.0	361
 Chicago	New York	2010-10-29	-11.0	897
-Chicago	New York	2010-10-29	15.0	1531
+Chicago	New York	2010-10-29	-12.0	361
 Chicago	New York	2010-10-29	-18.0	1610
 Chicago	New York	2010-10-29	-4.0	3198
-Baltimore	New York	2010-10-30	14.0	272
-Baltimore	New York	2010-10-30	-1.0	1805
-Baltimore	New York	2010-10-30	5.0	3171
-Chicago	New York	2010-10-30	-6.0	384
+Chicago	New York	2010-10-29	15.0	1531
 Chicago	New York	2010-10-30	-10.0	426
-Chicago	New York	2010-10-30	-5.0	650
 Chicago	New York	2010-10-30	-5.0	3085
-Baltimore	New York	2010-10-31	-1.0	1599
-Baltimore	New York	2010-10-31	-14.0	2571
-Chicago	New York	2010-10-31	-25.0	361
+Chicago	New York	2010-10-30	-5.0	650
+Chicago	New York	2010-10-30	-6.0	384
+Chicago	New York	2010-10-31	-15.0	3198
 Chicago	New York	2010-10-31	-18.0	897
-Chicago	New York	2010-10-31	-4.0	1531
 Chicago	New York	2010-10-31	-22.0	1610
-Chicago	New York	2010-10-31	-15.0	3198
-Cleveland	New York	2010-10-30	-23.0	2018
-Cleveland	New York	2010-10-30	-12.0	2932
-Cleveland	New York	2010-10-29	-4.0	2630
-Cleveland	New York	2010-10-29	-19.0	2646
-Cleveland	New York	2010-10-29	-12.0	3014
-Cleveland	New York	2010-10-28	3.0	2630
-Cleveland	New York	2010-10-28	-6.0	2646
-Cleveland	New York	2010-10-28	1.0	3014
-Cleveland	New York	2010-10-27	16.0	2630
-Cleveland	New York	2010-10-27	27.0	3014
-Cleveland	New York	2010-10-26	4.0	2630
-Cleveland	New York	2010-10-26	-27.0	2646
-Cleveland	New York	2010-10-26	-11.0	2662
-Cleveland	New York	2010-10-26	13.0	3014
-Cleveland	New York	2010-10-25	-4.0	2630
-Cleveland	New York	2010-10-25	81.0	2646
-Cleveland	New York	2010-10-25	42.0	3014
-Cleveland	New York	2010-10-24	5.0	2254
+Chicago	New York	2010-10-31	-25.0	361
+Chicago	New York	2010-10-31	-4.0	1531
+Cleveland	New York	2010-10-20	-15.0	3014
+Cleveland	New York	2010-10-20	-8.0	2630
+Cleveland	New York	2010-10-21	29.0	2646
+Cleveland	New York	2010-10-21	3.0	2630
+Cleveland	New York	2010-10-21	72.0	3014
+Cleveland	New York	2010-10-22	-25.0	2646
+Cleveland	New York	2010-10-22	-3.0	3014
+Cleveland	New York	2010-10-22	1.0	2630
+Cleveland	New York	2010-10-23	-21.0	2932
 Cleveland	New York	2010-10-24	-11.0	2630
 Cleveland	New York	2010-10-24	-20.0	2646
 Cleveland	New York	2010-10-24	-9.0	3014
-Cleveland	New York	2010-10-23	-21.0	2932
-Cleveland	New York	2010-10-22	1.0	2630
-Cleveland	New York	2010-10-22	-25.0	2646
-Cleveland	New York	2010-10-22	-3.0	3014
-Cleveland	New York	2010-10-21	3.0	2630
-Cleveland	New York	2010-10-21	29.0	2646
-Cleveland	New York	2010-10-21	72.0	3014
-Cleveland	New York	2010-10-20	-8.0	2630
-Cleveland	New York	2010-10-20	-15.0	3014
-Washington	New York	2010-10-23	-25.0	5832
-Washington	New York	2010-10-23	-21.0	5904
-Washington	New York	2010-10-23	-18.0	5917
-Washington	New York	2010-10-30	-27.0	5904
-Washington	New York	2010-10-30	-16.0	5917
+Cleveland	New York	2010-10-24	5.0	2254
+Cleveland	New York	2010-10-25	-4.0	2630
+Cleveland	New York	2010-10-25	42.0	3014
+Cleveland	New York	2010-10-25	81.0	2646
+Cleveland	New York	2010-10-26	-11.0	2662
+Cleveland	New York	2010-10-26	-27.0	2646
+Cleveland	New York	2010-10-26	13.0	3014
+Cleveland	New York	2010-10-26	4.0	2630
+Cleveland	New York	2010-10-27	16.0	2630
+Cleveland	New York	2010-10-27	27.0	3014
+Cleveland	New York	2010-10-28	-6.0	2646
+Cleveland	New York	2010-10-28	1.0	3014
+Cleveland	New York	2010-10-28	3.0	2630
+Cleveland	New York	2010-10-29	-12.0	3014
+Cleveland	New York	2010-10-29	-19.0	2646
+Cleveland	New York	2010-10-29	-4.0	2630
+Cleveland	New York	2010-10-30	-12.0	2932
+Cleveland	New York	2010-10-30	-23.0	2018
 Washington	New York	2010-10-20	-2.0	7291
 Washington	New York	2010-10-21	22.0	7291
 Washington	New York	2010-10-23	-16.0	7274
+Washington	New York	2010-10-23	-18.0	5917
+Washington	New York	2010-10-23	-21.0	5904
+Washington	New York	2010-10-23	-25.0	5832
 Washington	New York	2010-10-24	-26.0	7282
 Washington	New York	2010-10-25	9.0	7291
 Washington	New York	2010-10-26	4.0	7291
 Washington	New York	2010-10-27	26.0	7291
 Washington	New York	2010-10-28	45.0	7291
 Washington	New York	2010-10-29	1.0	7291
+Washington	New York	2010-10-30	-16.0	5917
+Washington	New York	2010-10-30	-27.0	5904
 Washington	New York	2010-10-31	-18.0	7282
 PREHOOK: query: select fl_date, count(*) from date_serde_regex group by fl_date
 PREHOOK: type: QUERY
@@ -278,12 +278,12 @@ POSTHOOK: type: ALTERTABLE_SERIALIZER
 POSTHOOK: Input: default@date_serde_lb
 POSTHOOK: Output: default@date_serde_lb
 PREHOOK: query: insert overwrite table date_serde_lb 
-  select fl_date, fl_num from date_serde_regex limit 1
+  select fl_date, fl_num from date_serde_regex order by fl_date, fl_num limit 1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@date_serde_regex
 PREHOOK: Output: default@date_serde_lb
 POSTHOOK: query: insert overwrite table date_serde_lb 
-  select fl_date, fl_num from date_serde_regex limit 1
+  select fl_date, fl_num from date_serde_regex order by fl_date, fl_num limit 1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@date_serde_regex
 POSTHOOK: Output: default@date_serde_lb
@@ -297,7 +297,7 @@ POSTHOOK: query: select * from date_serde_lb
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@date_serde_lb
 #### A masked pattern was here ####
-2010-10-20	1064
+2010-10-20	361
 PREHOOK: query: select c1, sum(c2) from date_serde_lb group by c1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@date_serde_lb
@@ -306,7 +306,7 @@ POSTHOOK: query: select c1, sum(c2) from date_serde_lb group by c1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@date_serde_lb
 #### A masked pattern was here ####
-2010-10-20	1064
+2010-10-20	361
 PREHOOK: query: create temporary table date_serde_ls (
   c1 date,
   c2 int
@@ -330,12 +330,12 @@ POSTHOOK: type: ALTERTABLE_SERIALIZER
 POSTHOOK: Input: default@date_serde_ls
 POSTHOOK: Output: default@date_serde_ls
 PREHOOK: query: insert overwrite table date_serde_ls 
-  select c1, c2 from date_serde_lb limit 1
+  select c1, c2 from date_serde_lb order by c1, c2 limit 1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@date_serde_lb
 PREHOOK: Output: default@date_serde_ls
 POSTHOOK: query: insert overwrite table date_serde_ls 
-  select c1, c2 from date_serde_lb limit 1
+  select c1, c2 from date_serde_lb order by c1, c2 limit 1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@date_serde_lb
 POSTHOOK: Output: default@date_serde_ls
@@ -349,7 +349,7 @@ POSTHOOK: query: select * from date_serde_ls
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@date_serde_ls
 #### A masked pattern was here ####
-2010-10-20	1064
+2010-10-20	361
 PREHOOK: query: select c1, sum(c2) from date_serde_ls group by c1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@date_serde_ls
@@ -358,7 +358,7 @@ POSTHOOK: query: select c1, sum(c2) from date_serde_ls group by c1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@date_serde_ls
 #### A masked pattern was here ####
-2010-10-20	1064
+2010-10-20	361
 PREHOOK: query: create temporary table date_serde_c (
   c1 date,
   c2 int
@@ -382,12 +382,12 @@ POSTHOOK: type: ALTERTABLE_SERIALIZER
 POSTHOOK: Input: default@date_serde_c
 POSTHOOK: Output: default@date_serde_c
 PREHOOK: query: insert overwrite table date_serde_c 
-  select c1, c2 from date_serde_ls limit 1
+  select c1, c2 from date_serde_ls order by c1, c2 limit 1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@date_serde_ls
 PREHOOK: Output: default@date_serde_c
 POSTHOOK: query: insert overwrite table date_serde_c 
-  select c1, c2 from date_serde_ls limit 1
+  select c1, c2 from date_serde_ls order by c1, c2 limit 1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@date_serde_ls
 POSTHOOK: Output: default@date_serde_c
@@ -401,7 +401,7 @@ POSTHOOK: query: select * from date_serde_c
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@date_serde_c
 #### A masked pattern was here ####
-2010-10-20	1064
+2010-10-20	361
 PREHOOK: query: select c1, sum(c2) from date_serde_c group by c1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@date_serde_c
@@ -410,7 +410,7 @@ POSTHOOK: query: select c1, sum(c2) from date_serde_c group by c1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@date_serde_c
 #### A masked pattern was here ####
-2010-10-20	1064
+2010-10-20	361
 PREHOOK: query: create temporary table date_serde_lbc (
   c1 date,
   c2 int
@@ -434,12 +434,12 @@ POSTHOOK: type: ALTERTABLE_SERIALIZER
 POSTHOOK: Input: default@date_serde_lbc
 POSTHOOK: Output: default@date_serde_lbc
 PREHOOK: query: insert overwrite table date_serde_lbc 
-  select c1, c2 from date_serde_c limit 1
+  select c1, c2 from date_serde_c order by c1, c2 limit 1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@date_serde_c
 PREHOOK: Output: default@date_serde_lbc
 POSTHOOK: query: insert overwrite table date_serde_lbc 
-  select c1, c2 from date_serde_c limit 1
+  select c1, c2 from date_serde_c order by c1, c2 limit 1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@date_serde_c
 POSTHOOK: Output: default@date_serde_lbc
@@ -453,7 +453,7 @@ POSTHOOK: query: select * from date_serde_lbc
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@date_serde_lbc
 #### A masked pattern was here ####
-2010-10-20	1064
+2010-10-20	361
 PREHOOK: query: select c1, sum(c2) from date_serde_lbc group by c1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@date_serde_lbc
@@ -462,7 +462,7 @@ POSTHOOK: query: select c1, sum(c2) from date_serde_lbc group by c1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@date_serde_lbc
 #### A masked pattern was here ####
-2010-10-20	1064
+2010-10-20	361
 PREHOOK: query: create temporary table date_serde_orc (
   c1 date,
   c2 int
@@ -486,12 +486,12 @@ POSTHOOK: type: ALTERTABLE_SERIALIZER
 POSTHOOK: Input: default@date_serde_orc
 POSTHOOK: Output: default@date_serde_orc
 PREHOOK: query: insert overwrite table date_serde_orc 
-  select c1, c2 from date_serde_lbc limit 1
+  select c1, c2 from date_serde_lbc order by c1, c2 limit 1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@date_serde_lbc
 PREHOOK: Output: default@date_serde_orc
 POSTHOOK: query: insert overwrite table date_serde_orc 
-  select c1, c2 from date_serde_lbc limit 1
+  select c1, c2 from date_serde_lbc order by c1, c2 limit 1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@date_serde_lbc
 POSTHOOK: Output: default@date_serde_orc
@@ -505,7 +505,7 @@ POSTHOOK: query: select * from date_serde_orc
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@date_serde_orc
 #### A masked pattern was here ####
-2010-10-20	1064
+2010-10-20	361
 PREHOOK: query: select c1, sum(c2) from date_serde_orc group by c1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@date_serde_orc
@@ -514,4 +514,4 @@ POSTHOOK: query: select c1, sum(c2) from date_serde_orc group by c1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@date_serde_orc
 #### A masked pattern was here ####
-2010-10-20	1064
+2010-10-20	361
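
The recurring edit in the file above is one determinism fix applied over and over: "limit 1" with no "order by" lets the engine return an arbitrary row, and the row picked under LLAP (fl_num 361) differs from the one recorded in the old golden file (1064). Ordering first pins the result to the smallest (fl_date, fl_num) pair, which the flight listing earlier in the file confirms is 2010-10-20 / 361. Side by side:

    -- nondeterministic: any single row of date_serde_regex may come back
    select fl_date, fl_num from date_serde_regex limit 1;

    -- deterministic: always 2010-10-20 / 361, the smallest (fl_date, fl_num)
    select fl_date, fl_num from date_serde_regex order by fl_date, fl_num limit 1;

The same fix then propagates through each serde variant (date_serde_lb, _ls, _c, _lbc, _orc), since each table copies its single row from the previous one.
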
diff --git a/ql/src/test/results/clientpositive/temp_table_parquet_mixed_partition_formats2.q.out b/ql/src/test/results/clientpositive/llap/temp_table_parquet_mixed_partition_formats2.q.out
similarity index 90%
rename from ql/src/test/results/clientpositive/temp_table_parquet_mixed_partition_formats2.q.out
rename to ql/src/test/results/clientpositive/llap/temp_table_parquet_mixed_partition_formats2.q.out
index 23bb41e..d6c9f56 100644
--- a/ql/src/test/results/clientpositive/temp_table_parquet_mixed_partition_formats2.q.out
+++ b/ql/src/test/results/clientpositive/llap/temp_table_parquet_mixed_partition_formats2.q.out
@@ -37,12 +37,12 @@ POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@parquet_table_json_partition_temp
 POSTHOOK: Output: default@parquet_table_json_partition_temp@ts=20150101
-PREHOOK: query: SELECT * FROM parquet_table_json_partition_temp LIMIT 100
+PREHOOK: query: SELECT * FROM parquet_table_json_partition_temp ORDER BY id, address, reports LIMIT 100
 PREHOOK: type: QUERY
 PREHOOK: Input: default@parquet_table_json_partition_temp
 PREHOOK: Input: default@parquet_table_json_partition_temp@ts=20150101
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM parquet_table_json_partition_temp LIMIT 100
+POSTHOOK: query: SELECT * FROM parquet_table_json_partition_temp ORDER BY id, address, reports LIMIT 100
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@parquet_table_json_partition_temp
 POSTHOOK: Input: default@parquet_table_json_partition_temp@ts=20150101
@@ -63,37 +63,37 @@ SERDE 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe'
 POSTHOOK: type: ALTERTABLE_FILEFORMAT
 POSTHOOK: Input: default@parquet_table_json_partition_temp
 POSTHOOK: Output: default@parquet_table_json_partition_temp
-PREHOOK: query: SELECT * FROM parquet_table_json_partition_temp LIMIT 100
+PREHOOK: query: SELECT * FROM parquet_table_json_partition_temp ORDER BY id, address, reports LIMIT 100
 PREHOOK: type: QUERY
 PREHOOK: Input: default@parquet_table_json_partition_temp
 PREHOOK: Input: default@parquet_table_json_partition_temp@ts=20150101
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM parquet_table_json_partition_temp LIMIT 100
+POSTHOOK: query: SELECT * FROM parquet_table_json_partition_temp ORDER BY id, address, reports LIMIT 100
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@parquet_table_json_partition_temp
 POSTHOOK: Input: default@parquet_table_json_partition_temp@ts=20150101
 #### A masked pattern was here ####
 1	{"country":1,"state":1}	[2,3]	20150101
 2	{"country":1,"state":2}	[]	20150101
-PREHOOK: query: CREATE TEMPORARY TABLE new_table_temp AS SELECT * FROM parquet_table_json_partition_temp LIMIT 100
+PREHOOK: query: CREATE TEMPORARY TABLE new_table_temp AS SELECT * FROM parquet_table_json_partition_temp ORDER BY id, address, reports LIMIT 100
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@parquet_table_json_partition_temp
 PREHOOK: Input: default@parquet_table_json_partition_temp@ts=20150101
 PREHOOK: Output: database:default
 PREHOOK: Output: default@new_table_temp
-POSTHOOK: query: CREATE TEMPORARY TABLE new_table_temp AS SELECT * FROM parquet_table_json_partition_temp LIMIT 100
+POSTHOOK: query: CREATE TEMPORARY TABLE new_table_temp AS SELECT * FROM parquet_table_json_partition_temp ORDER BY id, address, reports LIMIT 100
 POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@parquet_table_json_partition_temp
 POSTHOOK: Input: default@parquet_table_json_partition_temp@ts=20150101
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@new_table_temp
-PREHOOK: query: SELECT * FROM new_table_temp
+PREHOOK: query: SELECT * FROM new_table_temp ORDER by id, address, reports
 PREHOOK: type: QUERY
 PREHOOK: Input: default@new_table_temp
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM new_table_temp
+POSTHOOK: query: SELECT * FROM new_table_temp ORDER by id, address, reports
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@new_table_temp
 #### A masked pattern was here ####
-2	{"country":1,"state":2}	[]	20150101
 1	{"country":1,"state":1}	[2,3]	20150101
+2	{"country":1,"state":2}	[]	20150101
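
The same determinism fix matters more once the result is materialized: without an order by, both which 100 rows the CTAS captures and the order a later select returns them in are engine-dependent. Here only two rows exist, so the visible change is just their order (2 before 1 becomes 1 before 2), but with more rows the captured set itself could differ. The pattern used above, as one sketch:

    -- pin the row set captured by the CTAS and the order of later scans
    create temporary table new_table_temp as
      select * from parquet_table_json_partition_temp
      order by id, address, reports
      limit 100;

    select * from new_table_temp order by id, address, reports;
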
diff --git a/ql/src/test/results/clientpositive/llap/temp_table_partition_boolexpr.q.out b/ql/src/test/results/clientpositive/llap/temp_table_partition_boolexpr.q.out
new file mode 100644
index 0000000..0f18cda
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/temp_table_partition_boolexpr.q.out
@@ -0,0 +1,317 @@
+PREHOOK: query: create temporary table part_boolexpr_temp(key int, value string) partitioned by (dt int, ts string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@part_boolexpr_temp
+POSTHOOK: query: create temporary table part_boolexpr_temp(key int, value string) partitioned by (dt int, ts string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@part_boolexpr_temp
+PREHOOK: query: select count(*) from part_boolexpr_temp where key = 'abc'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part_boolexpr_temp
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from part_boolexpr_temp where key = 'abc'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part_boolexpr_temp
+#### A masked pattern was here ####
+0
+PREHOOK: query: select * from part_boolexpr_temp where dt = 'abc'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part_boolexpr_temp
+#### A masked pattern was here ####
+POSTHOOK: query: select * from part_boolexpr_temp where dt = 'abc'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part_boolexpr_temp
+#### A masked pattern was here ####
+PREHOOK: query: explain select count(1) from srcpart where true
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+#### A masked pattern was here ####
+POSTHOOK: query: explain select count(1) from srcpart where true
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+#### A masked pattern was here ####
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select count(1) from srcpart where false
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: explain select count(1) from srcpart where false
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: srcpart
+                  Statistics: Num rows: 2000 Data size: 37248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    Statistics: Num rows: 2000 Data size: 8000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Limit
+                      Number of rows: 0
+                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                      Group By Operator
+                        aggregations: count()
+                        minReductionHashAggr: 0.0
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          null sort order: 
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select count(1) from srcpart where true and hr='11'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+#### A masked pattern was here ####
+POSTHOOK: query: explain select count(1) from srcpart where true and hr='11'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+#### A masked pattern was here ####
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select count(1) from srcpart where true or hr='11'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+#### A masked pattern was here ####
+POSTHOOK: query: explain select count(1) from srcpart where true or hr='11'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+#### A masked pattern was here ####
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select count(1) from srcpart where false or hr='11'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+#### A masked pattern was here ####
+POSTHOOK: query: explain select count(1) from srcpart where false or hr='11'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+#### A masked pattern was here ####
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select count(1) from srcpart where false and hr='11'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: explain select count(1) from srcpart where false and hr='11'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: srcpart
+                  Statistics: Num rows: 2000 Data size: 37248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    Statistics: Num rows: 2000 Data size: 8000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Limit
+                      Number of rows: 0
+                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                      Group By Operator
+                        aggregations: count()
+                        minReductionHashAggr: 0.0
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          null sort order: 
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select count(1) from srcpart where INPUT__FILE__NAME is not null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: explain select count(1) from srcpart where INPUT__FILE__NAME is not null
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: srcpart
+                  filterExpr: INPUT__FILE__NAME is not null (type: boolean)
+                  Statistics: Num rows: 2000 Data size: 37248 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: INPUT__FILE__NAME is not null (type: boolean)
+                    Statistics: Num rows: 2000 Data size: 37248 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      Statistics: Num rows: 2000 Data size: 37248 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: count()
+                        minReductionHashAggr: 0.99
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          null sort order: 
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
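
The new golden file above pins down how constant boolean predicates interact with partition pruning under LLAP. A predicate that folds to true (where true, true or hr='11', false or hr='11') lets the count be answered without a scan: the plan collapses to a bare Fetch Operator and no individual partitions appear among the inputs. A predicate that folds to false still compiles a full Tez DAG over all four partitions, but the scan feeds a Limit with "Number of rows: 0" into the count, so the aggregation sees no rows. A minimal pair:

    -- folds to true: no scan needed, plan is a single Fetch Operator
    explain select count(1) from srcpart where true;

    -- folds to false: full DAG, but a Limit 0 drops every row before count()
    explain select count(1) from srcpart where false;

The last case, INPUT__FILE__NAME is not null, shows the other extreme: a virtual-column predicate can be neither folded nor pruned, so it survives as a real Filter Operator over the full scan.
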
diff --git a/ql/src/test/results/clientpositive/temp_table_partition_condition_remover.q.out b/ql/src/test/results/clientpositive/llap/temp_table_partition_condition_remover.q.out
similarity index 94%
rename from ql/src/test/results/clientpositive/temp_table_partition_condition_remover.q.out
rename to ql/src/test/results/clientpositive/llap/temp_table_partition_condition_remover.q.out
index 18f5348..a6b81bc 100644
--- a/ql/src/test/results/clientpositive/temp_table_partition_condition_remover.q.out
+++ b/ql/src/test/results/clientpositive/llap/temp_table_partition_condition_remover.q.out
@@ -49,11 +49,9 @@ STAGE PLANS:
         TableScan
           alias: foo_n5_temp
           filterExpr: (s <> 'bar') (type: boolean)
-          Statistics: Num rows: 10 Data size: 1880 Basic stats: COMPLETE Column stats: PARTIAL
           Select Operator
             expressions: i (type: int), s (type: string)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 10 Data size: 1880 Basic stats: COMPLETE Column stats: PARTIAL
             ListSink
 
 PREHOOK: query: select * from foo_n5_temp where s not in ('bar')
diff --git a/ql/src/test/results/clientpositive/temp_table_partition_ctas.q.out b/ql/src/test/results/clientpositive/llap/temp_table_partition_ctas.q.out
similarity index 95%
rename from ql/src/test/results/clientpositive/temp_table_partition_ctas.q.out
rename to ql/src/test/results/clientpositive/llap/temp_table_partition_ctas.q.out
index bd3574f..9a3b4ec 100644
--- a/ql/src/test/results/clientpositive/temp_table_partition_ctas.q.out
+++ b/ql/src/test/results/clientpositive/llap/temp_table_partition_ctas.q.out
@@ -15,48 +15,61 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@partition_ctas_1_temp
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-3 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-3
-  Stage-2 depends on stages: Stage-0, Stage-3
+  Stage-2 depends on stages: Stage-1
+  Stage-4 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-4
+  Stage-3 depends on stages: Stage-0, Stage-4
 
 STAGE PLANS:
   Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            filterExpr: ((UDFToDouble(key) > 200.0D) and (UDFToDouble(key) < 300.0D)) (type: boolean)
-            Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: ((UDFToDouble(key) > 200.0D) and (UDFToDouble(key) < 300.0D)) (type: boolean)
-              Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  filterExpr: ((UDFToDouble(key) > 200.0D) and (UDFToDouble(key) < 300.0D)) (type: boolean)
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: ((UDFToDouble(key) > 200.0D) and (UDFToDouble(key) < 300.0D)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: value (type: string), key (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col1 (type: string)
+                        null sort order: a
+                        sort order: +
+                        Map-reduce partition columns: _col1 (type: string)
+                        Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col0 (type: string)
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
               Select Operator
-                expressions: value (type: string), key (type: string)
+                expressions: VALUE._col0 (type: string), KEY._col1 (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  key expressions: _col1 (type: string)
-                  null sort order: a
-                  sort order: +
-                  Map-reduce partition columns: _col1 (type: string)
+                File Output Operator
+                  compressed: false
+                  Dp Sort State: PARTITION_SORTED
                   Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE
-                  value expressions: _col0 (type: string)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Select Operator
-          expressions: VALUE._col0 (type: string), KEY._col1 (type: string)
-          outputColumnNames: _col0, _col1
-          File Output Operator
-            compressed: false
-            Dp Sort State: PARTITION_SORTED
-            Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.partition_ctas_1_temp
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.partition_ctas_1_temp
 
-  Stage: Stage-3
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-4
     Create Table
       columns: value string
       name: default.partition_ctas_1_temp
@@ -79,7 +92,7 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.partition_ctas_1_temp
 
-  Stage: Stage-2
+  Stage: Stage-3
     Stats Work
       Basic Stats Work:
 
@@ -282,11 +295,9 @@ STAGE PLANS:
         TableScan
           alias: partition_ctas_1_temp
           filterExpr: (238.0D = 238.0D) (type: boolean)
-          Statistics: Num rows: 2 Data size: 550 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: value (type: string), key (type: string)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 2 Data size: 550 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: SELECT * FROM partition_ctas_1_temp where key = 238
@@ -461,11 +472,9 @@ STAGE PLANS:
         TableScan
           alias: partition_ctas_2_temp
           filterExpr: (value = 'val_238') (type: boolean)
-          Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: key (type: string), 'val_238' (type: string)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: SELECT * FROM partition_ctas_2_temp where value = 'val_238'
@@ -615,38 +624,22 @@ POSTHOOK: Input: default@partition_ctas_2_temp@value=val_296
 POSTHOOK: Input: default@partition_ctas_2_temp@value=val_298
 #### A masked pattern was here ####
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: partition_ctas_2_temp
-            filterExpr: (UDFToDouble(key) = 238.0D) (type: boolean)
-            Statistics: Num rows: 101 Data size: 36432 Basic stats: COMPLETE Column stats: PARTIAL
-            Filter Operator
-              predicate: (UDFToDouble(key) = 238.0D) (type: boolean)
-              Statistics: Num rows: 50 Data size: 18216 Basic stats: COMPLETE Column stats: PARTIAL
-              Select Operator
-                expressions: value (type: string)
-                outputColumnNames: _col0
-                Statistics: Num rows: 50 Data size: 9200 Basic stats: COMPLETE Column stats: PARTIAL
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 50 Data size: 9200 Basic stats: COMPLETE Column stats: PARTIAL
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-      Execution mode: vectorized
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: partition_ctas_2_temp
+          filterExpr: (UDFToDouble(key) = 238.0D) (type: boolean)
+          Filter Operator
+            predicate: (UDFToDouble(key) = 238.0D) (type: boolean)
+            Select Operator
+              expressions: value (type: string)
+              outputColumnNames: _col0
+              ListSink
 
 PREHOOK: query: SELECT value FROM partition_ctas_2_temp where key = 238
 PREHOOK: type: QUERY
@@ -942,11 +935,9 @@ STAGE PLANS:
         TableScan
           alias: partition_ctas_diff_order_temp
           filterExpr: (value = 'val_238') (type: boolean)
-          Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: key (type: string), 'val_238' (type: string)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: SELECT * FROM partition_ctas_diff_order_temp where value = 'val_238'
@@ -1113,11 +1104,9 @@ STAGE PLANS:
         TableScan
           alias: partition_ctas_complex_order_temp
           filterExpr: (c0 = 'val_238_0') (type: boolean)
-          Statistics: Num rows: 2 Data size: 1840 Basic stats: COMPLETE Column stats: PARTIAL
           Select Operator
             expressions: c2 (type: string), c3 (type: string), c5 (type: string), 'val_238_0' (type: string), c4 (type: string), c1 (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-            Statistics: Num rows: 2 Data size: 2026 Basic stats: COMPLETE Column stats: PARTIAL
             ListSink
 
 PREHOOK: query: SELECT * FROM partition_ctas_complex_order_temp where c0 = 'val_238_0'
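
Two plan-shape changes run through the file above. First, the partitioned CTAS now compiles to a Tez DAG with a Dependency Collection stage, which renumbers the remaining stages (Create Table moves from Stage-3 to Stage-4, Stats Work from Stage-2 to Stage-3). Second, the simple filtered select near the end no longer needs a job at all: the Filter/Select tree moves under Stage-0's Fetch Operator, so the query runs as a pure fetch task. A sketch of the statement behind the first plan, reconstructed from the plan itself rather than quoted from the q file, so treat the exact syntax as an assumption:

    -- assumed shape of the CTAS under test, reconstructed from the plan
    -- (target default.partition_ctas_1_temp, column "value string",
    -- dynamic partition on key, filter 200 < key < 300)
    create temporary table partition_ctas_1_temp partitioned by (key) as
      select value, key from src
      where key > 200 and key < 300;
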
diff --git a/ql/src/test/results/clientpositive/temp_table_partition_multilevels.q.out b/ql/src/test/results/clientpositive/llap/temp_table_partition_multilevels.q.out
similarity index 92%
rename from ql/src/test/results/clientpositive/temp_table_partition_multilevels.q.out
rename to ql/src/test/results/clientpositive/llap/temp_table_partition_multilevels.q.out
index 2ea8bf8..1fcd595 100644
--- a/ql/src/test/results/clientpositive/temp_table_partition_multilevels.q.out
+++ b/ql/src/test/results/clientpositive/llap/temp_table_partition_multilevels.q.out
@@ -6,7 +6,7 @@ POSTHOOK: query: create temporary table partition_test_multilevel_temp (key stri
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@partition_test_multilevel_temp
-PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='111', level3='11') select key, value from srcpart tablesample (11 rows)
+PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='111', level3='11') select key, value from srcpart tablesample (11 rows) order by key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -14,7 +14,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 PREHOOK: Output: default@partition_test_multilevel_temp@level1=1111/level2=111/level3=11
-POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='111', level3='11') select key, value from srcpart tablesample (11 rows)
+POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='111', level3='11') select key, value from srcpart tablesample (11 rows) order by key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -24,7 +24,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: default@partition_test_multilevel_temp@level1=1111/level2=111/level3=11
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=1111,level2=111,level3=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=1111,level2=111,level3=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='111', level3='22') select key, value from srcpart tablesample (12 rows)
+PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='111', level3='22') select key, value from srcpart tablesample (12 rows) order by key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -32,7 +32,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 PREHOOK: Output: default@partition_test_multilevel_temp@level1=1111/level2=111/level3=22
-POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='111', level3='22') select key, value from srcpart tablesample (12 rows)
+POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='111', level3='22') select key, value from srcpart tablesample (12 rows) order by key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -42,7 +42,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: default@partition_test_multilevel_temp@level1=1111/level2=111/level3=22
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=1111,level2=111,level3=22).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=1111,level2=111,level3=22).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='111', level3='33') select key, value from srcpart tablesample (13 rows)
+PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='111', level3='33') select key, value from srcpart tablesample (13 rows) order by key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -50,7 +50,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 PREHOOK: Output: default@partition_test_multilevel_temp@level1=1111/level2=111/level3=33
-POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='111', level3='33') select key, value from srcpart tablesample (13 rows)
+POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='111', level3='33') select key, value from srcpart tablesample (13 rows) order by key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -60,7 +60,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: default@partition_test_multilevel_temp@level1=1111/level2=111/level3=33
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=1111,level2=111,level3=33).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=1111,level2=111,level3=33).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='111', level3='44') select key, value from srcpart tablesample (14 rows)
+PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='111', level3='44') select key, value from srcpart tablesample (14 rows) order by key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -68,7 +68,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 PREHOOK: Output: default@partition_test_multilevel_temp@level1=1111/level2=111/level3=44
-POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='111', level3='44') select key, value from srcpart tablesample (14 rows)
+POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='111', level3='44') select key, value from srcpart tablesample (14 rows) order by key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -78,7 +78,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: default@partition_test_multilevel_temp@level1=1111/level2=111/level3=44
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=1111,level2=111,level3=44).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=1111,level2=111,level3=44).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='222', level3='11') select key, value from srcpart tablesample (15 rows)
+PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='222', level3='11') select key, value from srcpart tablesample (15 rows) order by key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -86,7 +86,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 PREHOOK: Output: default@partition_test_multilevel_temp@level1=1111/level2=222/level3=11
-POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='222', level3='11') select key, value from srcpart tablesample (15 rows)
+POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='222', level3='11') select key, value from srcpart tablesample (15 rows) order by key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -96,7 +96,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: default@partition_test_multilevel_temp@level1=1111/level2=222/level3=11
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=1111,level2=222,level3=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=1111,level2=222,level3=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='222', level3='22') select key, value from srcpart tablesample (16 rows)
+PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='222', level3='22') select key, value from srcpart tablesample (16 rows) order by key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -104,7 +104,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 PREHOOK: Output: default@partition_test_multilevel_temp@level1=1111/level2=222/level3=22
-POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='222', level3='22') select key, value from srcpart tablesample (16 rows)
+POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='222', level3='22') select key, value from srcpart tablesample (16 rows) order by key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -114,7 +114,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: default@partition_test_multilevel_temp@level1=1111/level2=222/level3=22
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=1111,level2=222,level3=22).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=1111,level2=222,level3=22).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='222', level3='33') select key, value from srcpart tablesample (17 rows)
+PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='222', level3='33') select key, value from srcpart tablesample (17 rows) order by key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -122,7 +122,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 PREHOOK: Output: default@partition_test_multilevel_temp@level1=1111/level2=222/level3=33
-POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='222', level3='33') select key, value from srcpart tablesample (17 rows)
+POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='222', level3='33') select key, value from srcpart tablesample (17 rows) order by key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -132,7 +132,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: default@partition_test_multilevel_temp@level1=1111/level2=222/level3=33
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=1111,level2=222,level3=33).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=1111,level2=222,level3=33).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='222', level3='44') select key, value from srcpart tablesample (18 rows)
+PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='222', level3='44') select key, value from srcpart tablesample (18 rows) order by key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -140,7 +140,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 PREHOOK: Output: default@partition_test_multilevel_temp@level1=1111/level2=222/level3=44
-POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='222', level3='44') select key, value from srcpart tablesample (18 rows)
+POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='222', level3='44') select key, value from srcpart tablesample (18 rows) order by key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -150,7 +150,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: default@partition_test_multilevel_temp@level1=1111/level2=222/level3=44
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=1111,level2=222,level3=44).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=1111,level2=222,level3=44).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='333', level3='11') select key, value from srcpart tablesample (19 rows)
+PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='333', level3='11') select key, value from srcpart tablesample (19 rows) order by key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -158,7 +158,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 PREHOOK: Output: default@partition_test_multilevel_temp@level1=1111/level2=333/level3=11
-POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='333', level3='11') select key, value from srcpart tablesample (19 rows)
+POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='333', level3='11') select key, value from srcpart tablesample (19 rows) order by key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -168,7 +168,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: default@partition_test_multilevel_temp@level1=1111/level2=333/level3=11
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=1111,level2=333,level3=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=1111,level2=333,level3=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='333', level3='22') select key, value from srcpart tablesample (20 rows)
+PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='333', level3='22') select key, value from srcpart tablesample (20 rows) order by key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -176,7 +176,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 PREHOOK: Output: default@partition_test_multilevel_temp@level1=1111/level2=333/level3=22
-POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='333', level3='22') select key, value from srcpart tablesample (20 rows)
+POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='333', level3='22') select key, value from srcpart tablesample (20 rows) order by key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -186,7 +186,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: default@partition_test_multilevel_temp@level1=1111/level2=333/level3=22
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=1111,level2=333,level3=22).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=1111,level2=333,level3=22).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='333', level3='33') select key, value from srcpart tablesample (21 rows)
+PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='333', level3='33') select key, value from srcpart tablesample (21 rows) order by key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -194,7 +194,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 PREHOOK: Output: default@partition_test_multilevel_temp@level1=1111/level2=333/level3=33
-POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='333', level3='33') select key, value from srcpart tablesample (21 rows)
+POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='333', level3='33') select key, value from srcpart tablesample (21 rows) order by key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -204,7 +204,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: default@partition_test_multilevel_temp@level1=1111/level2=333/level3=33
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=1111,level2=333,level3=33).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=1111,level2=333,level3=33).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='333', level3='44') select key, value from srcpart tablesample (22 rows)
+PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='333', level3='44') select key, value from srcpart tablesample (22 rows) order by key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -212,7 +212,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 PREHOOK: Output: default@partition_test_multilevel_temp@level1=1111/level2=333/level3=44
-POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='333', level3='44') select key, value from srcpart tablesample (22 rows)
+POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='1111', level2='333', level3='44') select key, value from srcpart tablesample (22 rows) order by key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -222,7 +222,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: default@partition_test_multilevel_temp@level1=1111/level2=333/level3=44
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=1111,level2=333,level3=44).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=1111,level2=333,level3=44).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='111', level3='11') select key, value from srcpart tablesample (11 rows)
+PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='111', level3='11') select key, value from srcpart tablesample (11 rows) order by key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -230,7 +230,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 PREHOOK: Output: default@partition_test_multilevel_temp@level1=2222/level2=111/level3=11
-POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='111', level3='11') select key, value from srcpart tablesample (11 rows)
+POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='111', level3='11') select key, value from srcpart tablesample (11 rows) order by key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -240,7 +240,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: default@partition_test_multilevel_temp@level1=2222/level2=111/level3=11
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=2222,level2=111,level3=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=2222,level2=111,level3=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='111', level3='22') select key, value from srcpart tablesample (12 rows)
+PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='111', level3='22') select key, value from srcpart tablesample (12 rows) order by key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -248,7 +248,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 PREHOOK: Output: default@partition_test_multilevel_temp@level1=2222/level2=111/level3=22
-POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='111', level3='22') select key, value from srcpart tablesample (12 rows)
+POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='111', level3='22') select key, value from srcpart tablesample (12 rows) order by key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -258,7 +258,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: default@partition_test_multilevel_temp@level1=2222/level2=111/level3=22
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=2222,level2=111,level3=22).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=2222,level2=111,level3=22).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='111', level3='33') select key, value from srcpart tablesample (13 rows)
+PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='111', level3='33') select key, value from srcpart tablesample (13 rows) order by key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -266,7 +266,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 PREHOOK: Output: default@partition_test_multilevel_temp@level1=2222/level2=111/level3=33
-POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='111', level3='33') select key, value from srcpart tablesample (13 rows)
+POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='111', level3='33') select key, value from srcpart tablesample (13 rows) order by key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -276,7 +276,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: default@partition_test_multilevel_temp@level1=2222/level2=111/level3=33
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=2222,level2=111,level3=33).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=2222,level2=111,level3=33).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='111', level3='44') select key, value from srcpart tablesample (14 rows)
+PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='111', level3='44') select key, value from srcpart tablesample (14 rows) order by key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -284,7 +284,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 PREHOOK: Output: default@partition_test_multilevel_temp@level1=2222/level2=111/level3=44
-POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='111', level3='44') select key, value from srcpart tablesample (14 rows)
+POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='111', level3='44') select key, value from srcpart tablesample (14 rows) order by key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -294,7 +294,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: default@partition_test_multilevel_temp@level1=2222/level2=111/level3=44
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=2222,level2=111,level3=44).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=2222,level2=111,level3=44).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='222', level3='11') select key, value from srcpart tablesample (15 rows)
+PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='222', level3='11') select key, value from srcpart tablesample (15 rows) order by key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -302,7 +302,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 PREHOOK: Output: default@partition_test_multilevel_temp@level1=2222/level2=222/level3=11
-POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='222', level3='11') select key, value from srcpart tablesample (15 rows)
+POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='222', level3='11') select key, value from srcpart tablesample (15 rows) order by key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -312,7 +312,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: default@partition_test_multilevel_temp@level1=2222/level2=222/level3=11
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=2222,level2=222,level3=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=2222,level2=222,level3=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='222', level3='22') select key, value from srcpart tablesample (16 rows)
+PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='222', level3='22') select key, value from srcpart tablesample (16 rows) order by key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -320,7 +320,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 PREHOOK: Output: default@partition_test_multilevel_temp@level1=2222/level2=222/level3=22
-POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='222', level3='22') select key, value from srcpart tablesample (16 rows)
+POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='222', level3='22') select key, value from srcpart tablesample (16 rows) order by key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -330,7 +330,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: default@partition_test_multilevel_temp@level1=2222/level2=222/level3=22
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=2222,level2=222,level3=22).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=2222,level2=222,level3=22).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='222', level3='33') select key, value from srcpart tablesample (17 rows)
+PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='222', level3='33') select key, value from srcpart tablesample (17 rows) order by key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -338,7 +338,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 PREHOOK: Output: default@partition_test_multilevel_temp@level1=2222/level2=222/level3=33
-POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='222', level3='33') select key, value from srcpart tablesample (17 rows)
+POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='222', level3='33') select key, value from srcpart tablesample (17 rows) order by key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -348,7 +348,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: default@partition_test_multilevel_temp@level1=2222/level2=222/level3=33
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=2222,level2=222,level3=33).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=2222,level2=222,level3=33).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='222', level3='44') select key, value from srcpart tablesample (18 rows)
+PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='222', level3='44') select key, value from srcpart tablesample (18 rows) order by key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -356,7 +356,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 PREHOOK: Output: default@partition_test_multilevel_temp@level1=2222/level2=222/level3=44
-POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='222', level3='44') select key, value from srcpart tablesample (18 rows)
+POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='222', level3='44') select key, value from srcpart tablesample (18 rows) order by key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -366,7 +366,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: default@partition_test_multilevel_temp@level1=2222/level2=222/level3=44
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=2222,level2=222,level3=44).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=2222,level2=222,level3=44).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='333', level3='11') select key, value from srcpart tablesample (19 rows)
+PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='333', level3='11') select key, value from srcpart tablesample (19 rows) order by key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -374,7 +374,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 PREHOOK: Output: default@partition_test_multilevel_temp@level1=2222/level2=333/level3=11
-POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='333', level3='11') select key, value from srcpart tablesample (19 rows)
+POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='333', level3='11') select key, value from srcpart tablesample (19 rows) order by key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -384,7 +384,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: default@partition_test_multilevel_temp@level1=2222/level2=333/level3=11
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=2222,level2=333,level3=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=2222,level2=333,level3=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='333', level3='22') select key, value from srcpart tablesample (20 rows)
+PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='333', level3='22') select key, value from srcpart tablesample (20 rows) order by key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -392,7 +392,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 PREHOOK: Output: default@partition_test_multilevel_temp@level1=2222/level2=333/level3=22
-POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='333', level3='22') select key, value from srcpart tablesample (20 rows)
+POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='333', level3='22') select key, value from srcpart tablesample (20 rows) order by key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -402,7 +402,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: default@partition_test_multilevel_temp@level1=2222/level2=333/level3=22
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=2222,level2=333,level3=22).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=2222,level2=333,level3=22).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='333', level3='33') select key, value from srcpart tablesample (21 rows)
+PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='333', level3='33') select key, value from srcpart tablesample (21 rows) order by key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -410,7 +410,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 PREHOOK: Output: default@partition_test_multilevel_temp@level1=2222/level2=333/level3=33
-POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='333', level3='33') select key, value from srcpart tablesample (21 rows)
+POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='333', level3='33') select key, value from srcpart tablesample (21 rows) order by key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -420,7 +420,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: default@partition_test_multilevel_temp@level1=2222/level2=333/level3=33
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=2222,level2=333,level3=33).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: partition_test_multilevel_temp PARTITION(level1=2222,level2=333,level3=33).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='333', level3='44') select key, value from srcpart tablesample (22 rows)
+PREHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='333', level3='44') select key, value from srcpart tablesample (22 rows) order by key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -428,7 +428,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 PREHOOK: Output: default@partition_test_multilevel_temp@level1=2222/level2=333/level3=44
-POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='333', level3='44') select key, value from srcpart tablesample (22 rows)
+POSTHOOK: query: insert overwrite table partition_test_multilevel_temp partition(level1='2222', level2='333', level3='44') select key, value from srcpart tablesample (22 rows) order by key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
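
The recurring change in the hunks above is the addition of "order by key, value" to every sampled insert. TABLESAMPLE (n ROWS) guarantees nothing about the order in which the sampled rows are written, so without the ORDER BY the row order inside each partition, and therefore the golden output, could differ between the MR and LLAP drivers. A minimal sketch of the pattern, using a hypothetical table t:

    -- deterministic variant: the ORDER BY pins the order of the sampled rows
    insert overwrite table t partition (level1 = '1111')
    select key, value
    from srcpart tablesample (10 rows)
    order by key, value;
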
@@ -988,49 +988,58 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: partition_test_multilevel_temp
-            filterExpr: ((level1 = '2222') and level2 BETWEEN '222' AND '333' and level3 BETWEEN '11' AND '33') (type: boolean)
-            Statistics: Num rows: 108 Data size: 40890 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: level2 (type: string), level3 (type: string)
-              outputColumnNames: level2, level3
-              Statistics: Num rows: 108 Data size: 40890 Basic stats: COMPLETE Column stats: COMPLETE
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: partition_test_multilevel_temp
+                  filterExpr: ((level1 = '2222') and level2 BETWEEN '222' AND '333' and level3 BETWEEN '11' AND '33') (type: boolean)
+                  Statistics: Num rows: 108 Data size: 40890 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: level2 (type: string), level3 (type: string)
+                    outputColumnNames: level2, level3
+                    Statistics: Num rows: 108 Data size: 40890 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: count()
+                      keys: level2 (type: string), level3 (type: string)
+                      minReductionHashAggr: 0.962963
+                      mode: hash
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 6 Data size: 2256 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string), _col1 (type: string)
+                        null sort order: zz
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                        Statistics: Num rows: 6 Data size: 2256 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col2 (type: bigint)
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
               Group By Operator
-                aggregations: count()
-                keys: level2 (type: string), level3 (type: string)
-                minReductionHashAggr: 0.99
-                mode: hash
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: string), KEY._col1 (type: string)
+                mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 6 Data size: 2256 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string)
-                  null sort order: zz
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                  Statistics: Num rows: 6 Data size: 2256 Basic stats: COMPLETE Column stats: COMPLETE
-                  value expressions: _col2 (type: bigint)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 6 Data size: 2256 Basic stats: COMPLETE Column stats: COMPLETE
-          Select Operator
-            expressions: '2222' (type: string), _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-            outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 6 Data size: 2784 Basic stats: COMPLETE Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 6 Data size: 2784 Basic stats: COMPLETE Column stats: COMPLETE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                Select Operator
+                  expressions: '2222' (type: string), _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  Statistics: Num rows: 6 Data size: 2784 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 6 Data size: 2784 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -1588,49 +1597,58 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: partition_test_multilevel_temp
-            filterExpr: ((level1 = '2222') and level2 BETWEEN '222' AND '333' and level3 BETWEEN '11' AND '33') (type: boolean)
-            Statistics: Num rows: 108 Data size: 40890 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: level2 (type: string), level3 (type: string)
-              outputColumnNames: level2, level3
-              Statistics: Num rows: 108 Data size: 40890 Basic stats: COMPLETE Column stats: COMPLETE
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: partition_test_multilevel_temp
+                  filterExpr: ((level1 = '2222') and level2 BETWEEN '222' AND '333' and level3 BETWEEN '11' AND '33') (type: boolean)
+                  Statistics: Num rows: 108 Data size: 40890 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: level2 (type: string), level3 (type: string)
+                    outputColumnNames: level2, level3
+                    Statistics: Num rows: 108 Data size: 40890 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: count()
+                      keys: level2 (type: string), level3 (type: string)
+                      minReductionHashAggr: 0.962963
+                      mode: hash
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 6 Data size: 2256 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string), _col1 (type: string)
+                        null sort order: zz
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                        Statistics: Num rows: 6 Data size: 2256 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col2 (type: bigint)
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
               Group By Operator
-                aggregations: count()
-                keys: level2 (type: string), level3 (type: string)
-                minReductionHashAggr: 0.99
-                mode: hash
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: string), KEY._col1 (type: string)
+                mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 6 Data size: 2256 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string)
-                  null sort order: zz
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                  Statistics: Num rows: 6 Data size: 2256 Basic stats: COMPLETE Column stats: COMPLETE
-                  value expressions: _col2 (type: bigint)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 6 Data size: 2256 Basic stats: COMPLETE Column stats: COMPLETE
-          Select Operator
-            expressions: '2222' (type: string), _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
-            outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 6 Data size: 2784 Basic stats: COMPLETE Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 6 Data size: 2784 Basic stats: COMPLETE Column stats: COMPLETE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                Select Operator
+                  expressions: '2222' (type: string), _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  Statistics: Num rows: 6 Data size: 2784 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 6 Data size: 2784 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
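
The Stage-1 rewrites in the two hunks above are the expected TestCliDriver to TestMiniLlapLocal delta: the single Map Reduce block becomes a Tez DAG (Map 1 feeding Reducer 2 over a SIMPLE_EDGE), both vertices run vectorized under LLAP, and minReductionHashAggr is recomputed (0.99 becomes 0.962963). The operator tree itself is unchanged. Reconstructed from that tree (the exact query text sits outside these hunks, so treat this as an approximation), the plan corresponds to a grouped count over the pruned partitions:

    select '2222', level2, level3, count(*)
    from partition_test_multilevel_temp
    where level1 = '2222'
      and level2 between '222' and '333'
      and level3 between '11' and '33'
    group by level2, level3;
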
diff --git a/ql/src/test/results/clientpositive/temp_table_partition_pruning.q.out b/ql/src/test/results/clientpositive/llap/temp_table_partition_pruning.q.out
similarity index 68%
rename from ql/src/test/results/clientpositive/temp_table_partition_pruning.q.out
rename to ql/src/test/results/clientpositive/llap/temp_table_partition_pruning.q.out
index fe49987..5b5fe61 100644
--- a/ql/src/test/results/clientpositive/temp_table_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/llap/temp_table_partition_pruning.q.out
@@ -116,57 +116,14 @@ OPTIMIZED SQL: SELECT `customer`, `dt`
 FROM `default`.`daysales_temp`
 WHERE NVL(`dt` = '2001-01-01' AND `customer` = 1, FALSE)
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: daysales_temp
-            filterExpr: COALESCE(((dt = '2001-01-01') and (customer = 1)),false) (type: boolean)
-            Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: PARTIAL
-            GatherStats: false
-            Filter Operator
-              isSamplingPred: false
-              predicate: COALESCE(((dt = '2001-01-01') and (customer = 1)),false) (type: boolean)
-              Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: PARTIAL
-              Select Operator
-                expressions: customer (type: int), dt (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: PARTIAL
-                File Output Operator
-                  bucketingVersion: 2
-                  compressed: false
-                  GlobalTableId: 0
-#### A masked pattern was here ####
-                  NumFilesPerFileSink: 1
-                  Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: PARTIAL
-#### A masked pattern was here ####
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      properties:
-                        bucketing_version -1
-                        columns _col0,_col1
-                        columns.types int:string
-                        escape.delim \
-                        hive.serialization.extend.additional.nesting.levels true
-                        serialization.escape.crlf true
-                        serialization.format 1
-                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  TotalFiles: 1
-                  GatherStats: false
-                  MultiFileSpray: false
-      Execution mode: vectorized
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
           Partition
-            base file name: dt=2001-01-01
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             partition values:
@@ -200,9 +157,7 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.daysales_temp
             name: default.daysales_temp
-#### A masked pattern was here ####
           Partition
-            base file name: dt=2001-01-03
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             partition values:
@@ -236,14 +191,18 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.daysales_temp
             name: default.daysales_temp
-      Truncated Path -> Alias:
-#### A masked pattern was here ####
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: daysales_temp
+          filterExpr: COALESCE(((dt = '2001-01-01') and (customer = 1)),false) (type: boolean)
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: COALESCE(((dt = '2001-01-01') and (customer = 1)),false) (type: boolean)
+            Select Operator
+              expressions: customer (type: int), dt (type: string)
+              outputColumnNames: _col0, _col1
+              ListSink
 
 PREHOOK: query: explain extended select * from daysales_temp where nvl(dt='2001-01-01' or customer=3, false)
 PREHOOK: type: QUERY
@@ -261,57 +220,14 @@ OPTIMIZED SQL: SELECT `customer`, `dt`
 FROM `default`.`daysales_temp`
 WHERE NVL(`dt` = '2001-01-01' OR `customer` = 3, FALSE)
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: daysales_temp
-            filterExpr: COALESCE(((dt = '2001-01-01') or (customer = 3)),false) (type: boolean)
-            Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: PARTIAL
-            GatherStats: false
-            Filter Operator
-              isSamplingPred: false
-              predicate: COALESCE(((dt = '2001-01-01') or (customer = 3)),false) (type: boolean)
-              Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: PARTIAL
-              Select Operator
-                expressions: customer (type: int), dt (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: PARTIAL
-                File Output Operator
-                  bucketingVersion: 2
-                  compressed: false
-                  GlobalTableId: 0
-#### A masked pattern was here ####
-                  NumFilesPerFileSink: 1
-                  Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: PARTIAL
-#### A masked pattern was here ####
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      properties:
-                        bucketing_version -1
-                        columns _col0,_col1
-                        columns.types int:string
-                        escape.delim \
-                        hive.serialization.extend.additional.nesting.levels true
-                        serialization.escape.crlf true
-                        serialization.format 1
-                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  TotalFiles: 1
-                  GatherStats: false
-                  MultiFileSpray: false
-      Execution mode: vectorized
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
           Partition
-            base file name: dt=2001-01-01
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             partition values:
@@ -345,9 +261,7 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.daysales_temp
             name: default.daysales_temp
-#### A masked pattern was here ####
           Partition
-            base file name: dt=2001-01-03
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             partition values:
@@ -381,14 +295,18 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.daysales_temp
             name: default.daysales_temp
-      Truncated Path -> Alias:
-#### A masked pattern was here ####
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: daysales_temp
+          filterExpr: COALESCE(((dt = '2001-01-01') or (customer = 3)),false) (type: boolean)
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: COALESCE(((dt = '2001-01-01') or (customer = 3)),false) (type: boolean)
+            Select Operator
+              expressions: customer (type: int), dt (type: string)
+              outputColumnNames: _col0, _col1
+              ListSink
 
 PREHOOK: query: explain extended select * from daysales_temp where nvl(dt='2001-01-01' or customer=3, false)
 PREHOOK: type: QUERY
@@ -406,57 +324,14 @@ OPTIMIZED SQL: SELECT `customer`, `dt`
 FROM `default`.`daysales_temp`
 WHERE NVL(`dt` = '2001-01-01' OR `customer` = 3, FALSE)
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: daysales_temp
-            filterExpr: COALESCE(((dt = '2001-01-01') or (customer = 3)),false) (type: boolean)
-            Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: PARTIAL
-            GatherStats: false
-            Filter Operator
-              isSamplingPred: false
-              predicate: COALESCE(((dt = '2001-01-01') or (customer = 3)),false) (type: boolean)
-              Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: PARTIAL
-              Select Operator
-                expressions: customer (type: int), dt (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: PARTIAL
-                File Output Operator
-                  bucketingVersion: 2
-                  compressed: false
-                  GlobalTableId: 0
-#### A masked pattern was here ####
-                  NumFilesPerFileSink: 1
-                  Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: PARTIAL
-#### A masked pattern was here ####
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      properties:
-                        bucketing_version -1
-                        columns _col0,_col1
-                        columns.types int:string
-                        escape.delim \
-                        hive.serialization.extend.additional.nesting.levels true
-                        serialization.escape.crlf true
-                        serialization.format 1
-                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  TotalFiles: 1
-                  GatherStats: false
-                  MultiFileSpray: false
-      Execution mode: vectorized
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
           Partition
-            base file name: dt=2001-01-01
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             partition values:
@@ -490,9 +365,7 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.daysales_temp
             name: default.daysales_temp
-#### A masked pattern was here ####
           Partition
-            base file name: dt=2001-01-03
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             partition values:
@@ -526,12 +399,16 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.daysales_temp
             name: default.daysales_temp
-      Truncated Path -> Alias:
-#### A masked pattern was here ####
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: daysales_temp
+          filterExpr: COALESCE(((dt = '2001-01-01') or (customer = 3)),false) (type: boolean)
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: COALESCE(((dt = '2001-01-01') or (customer = 3)),false) (type: boolean)
+            Select Operator
+              expressions: customer (type: int), dt (type: string)
+              outputColumnNames: _col0, _col1
+              ListSink
 
diff --git a/ql/src/test/results/clientpositive/temp_table_windowing_expressions.q.out b/ql/src/test/results/clientpositive/llap/temp_table_windowing_expressions.q.out
similarity index 100%
rename from ql/src/test/results/clientpositive/temp_table_windowing_expressions.q.out
rename to ql/src/test/results/clientpositive/llap/temp_table_windowing_expressions.q.out
index c45f36e..96114e0 100644
--- a/ql/src/test/results/clientpositive/temp_table_windowing_expressions.q.out
+++ b/ql/src/test/results/clientpositive/llap/temp_table_windowing_expressions.q.out
@@ -571,28 +571,28 @@ POSTHOOK: Input: default@part
 Manufacturer#1	1753.76
 Manufacturer#1	1632.66
 Manufacturer#1	1602.59
-Manufacturer#1	1173.15
-Manufacturer#1	1173.15
 Manufacturer#1	1414.42
 Manufacturer#2	1800.7
 Manufacturer#2	1690.68
 Manufacturer#2	2031.98
-Manufacturer#2	1698.66
 Manufacturer#2	1701.6
-Manufacturer#3	1922.98
 Manufacturer#3	1410.39
 Manufacturer#3	1671.68
 Manufacturer#3	1190.27
 Manufacturer#3	1337.29
-Manufacturer#4	1844.92
 Manufacturer#4	1375.42
+Manufacturer#5	1788.73
+Manufacturer#1	1173.15
+Manufacturer#1	1173.15
+Manufacturer#2	1698.66
+Manufacturer#3	1922.98
+Manufacturer#4	1844.92
 Manufacturer#4	1620.67
 Manufacturer#4	1206.26
 Manufacturer#4	1290.35
 Manufacturer#5	1018.1
 Manufacturer#5	1464.48
 Manufacturer#5	1789.69
-Manufacturer#5	1788.73
 Manufacturer#5	1611.66
 PREHOOK: query: select p_mfgr, avg(p_retailprice) over(partition by p_mfgr order by p_type,p_mfgr rows between unbounded preceding and current row) from part
 PREHOOK: type: QUERY
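
The reordered rows above are the same result set; only the order in which partitions are emitted differs between the old MR driver and LLAP, and the query carries no outer ORDER BY to pin it down. A hedged sketch of how such a test could be made order-stable, using the query on the preceding PREHOOK line as a template (the outer ORDER BY is an illustrative addition, not part of the original test):

    SELECT p_mfgr,
           avg(p_retailprice) OVER (PARTITION BY p_mfgr ORDER BY p_type, p_mfgr
                                    ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS a
    FROM part
    ORDER BY p_mfgr, a;
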
diff --git a/ql/src/test/results/clientpositive/test_teradatabinaryfile.q.out b/ql/src/test/results/clientpositive/llap/test_teradatabinaryfile.q.out
similarity index 100%
rename from ql/src/test/results/clientpositive/test_teradatabinaryfile.q.out
rename to ql/src/test/results/clientpositive/llap/test_teradatabinaryfile.q.out
index 75584e9..b0394f5 100644
--- a/ql/src/test/results/clientpositive/test_teradatabinaryfile.q.out
+++ b/ql/src/test/results/clientpositive/llap/test_teradatabinaryfile.q.out
@@ -392,12 +392,12 @@ POSTHOOK: Input: default@teradata_binary_table_1mb
 -127	3.14
 -6	0.00
 -4	3.14
--1	314.15
 2	NULL
-3	3140000000000.00
 5	314000000.00
 7	NULL
 127	0.04
+-1	314.15
+3	3140000000000.00
 NULL	12.00
 PREHOOK: query: INSERT OVERWRITE TABLE teradata_binary_table_64kb_insert
 SELECT test_tinyint, test_decimal, test_date, test_timestamp FROM teradata_binary_table_64kb
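
Here too the diff is a pure reordering of an unordered SELECT's output over teradata_binary_table_1mb. A hypothetical order-stable variant, assuming the same test_tinyint/test_decimal projection (the ORDER BY is added only for illustration):

    SELECT test_tinyint, test_decimal
    FROM teradata_binary_table_1mb
    ORDER BY test_tinyint;
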
diff --git a/ql/src/test/results/clientpositive/llap/timestamp.q.out b/ql/src/test/results/clientpositive/llap/timestamp.q.out
new file mode 100644
index 0000000..52c7acb
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/timestamp.q.out
@@ -0,0 +1,415 @@
+PREHOOK: query: explain select cast('2011-01-01 01:01:01' as timestamp) as c from src union select cast('2011-01-01 01:01:01' as timestamp) as c from src limit 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: explain select cast('2011-01-01 01:01:01' as timestamp) as c from src union select cast('2011-01-01 01:01:01' as timestamp) as c from src limit 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Union 2 (CONTAINS)
+        Map 4 <- Union 2 (CONTAINS)
+        Reducer 3 <- Union 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    Statistics: Num rows: 500 Data size: 20000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      Statistics: Num rows: 1000 Data size: 40000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Group By Operator
+                        keys: true (type: boolean)
+                        minReductionHashAggr: 0.99
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: boolean)
+                          null sort order: z
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: boolean)
+                          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    Statistics: Num rows: 500 Data size: 20000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      Statistics: Num rows: 1000 Data size: 40000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Group By Operator
+                        keys: true (type: boolean)
+                        minReductionHashAggr: 0.99
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: boolean)
+                          null sort order: z
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: boolean)
+                          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
+        Reducer 3 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: boolean)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: TIMESTAMP'2011-01-01 01:01:01' (type: timestamp)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Union 2 
+            Vertex: Union 2
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain extended select cast('2011-01-01 01:01:01' as timestamp) as c from src union select cast('2011-01-01 01:01:01' as timestamp) as c from src limit 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: explain extended select cast('2011-01-01 01:01:01' as timestamp) as c from src union select cast('2011-01-01 01:01:01' as timestamp) as c from src limit 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+OPTIMIZED SQL: SELECT CAST(TIMESTAMP '2011-01-01 01:01:01.000000000' AS TIMESTAMP) AS `c`
+FROM (SELECT CAST(TIMESTAMP '2011-01-01 01:01:01.000000000' AS TIMESTAMP) AS `$f0`
+FROM `default`.`src`
+UNION ALL
+SELECT CAST(TIMESTAMP '2011-01-01 01:01:01.000000000' AS TIMESTAMP) AS `$f0`
+FROM `default`.`src`) AS `t1`
+GROUP BY TRUE
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Union 2 (CONTAINS)
+        Map 4 <- Union 2 (CONTAINS)
+        Reducer 3 <- Union 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  GatherStats: false
+                  Select Operator
+                    Statistics: Num rows: 500 Data size: 20000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      Statistics: Num rows: 1000 Data size: 40000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Group By Operator
+                        keys: true (type: boolean)
+                        minReductionHashAggr: 0.99
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          bucketingVersion: 2
+                          key expressions: _col0 (type: boolean)
+                          null sort order: z
+                          numBuckets: -1
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: boolean)
+                          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                          tag: -1
+                          auto parallelism: true
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: src
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    bucket_count -1
+                    bucketing_version 2
+                    column.name.delimiter ,
+                    columns key,value
+                    columns.types string:string
+#### A masked pattern was here ####
+                    name default.src
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucketing_version 2
+                      column.name.delimiter ,
+                      columns key,value
+                      columns.comments 'default','default'
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.src
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.src
+                  name: default.src
+            Truncated Path -> Alias:
+              /src [src]
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  GatherStats: false
+                  Select Operator
+                    Statistics: Num rows: 500 Data size: 20000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      Statistics: Num rows: 1000 Data size: 40000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Group By Operator
+                        keys: true (type: boolean)
+                        minReductionHashAggr: 0.99
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          bucketingVersion: 2
+                          key expressions: _col0 (type: boolean)
+                          null sort order: z
+                          numBuckets: -1
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: boolean)
+                          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                          tag: -1
+                          auto parallelism: true
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: src
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    bucket_count -1
+                    bucketing_version 2
+                    column.name.delimiter ,
+                    columns key,value
+                    columns.types string:string
+#### A masked pattern was here ####
+                    name default.src
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucketing_version 2
+                      column.name.delimiter ,
+                      columns key,value
+                      columns.comments 'default','default'
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.src
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.src
+                  name: default.src
+            Truncated Path -> Alias:
+              /src [src]
+        Reducer 3 
+            Execution mode: vectorized, llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: boolean)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: TIMESTAMP'2011-01-01 01:01:01' (type: timestamp)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    bucketingVersion: 2
+                    compressed: false
+                    GlobalTableId: 0
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        properties:
+                          bucketing_version -1
+                          columns _col0
+                          columns.types timestamp
+                          escape.delim \
+                          hive.serialization.extend.additional.nesting.levels true
+                          serialization.escape.crlf true
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    TotalFiles: 1
+                    GatherStats: false
+                    MultiFileSpray: false
+        Union 2 
+            Vertex: Union 2
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select cast('2011-01-01 01:01:01' as timestamp) as c from src union select cast('2011-01-01 01:01:01' as timestamp) as c from src limit 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select cast('2011-01-01 01:01:01' as timestamp) as c from src union select cast('2011-01-01 01:01:01' as timestamp) as c from src limit 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+2011-01-01 01:01:01
+PREHOOK: query: explain select cast('2011-01-01 01:01:01.123' as timestamp) as c from src union select cast('2011-01-01 01:01:01.123' as timestamp) as c from src limit 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: explain select cast('2011-01-01 01:01:01.123' as timestamp) as c from src union select cast('2011-01-01 01:01:01.123' as timestamp) as c from src limit 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Union 2 (CONTAINS)
+        Map 4 <- Union 2 (CONTAINS)
+        Reducer 3 <- Union 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    Statistics: Num rows: 500 Data size: 20000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      Statistics: Num rows: 1000 Data size: 40000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Group By Operator
+                        keys: true (type: boolean)
+                        minReductionHashAggr: 0.99
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: boolean)
+                          null sort order: z
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: boolean)
+                          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    Statistics: Num rows: 500 Data size: 20000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      Statistics: Num rows: 1000 Data size: 40000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Group By Operator
+                        keys: true (type: boolean)
+                        minReductionHashAggr: 0.99
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: boolean)
+                          null sort order: z
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: boolean)
+                          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
+        Reducer 3 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: boolean)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: TIMESTAMP'2011-01-01 01:01:01.123' (type: timestamp)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Union 2 
+            Vertex: Union 2
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select cast('2011-01-01 01:01:01.123' as timestamp) as c from src union select cast('2011-01-01 01:01:01.123' as timestamp) as c from src limit 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select cast('2011-01-01 01:01:01.123' as timestamp) as c from src union select cast('2011-01-01 01:01:01.123' as timestamp) as c from src limit 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+2011-01-01 01:01:01.123
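
The single output row follows from the rewrite shown in the OPTIMIZED SQL section above: UNION deduplicates, both branches produce the same constant, and the plan reduces to one GROUP BY TRUE group. An equivalent hand-written form of that rewrite:

    SELECT TIMESTAMP '2011-01-01 01:01:01' AS c
    FROM (SELECT TRUE AS g FROM src
          UNION ALL
          SELECT TRUE AS g FROM src) t
    GROUP BY g;
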
diff --git a/ql/src/test/results/clientpositive/timestamp_comparison3.q.out b/ql/src/test/results/clientpositive/llap/timestamp_comparison3.q.out
similarity index 55%
rename from ql/src/test/results/clientpositive/timestamp_comparison3.q.out
rename to ql/src/test/results/clientpositive/llap/timestamp_comparison3.q.out
index 3977be7..c152a10 100644
--- a/ql/src/test/results/clientpositive/timestamp_comparison3.q.out
+++ b/ql/src/test/results/clientpositive/llap/timestamp_comparison3.q.out
@@ -70,27 +70,31 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: onecolumntable
-            filterExpr: ts BETWEEN TIMESTAMP'2015-01-02 00:00:00' AND TIMESTAMP'2015-01-04 00:00:00' (type: boolean)
-            Statistics: Num rows: 5 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: ts BETWEEN TIMESTAMP'2015-01-02 00:00:00' AND TIMESTAMP'2015-01-04 00:00:00' (type: boolean)
-              Statistics: Num rows: 3 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: ts (type: timestamp)
-                outputColumnNames: _col0
-                Statistics: Num rows: 3 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 3 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-      Execution mode: vectorized
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: onecolumntable
+                  filterExpr: ts BETWEEN TIMESTAMP'2015-01-02 00:00:00' AND TIMESTAMP'2015-01-04 00:00:00' (type: boolean)
+                  Statistics: Num rows: 5 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: ts BETWEEN TIMESTAMP'2015-01-02 00:00:00' AND TIMESTAMP'2015-01-04 00:00:00' (type: boolean)
+                    Statistics: Num rows: 3 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: ts (type: timestamp)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 3 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 3 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
 
   Stage: Stage-0
     Fetch Operator
@@ -118,27 +122,31 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: onecolumntable
-            filterExpr: ts BETWEEN TIMESTAMP'2015-01-02 00:00:00' AND TIMESTAMP'2015-01-03 00:00:00' (type: boolean)
-            Statistics: Num rows: 5 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: ts BETWEEN TIMESTAMP'2015-01-02 00:00:00' AND TIMESTAMP'2015-01-03 00:00:00' (type: boolean)
-              Statistics: Num rows: 2 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: ts (type: timestamp)
-                outputColumnNames: _col0
-                Statistics: Num rows: 2 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 2 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-      Execution mode: vectorized
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: onecolumntable
+                  filterExpr: ts BETWEEN TIMESTAMP'2015-01-02 00:00:00' AND TIMESTAMP'2015-01-03 00:00:00' (type: boolean)
+                  Statistics: Num rows: 5 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: ts BETWEEN TIMESTAMP'2015-01-02 00:00:00' AND TIMESTAMP'2015-01-03 00:00:00' (type: boolean)
+                    Statistics: Num rows: 2 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: ts (type: timestamp)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 2 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 2 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
 
   Stage: Stage-0
     Fetch Operator
@@ -166,27 +174,31 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: onecolumntable
-            filterExpr: ts BETWEEN TIMESTAMP'2015-01-01 00:00:00' AND TIMESTAMP'2015-01-08 00:00:00' (type: boolean)
-            Statistics: Num rows: 5 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: ts BETWEEN TIMESTAMP'2015-01-01 00:00:00' AND TIMESTAMP'2015-01-08 00:00:00' (type: boolean)
-              Statistics: Num rows: 5 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: ts (type: timestamp)
-                outputColumnNames: _col0
-                Statistics: Num rows: 5 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: onecolumntable
+                  filterExpr: ts BETWEEN TIMESTAMP'2015-01-01 00:00:00' AND TIMESTAMP'2015-01-08 00:00:00' (type: boolean)
                   Statistics: Num rows: 5 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-      Execution mode: vectorized
+                  Filter Operator
+                    predicate: ts BETWEEN TIMESTAMP'2015-01-01 00:00:00' AND TIMESTAMP'2015-01-08 00:00:00' (type: boolean)
+                    Statistics: Num rows: 5 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: ts (type: timestamp)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 5 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 5 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
 
   Stage: Stage-0
     Fetch Operator
diff --git a/ql/src/test/results/clientpositive/timestamp_ints_casts.q.out b/ql/src/test/results/clientpositive/llap/timestamp_ints_casts.q.out
similarity index 78%
rename from ql/src/test/results/clientpositive/timestamp_ints_casts.q.out
rename to ql/src/test/results/clientpositive/llap/timestamp_ints_casts.q.out
index 572c49e..8661344 100644
--- a/ql/src/test/results/clientpositive/timestamp_ints_casts.q.out
+++ b/ql/src/test/results/clientpositive/llap/timestamp_ints_casts.q.out
@@ -39,38 +39,22 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: alltypesorc
-            filterExpr: ((cbigint % 250L) = 0L) (type: boolean)
-            Statistics: Num rows: 12288 Data size: 1559690 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: ((cbigint % 250L) = 0L) (type: boolean)
-              Statistics: Num rows: 6144 Data size: 779900 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: CAST( ctinyint AS TIMESTAMP) (type: timestamp), CAST( csmallint AS TIMESTAMP) (type: timestamp), CAST( cint AS TIMESTAMP) (type: timestamp), CAST( cbigint AS TIMESTAMP) (type: timestamp), CAST( cfloat AS TIMESTAMP) (type: timestamp), CAST( cdouble AS TIMESTAMP) (type: timestamp), CAST( cboolean1 AS TIMESTAMP) (type: timestamp), CAST( (cbigint * 0L) AS TIMESTAMP) (type: timestamp), ctimestamp1 (type: timestamp), CAST( cstring1 AS TIMESTAMP) (type: timestamp),  [...]
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
-                Statistics: Num rows: 6144 Data size: 2641080 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 6144 Data size: 2641080 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-      Execution mode: vectorized
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: alltypesorc
+          filterExpr: ((cbigint % 250L) = 0L) (type: boolean)
+          Filter Operator
+            predicate: ((cbigint % 250L) = 0L) (type: boolean)
+            Select Operator
+              expressions: CAST( ctinyint AS TIMESTAMP) (type: timestamp), CAST( csmallint AS TIMESTAMP) (type: timestamp), CAST( cint AS TIMESTAMP) (type: timestamp), CAST( cbigint AS TIMESTAMP) (type: timestamp), CAST( cfloat AS TIMESTAMP) (type: timestamp), CAST( cdouble AS TIMESTAMP) (type: timestamp), CAST( cboolean1 AS TIMESTAMP) (type: timestamp), CAST( (cbigint * 0L) AS TIMESTAMP) (type: timestamp), ctimestamp1 (type: timestamp), CAST( cstring1 AS TIMESTAMP) (type: timestamp), CA [...]
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
+              ListSink
 
 PREHOOK: query: select
 
@@ -177,38 +161,22 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: alltypesorc
-            filterExpr: ((cbigint % 250L) = 0L) (type: boolean)
-            Statistics: Num rows: 12288 Data size: 1559690 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: ((cbigint % 250L) = 0L) (type: boolean)
-              Statistics: Num rows: 6144 Data size: 779900 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: CAST( ctinyint AS TIMESTAMP) (type: timestamp), CAST( csmallint AS TIMESTAMP) (type: timestamp), CAST( cint AS TIMESTAMP) (type: timestamp), CAST( cbigint AS TIMESTAMP) (type: timestamp), CAST( cfloat AS TIMESTAMP) (type: timestamp), CAST( cdouble AS TIMESTAMP) (type: timestamp), CAST( cboolean1 AS TIMESTAMP) (type: timestamp), CAST( (cbigint * 0L) AS TIMESTAMP) (type: timestamp), ctimestamp1 (type: timestamp), CAST( cstring1 AS TIMESTAMP) (type: timestamp),  [...]
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
-                Statistics: Num rows: 6144 Data size: 2641080 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 6144 Data size: 2641080 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-      Execution mode: vectorized
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: alltypesorc
+          filterExpr: ((cbigint % 250L) = 0L) (type: boolean)
+          Filter Operator
+            predicate: ((cbigint % 250L) = 0L) (type: boolean)
+            Select Operator
+              expressions: CAST( ctinyint AS TIMESTAMP) (type: timestamp), CAST( csmallint AS TIMESTAMP) (type: timestamp), CAST( cint AS TIMESTAMP) (type: timestamp), CAST( cbigint AS TIMESTAMP) (type: timestamp), CAST( cfloat AS TIMESTAMP) (type: timestamp), CAST( cdouble AS TIMESTAMP) (type: timestamp), CAST( cboolean1 AS TIMESTAMP) (type: timestamp), CAST( (cbigint * 0L) AS TIMESTAMP) (type: timestamp), ctimestamp1 (type: timestamp), CAST( cstring1 AS TIMESTAMP) (type: timestamp), CA [...]
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
+              ListSink
 
 PREHOOK: query: select
 
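Here the plan collapses from a MapReduce stage to a fetch-only tree. A minimal sketch of the projection being exercised, using only the casts visible above (the full expression list is truncated by the mail system):

    SELECT CAST(ctinyint  AS TIMESTAMP),
           CAST(csmallint AS TIMESTAMP),
           CAST(cint      AS TIMESTAMP),
           CAST(cbigint   AS TIMESTAMP)
    FROM alltypesorc
    WHERE cbigint % 250 = 0;
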
diff --git a/ql/src/test/results/clientpositive/timestamp_literal.q.out b/ql/src/test/results/clientpositive/llap/timestamp_literal.q.out
similarity index 85%
rename from ql/src/test/results/clientpositive/timestamp_literal.q.out
rename to ql/src/test/results/clientpositive/llap/timestamp_literal.q.out
index cfcd06f..513009d 100644
--- a/ql/src/test/results/clientpositive/timestamp_literal.q.out
+++ b/ql/src/test/results/clientpositive/llap/timestamp_literal.q.out
@@ -19,11 +19,9 @@ STAGE PLANS:
         TableScan
           alias: _dummy_table
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: TIMESTAMP'2011-01-01 01:01:01' (type: timestamp)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select timestamp '2011-01-01 01:01:01'
@@ -56,11 +54,9 @@ STAGE PLANS:
         TableScan
           alias: _dummy_table
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: true (type: boolean)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select '2011-01-01 01:01:01.101' <> timestamp '2011-01-01 01:01:01.100'
@@ -93,11 +89,9 @@ STAGE PLANS:
         TableScan
           alias: _dummy_table
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 1 (type: int)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select 1 where timestamp '2011-01-01 01:01:01.101' <> timestamp '2011-01-01 01:01:01.100'
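
The dropped Statistics lines all belong to constant-only fetch plans. For example, the comparison in the second hunk constant-folds at compile time, which is why its Select Operator emits the literal true:

    SELECT '2011-01-01 01:01:01.101' <> TIMESTAMP '2011-01-01 01:01:01.100';
    -- folds to the boolean literal true
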
diff --git a/ql/src/test/results/clientpositive/timestamptz.q.out b/ql/src/test/results/clientpositive/llap/timestamptz.q.out
similarity index 86%
rename from ql/src/test/results/clientpositive/timestamptz.q.out
rename to ql/src/test/results/clientpositive/llap/timestamptz.q.out
index 09c50dd..1aeb3da 100644
--- a/ql/src/test/results/clientpositive/timestamptz.q.out
+++ b/ql/src/test/results/clientpositive/llap/timestamptz.q.out
@@ -17,11 +17,9 @@ STAGE PLANS:
         TableScan
           alias: _dummy_table
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: TIMESTAMPLOCALTZ'2005-01-02 18:01:00.0 US/Pacific' (type: timestamp with local time zone)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select cast('2005-01-03 02:01:00 GMT' as timestamp with local time zone)
@@ -52,11 +50,9 @@ STAGE PLANS:
         TableScan
           alias: _dummy_table
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: TIMESTAMPLOCALTZ'2016-01-03 12:26:34.0123 US/Pacific' (type: timestamp with local time zone)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select cast('2016-01-03 12:26:34.0123 America/Los_Angeles' as timestamplocaltz)
@@ -87,11 +83,9 @@ STAGE PLANS:
         TableScan
           alias: _dummy_table
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: TIMESTAMPLOCALTZ'2016-01-02 16:00:00.0 US/Pacific' (type: timestamp with local time zone)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select cast('2016-01-03Europe/London' as timestamplocaltz)
@@ -122,11 +116,9 @@ STAGE PLANS:
         TableScan
           alias: _dummy_table
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: TIMESTAMPLOCALTZ'2016-01-03 04:34:56.38 US/Pacific' (type: timestamp with local time zone)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select cast('2016-01-03 13:34:56.38 +1:00' as timestamplocaltz)
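
As the folded TIMESTAMPLOCALTZ literals above show, values cast to timestamp with local time zone are normalized to the session time zone (US/Pacific in this test), so a GMT input is displayed shifted:

    SELECT CAST('2005-01-03 02:01:00 GMT' AS TIMESTAMP WITH LOCAL TIME ZONE);
    -- displays as 2005-01-02 18:01:00.0 US/Pacific under a US/Pacific session zone
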
diff --git a/ql/src/test/results/clientpositive/truncate_column_buckets.q.out b/ql/src/test/results/clientpositive/llap/truncate_column_buckets.q.out
similarity index 100%
rename from ql/src/test/results/clientpositive/truncate_column_buckets.q.out
rename to ql/src/test/results/clientpositive/llap/truncate_column_buckets.q.out
diff --git a/ql/src/test/results/clientpositive/truncate_column_list_bucket.q.out b/ql/src/test/results/clientpositive/llap/truncate_column_list_bucket.q.out
similarity index 61%
rename from ql/src/test/results/clientpositive/truncate_column_list_bucket.q.out
rename to ql/src/test/results/clientpositive/llap/truncate_column_list_bucket.q.out
index d30bb93..40a127b 100644
--- a/ql/src/test/results/clientpositive/truncate_column_list_bucket.q.out
+++ b/ql/src/test/results/clientpositive/llap/truncate_column_list_bucket.q.out
@@ -63,57 +63,14 @@ OPTIMIZED SQL: SELECT CAST('484' AS STRING) AS `key`, `value`, CAST('1' AS STRIN
 FROM `default`.`test_tab_n3`
 WHERE `key` = '484' AND `part` = '1'
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: test_tab_n3
-            filterExpr: ((key = '484') and (part = '1')) (type: boolean)
-            Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
-            GatherStats: false
-            Filter Operator
-              isSamplingPred: false
-              predicate: (key = '484') (type: boolean)
-              Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: '484' (type: string), value (type: string), '1' (type: string)
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 2 Data size: 526 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  bucketingVersion: 2
-                  compressed: false
-                  GlobalTableId: 0
-#### A masked pattern was here ####
-                  NumFilesPerFileSink: 1
-                  Statistics: Num rows: 2 Data size: 526 Basic stats: COMPLETE Column stats: COMPLETE
-#### A masked pattern was here ####
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      properties:
-                        bucketing_version -1
-                        columns _col0,_col1,_col2
-                        columns.types string:string:string
-                        escape.delim \
-                        hive.serialization.extend.additional.nesting.levels true
-                        serialization.escape.crlf true
-                        serialization.format 1
-                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  TotalFiles: 1
-                  GatherStats: false
-                  MultiFileSpray: false
-      Execution mode: vectorized
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
           Partition
-            base file name: key=484
             input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
             output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
             partition values:
@@ -147,14 +104,18 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
               name: default.test_tab_n3
             name: default.test_tab_n3
-      Truncated Path -> Alias:
-        /test_tab_n3/part=1/key=484 [test_tab_n3]
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: test_tab_n3
+          filterExpr: ((key = '484') and (part = '1')) (type: boolean)
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: (key = '484') (type: boolean)
+            Select Operator
+              expressions: '484' (type: string), value (type: string), '1' (type: string)
+              outputColumnNames: _col0, _col1, _col2
+              ListSink
 
 PREHOOK: query: SELECT * FROM test_tab_n3 WHERE part = '1' AND key = '484'
 PREHOOK: type: QUERY
@@ -181,57 +142,14 @@ OPTIMIZED SQL: SELECT CAST('0' AS STRING) AS `key`, `value`, CAST('1' AS STRING)
 FROM `default`.`test_tab_n3`
 WHERE `key` = '0' AND `part` = '1'
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: test_tab_n3
-            filterExpr: ((key = '0') and (part = '1')) (type: boolean)
-            Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
-            GatherStats: false
-            Filter Operator
-              isSamplingPred: false
-              predicate: (key = '0') (type: boolean)
-              Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: '0' (type: string), value (type: string), '1' (type: string)
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 2 Data size: 522 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  bucketingVersion: 2
-                  compressed: false
-                  GlobalTableId: 0
-#### A masked pattern was here ####
-                  NumFilesPerFileSink: 1
-                  Statistics: Num rows: 2 Data size: 522 Basic stats: COMPLETE Column stats: COMPLETE
-#### A masked pattern was here ####
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      properties:
-                        bucketing_version -1
-                        columns _col0,_col1,_col2
-                        columns.types string:string:string
-                        escape.delim \
-                        hive.serialization.extend.additional.nesting.levels true
-                        serialization.escape.crlf true
-                        serialization.format 1
-                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  TotalFiles: 1
-                  GatherStats: false
-                  MultiFileSpray: false
-      Execution mode: vectorized
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
           Partition
-            base file name: HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME
             input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
             output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
             partition values:
@@ -265,14 +183,18 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
               name: default.test_tab_n3
             name: default.test_tab_n3
-      Truncated Path -> Alias:
-        /test_tab_n3/part=1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME [test_tab_n3]
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: test_tab_n3
+          filterExpr: ((key = '0') and (part = '1')) (type: boolean)
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: (key = '0') (type: boolean)
+            Select Operator
+              expressions: '0' (type: string), value (type: string), '1' (type: string)
+              outputColumnNames: _col0, _col1, _col2
+              ListSink
 
 PREHOOK: query: SELECT * FROM test_tab_n3 WHERE part = '1' AND key = '0'
 PREHOOK: type: QUERY
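
Both hunks show the same simplification: the list-bucketing directory and the partition predicate are resolved during compilation, so only the residual key filter remains in the fetch-time plan. The query behind the first hunk, copied from its PREHOOK line:

    SELECT * FROM test_tab_n3 WHERE part = '1' AND key = '484';
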
diff --git a/ql/src/test/results/clientpositive/type_cast_1.q.out b/ql/src/test/results/clientpositive/llap/type_cast_1.q.out
similarity index 77%
rename from ql/src/test/results/clientpositive/type_cast_1.q.out
rename to ql/src/test/results/clientpositive/llap/type_cast_1.q.out
index 22dad1a..4d16dbc 100644
--- a/ql/src/test/results/clientpositive/type_cast_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/type_cast_1.q.out
@@ -18,14 +18,11 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: src
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 5 (type: int)
             outputColumnNames: _col0
-            Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE
             Limit
               Number of rows: 1
-              Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
               ListSink
 
 PREHOOK: query: SELECT IF(false, 1, cast(2 as smallint)) + 3 FROM src LIMIT 1
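
The removed Statistics lines belong to a plan whose entire expression has already been folded; the Select Operator above emits the int literal 5 for the query on the preceding PREHOOK line:

    SELECT IF(false, 1, CAST(2 AS SMALLINT)) + 3 FROM src LIMIT 1;
    -- folds to 5
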
diff --git a/ql/src/test/results/clientpositive/type_widening.q.out b/ql/src/test/results/clientpositive/llap/type_widening.q.out
similarity index 83%
rename from ql/src/test/results/clientpositive/type_widening.q.out
rename to ql/src/test/results/clientpositive/llap/type_widening.q.out
index f295e66..360b356 100644
--- a/ql/src/test/results/clientpositive/type_widening.q.out
+++ b/ql/src/test/results/clientpositive/llap/type_widening.q.out
@@ -16,14 +16,11 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: src
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 0L (type: bigint)
             outputColumnNames: _col0
-            Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
             Limit
               Number of rows: 1
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
               ListSink
 
 PREHOOK: query: SELECT COALESCE(0, 9223372036854775807) FROM src LIMIT 1
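
Same pattern here: COALESCE over an int and a bigint is widened to bigint and folded, so the plan above emits the literal 0L:

    SELECT COALESCE(0, 9223372036854775807) FROM src LIMIT 1;
    -- folds to 0L (bigint)
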
@@ -49,48 +46,62 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: 0L (type: bigint)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
-              Union
-                Statistics: Num rows: 1000 Data size: 8000 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  key expressions: _col0 (type: bigint)
-                  null sort order: z
-                  sort order: +
-                  Statistics: Num rows: 1000 Data size: 8000 Basic stats: COMPLETE Column stats: COMPLETE
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: 9223372036854775807L (type: bigint)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
-              Union
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Union 2 (CONTAINS)
+        Map 4 <- Union 2 (CONTAINS)
+        Reducer 3 <- Union 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: 0L (type: bigint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: bigint)
+                      null sort order: z
+                      sort order: +
+                      Statistics: Num rows: 1000 Data size: 8000 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: 9223372036854775807L (type: bigint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: bigint)
+                      null sort order: z
+                      sort order: +
+                      Statistics: Num rows: 1000 Data size: 8000 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
+        Reducer 3 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: bigint)
+                outputColumnNames: _col0
                 Statistics: Num rows: 1000 Data size: 8000 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  key expressions: _col0 (type: bigint)
-                  null sort order: z
-                  sort order: +
+                File Output Operator
+                  compressed: false
                   Statistics: Num rows: 1000 Data size: 8000 Basic stats: COMPLETE Column stats: COMPLETE
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: bigint)
-          outputColumnNames: _col0
-          Statistics: Num rows: 1000 Data size: 8000 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 1000 Data size: 8000 Basic stats: COMPLETE Column stats: COMPLETE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Union 2 
+            Vertex: Union 2
 
   Stage: Stage-0
     Fetch Operator
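
Note on the plan change above: the single Map Reduce stage becomes a Tez DAG in which Map 1 and Map 4 both feed the Union 2 vertex (CONTAINS edges) and one vectorized LLAP reducer performs the global sort. A minimal sketch of a query with this shape, assuming only the bigint constants visible in the plan (the actual q file is not reproduced in this hunk):

    SELECT c FROM (
      SELECT 0L AS c FROM src
      UNION ALL
      SELECT 9223372036854775807L AS c FROM src
    ) u
    ORDER BY c;

The ORDER BY is what introduces the SIMPLE_EDGE into Reducer 3; without it the union output could be written directly from the map vertices.
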
@@ -1133,14 +1144,11 @@ STAGE PLANS:
         TableScan
           alias: t1_n114
           filterExpr: (a > 2Y) (type: boolean)
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: (a > 2Y) (type: boolean)
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: a (type: tinyint), b (type: smallint)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
               ListSink
 
 PREHOOK: query: explain select * from t1_n114 where b < 2
@@ -1162,14 +1170,11 @@ STAGE PLANS:
         TableScan
           alias: t1_n114
           filterExpr: (b < 2S) (type: boolean)
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: (b < 2S) (type: boolean)
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: a (type: tinyint), b (type: smallint)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
               ListSink
 
 PREHOOK: query: explain select * from t1_n114 where a < 200
@@ -1191,14 +1196,11 @@ STAGE PLANS:
         TableScan
           alias: t1_n114
           filterExpr: (UDFToInteger(a) < 200) (type: boolean)
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: (UDFToInteger(a) < 200) (type: boolean)
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: a (type: tinyint), b (type: smallint)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
               ListSink
 
 PREHOOK: query: explain select * from t1_n114 where b > 40000
@@ -1220,14 +1222,11 @@ STAGE PLANS:
         TableScan
           alias: t1_n114
           filterExpr: (UDFToInteger(b) > 40000) (type: boolean)
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: (UDFToInteger(b) > 40000) (type: boolean)
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: a (type: tinyint), b (type: smallint)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
               ListSink
 
 PREHOOK: query: drop table t1_n114
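
These fetch-only plans also illustrate Hive's typed literal suffixes: 2Y is a tinyint literal and 2S a smallint literal, so columns a and b are compared without widening, while constants that do not fit the column type (200 for tinyint a, 40000 for smallint b) instead force a UDFToInteger cast on the column. A short sketch against the same table, assuming t1_n114(a tinyint, b smallint) as implied by the Select Operator:

    EXPLAIN SELECT * FROM t1_n114 WHERE a > 2;    -- predicate compiles to (a > 2Y)
    EXPLAIN SELECT * FROM t1_n114 WHERE a < 200;  -- compiles to (UDFToInteger(a) < 200)
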
diff --git a/ql/src/test/results/clientpositive/udaf_binarysetfunctions.q.out b/ql/src/test/results/clientpositive/llap/udaf_binarysetfunctions.q.out
similarity index 71%
rename from ql/src/test/results/clientpositive/udaf_binarysetfunctions.q.out
rename to ql/src/test/results/clientpositive/llap/udaf_binarysetfunctions.q.out
index 86dbcf6..81c56c2 100644
--- a/ql/src/test/results/clientpositive/udaf_binarysetfunctions.q.out
+++ b/ql/src/test/results/clientpositive/llap/udaf_binarysetfunctions.q.out
@@ -372,75 +372,75 @@ POSTHOOK: Input: default@t_n21
 #### A masked pattern was here ####
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
 
 STAGE PLANS:
   Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t_n21
-            Statistics: Num rows: 29 Data size: 5044 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: px (type: int), x (type: decimal(10,0)), y (type: decimal(10,0)), UDFToDouble(x) (type: double), (UDFToDouble(x) * UDFToDouble(x)) (type: double), UDFToDouble(y) (type: double), (UDFToDouble(y) * UDFToDouble(y)) (type: double)
-              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-              Statistics: Num rows: 29 Data size: 5044 Basic stats: COMPLETE Column stats: COMPLETE
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t_n21
+                  Statistics: Num rows: 29 Data size: 5044 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: px (type: int), x (type: decimal(10,0)), y (type: decimal(10,0)), UDFToDouble(x) (type: double), (UDFToDouble(x) * UDFToDouble(x)) (type: double), UDFToDouble(y) (type: double), (UDFToDouble(y) * UDFToDouble(y)) (type: double)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+                    Statistics: Num rows: 29 Data size: 5044 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: sum(_col4), sum(_col3), count(_col1), sum(_col6), sum(_col5), count(_col2), corr(_col2, _col1), covar_samp(_col2, _col1), covar_pop(_col2, _col1), regr_count(_col2, _col1), regr_slope(_col2, _col1), regr_intercept(_col2, _col1), regr_r2(_col2, _col1), regr_sxx(_col2, _col1), regr_syy(_col2, _col1), regr_sxy(_col2, _col1), regr_avgx(_col2, _col1), regr_avgy(_col2, _col1)
+                      keys: _col0 (type: int)
+                      minReductionHashAggr: 0.7586207
+                      mode: hash
+                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18
+                      Statistics: Num rows: 7 Data size: 11396 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        null sort order: z
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 7 Data size: 11396 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: bigint), _col4 (type: double), _col5 (type: double), _col6 (type: bigint), _col7 (type: struct<count:bigint,xavg:double,yavg:double,xvar:double,yvar:double,covar:double>), _col8 (type: struct<count:bigint,xavg:double,yavg:double,covar:double>), _col9 (type: struct<count:bigint,xavg:double,yavg:double,covar:double>), _col10 (type: bigint), _col11 (type: struct<count:bigint,xavg:double,yavg: [...]
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
               Group By Operator
-                aggregations: sum(_col4), sum(_col3), count(_col1), sum(_col6), sum(_col5), count(_col2), corr(_col2, _col1), covar_samp(_col2, _col1), covar_pop(_col2, _col1), regr_count(_col2, _col1), regr_slope(_col2, _col1), regr_intercept(_col2, _col1), regr_r2(_col2, _col1), regr_sxx(_col2, _col1), regr_syy(_col2, _col1), regr_sxy(_col2, _col1), regr_avgx(_col2, _col1), regr_avgy(_col2, _col1)
-                keys: _col0 (type: int)
-                minReductionHashAggr: 0.99
-                mode: hash
+                aggregations: sum(VALUE._col0), sum(VALUE._col1), count(VALUE._col2), sum(VALUE._col3), sum(VALUE._col4), count(VALUE._col5), corr(VALUE._col6), covar_samp(VALUE._col7), covar_pop(VALUE._col8), regr_count(VALUE._col9), regr_slope(VALUE._col10), regr_intercept(VALUE._col11), regr_r2(VALUE._col12), regr_sxx(VALUE._col13), regr_syy(VALUE._col14), regr_sxy(VALUE._col15), regr_avgx(VALUE._col16), regr_avgy(VALUE._col17)
+                keys: KEY._col0 (type: int)
+                mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18
-                Statistics: Num rows: 7 Data size: 11396 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  null sort order: z
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 7 Data size: 11396 Basic stats: COMPLETE Column stats: COMPLETE
-                  value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: bigint), _col4 (type: double), _col5 (type: double), _col6 (type: bigint), _col7 (type: struct<count:bigint,xavg:double,yavg:double,xvar:double,yvar:double,covar:double>), _col8 (type: struct<count:bigint,xavg:double,yavg:double,covar:double>), _col9 (type: struct<count:bigint,xavg:double,yavg:double,covar:double>), _col10 (type: bigint), _col11 (type: struct<count:bigint,xavg:double,yavg:double [...]
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: sum(VALUE._col0), sum(VALUE._col1), count(VALUE._col2), sum(VALUE._col3), sum(VALUE._col4), count(VALUE._col5), corr(VALUE._col6), covar_samp(VALUE._col7), covar_pop(VALUE._col8), regr_count(VALUE._col9), regr_slope(VALUE._col10), regr_intercept(VALUE._col11), regr_r2(VALUE._col12), regr_sxx(VALUE._col13), regr_syy(VALUE._col14), regr_sxy(VALUE._col15), regr_avgx(VALUE._col16), regr_avgy(VALUE._col17)
-          keys: KEY._col0 (type: int)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18
-          Statistics: Num rows: 7 Data size: 2492 Basic stats: COMPLETE Column stats: COMPLETE
-          Select Operator
-            expressions: _col0 (type: int), ((_col1 - ((_col2 * _col2) / _col3)) / _col3) (type: double), ((_col4 - ((_col5 * _col5) / _col6)) / _col6) (type: double), _col7 (type: double), _col8 (type: double), _col9 (type: double), _col10 (type: bigint), _col11 (type: double), _col12 (type: double), _col13 (type: double), _col14 (type: double), _col15 (type: double), _col16 (type: double), _col17 (type: decimal(14,4)), _col18 (type: decimal(14,4))
-            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
-            Statistics: Num rows: 7 Data size: 2324 Basic stats: COMPLETE Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: int)
-              null sort order: z
-              sort order: +
-              Statistics: Num rows: 7 Data size: 2324 Basic stats: COMPLETE Column stats: COMPLETE
-              value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: bigint), _col7 (type: double), _col8 (type: double), _col9 (type: double), _col10 (type: double), _col11 (type: double), _col12 (type: double), _col13 (type: decimal(14,4)), _col14 (type: decimal(14,4))
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: double), VALUE._col3 (type: double), VALUE._col4 (type: double), VALUE._col5 (type: bigint), VALUE._col6 (type: double), VALUE._col7 (type: double), VALUE._col8 (type: double), VALUE._col9 (type: double), VALUE._col10 (type: double), VALUE._col11 (type: double), VALUE._col12 (type: decimal(14,4)), VALUE._col13 (type: decimal(14,4)), VALUE._col5 (type: bigint)
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15
-          Statistics: Num rows: 7 Data size: 2324 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 7 Data size: 2324 Basic stats: COMPLETE Column stats: COMPLETE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                Statistics: Num rows: 7 Data size: 2492 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col0 (type: int), ((_col1 - ((_col2 * _col2) / _col3)) / _col3) (type: double), ((_col4 - ((_col5 * _col5) / _col6)) / _col6) (type: double), _col7 (type: double), _col8 (type: double), _col9 (type: double), _col10 (type: bigint), _col11 (type: double), _col12 (type: double), _col13 (type: double), _col14 (type: double), _col15 (type: double), _col16 (type: double), _col17 (type: decimal(14,4)), _col18 (type: decimal(14,4))
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
+                  Statistics: Num rows: 7 Data size: 2324 Basic stats: COMPLETE Column stats: COMPLETE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: int)
+                    null sort order: z
+                    sort order: +
+                    Statistics: Num rows: 7 Data size: 2324 Basic stats: COMPLETE Column stats: COMPLETE
+                    value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: bigint), _col7 (type: double), _col8 (type: double), _col9 (type: double), _col10 (type: double), _col11 (type: double), _col12 (type: double), _col13 (type: decimal(14,4)), _col14 (type: decimal(14,4))
+        Reducer 3 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: double), VALUE._col3 (type: double), VALUE._col4 (type: double), VALUE._col5 (type: bigint), VALUE._col6 (type: double), VALUE._col7 (type: double), VALUE._col8 (type: double), VALUE._col9 (type: double), VALUE._col10 (type: double), VALUE._col11 (type: double), VALUE._col12 (type: decimal(14,4)), VALUE._col13 (type: decimal(14,4)), VALUE._col5 (type: bigint)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15
+                Statistics: Num rows: 7 Data size: 2324 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 7 Data size: 2324 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
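
Besides the MR-to-Tez restructuring, note that the LLAP plan derives minReductionHashAggr from column statistics: 0.7586207 is consistent with 1 - 7/29 (7 estimated groups over 29 input rows), replacing the old 0.99 default. A sketch of the kind of query this plan corresponds to, assuming t_n21(px int, x decimal(10,0), y decimal(10,0)) as implied by the TableScan:

    SELECT px,
           corr(y, x), covar_samp(y, x), covar_pop(y, x),
           regr_count(y, x), regr_slope(y, x), regr_intercept(y, x),
           regr_r2(y, x), regr_avgx(y, x), regr_avgy(y, x)
    FROM t_n21
    GROUP BY px
    ORDER BY px;

The struct-typed value expressions in Map 1 are the partial-aggregation states (count/xavg/yavg/covar and friends) that Reducer 2 merges in mergepartial mode.
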
diff --git a/ql/src/test/results/clientpositive/udaf_binarysetfunctions_no_cbo.q.out b/ql/src/test/results/clientpositive/llap/udaf_binarysetfunctions_no_cbo.q.out
similarity index 76%
rename from ql/src/test/results/clientpositive/udaf_binarysetfunctions_no_cbo.q.out
rename to ql/src/test/results/clientpositive/llap/udaf_binarysetfunctions_no_cbo.q.out
index 6857ca9..c53a5ec 100644
--- a/ql/src/test/results/clientpositive/udaf_binarysetfunctions_no_cbo.q.out
+++ b/ql/src/test/results/clientpositive/llap/udaf_binarysetfunctions_no_cbo.q.out
@@ -372,71 +372,71 @@ POSTHOOK: Input: default@t_n6
 #### A masked pattern was here ####
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
 
 STAGE PLANS:
   Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t_n6
-            Statistics: Num rows: 29 Data size: 5044 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: px (type: int), y (type: decimal(10,0)), x (type: decimal(10,0))
-              outputColumnNames: px, y, x
-              Statistics: Num rows: 29 Data size: 5044 Basic stats: COMPLETE Column stats: COMPLETE
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t_n6
+                  Statistics: Num rows: 29 Data size: 5044 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: px (type: int), y (type: decimal(10,0)), x (type: decimal(10,0))
+                    outputColumnNames: px, y, x
+                    Statistics: Num rows: 29 Data size: 5044 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: var_pop(x), var_pop(y), corr(y, x), covar_samp(y, x), covar_pop(y, x), regr_count(y, x), regr_slope(y, x), regr_intercept(y, x), regr_r2(y, x), regr_sxx(y, x), regr_syy(y, x), regr_sxy(y, x), regr_avgx(y, x), regr_avgy(y, x)
+                      keys: px (type: int)
+                      minReductionHashAggr: 0.7586207
+                      mode: hash
+                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
+                      Statistics: Num rows: 7 Data size: 12180 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        null sort order: z
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 7 Data size: 12180 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: struct<count:bigint,sum:double,variance:double>), _col2 (type: struct<count:bigint,sum:double,variance:double>), _col3 (type: struct<count:bigint,xavg:double,yavg:double,xvar:double,yvar:double,covar:double>), _col4 (type: struct<count:bigint,xavg:double,yavg:double,covar:double>), _col5 (type: struct<count:bigint,xavg:double,yavg:double,covar:double>), _col6 (type: bigint), _col7 (type: struct<count:bigint,xavg:double,yavg:double,x [...]
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
               Group By Operator
-                aggregations: var_pop(x), var_pop(y), corr(y, x), covar_samp(y, x), covar_pop(y, x), regr_count(y, x), regr_slope(y, x), regr_intercept(y, x), regr_r2(y, x), regr_sxx(y, x), regr_syy(y, x), regr_sxy(y, x), regr_avgx(y, x), regr_avgy(y, x)
-                keys: px (type: int)
-                minReductionHashAggr: 0.99
-                mode: hash
+                aggregations: var_pop(VALUE._col0), var_pop(VALUE._col1), corr(VALUE._col2), covar_samp(VALUE._col3), covar_pop(VALUE._col4), regr_count(VALUE._col5), regr_slope(VALUE._col6), regr_intercept(VALUE._col7), regr_r2(VALUE._col8), regr_sxx(VALUE._col9), regr_syy(VALUE._col10), regr_sxy(VALUE._col11), regr_avgx(VALUE._col12), regr_avgy(VALUE._col13)
+                keys: KEY._col0 (type: int)
+                mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
-                Statistics: Num rows: 7 Data size: 12180 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 7 Data size: 2268 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: int)
                   null sort order: z
                   sort order: +
-                  Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 7 Data size: 12180 Basic stats: COMPLETE Column stats: COMPLETE
-                  value expressions: _col1 (type: struct<count:bigint,sum:double,variance:double>), _col2 (type: struct<count:bigint,sum:double,variance:double>), _col3 (type: struct<count:bigint,xavg:double,yavg:double,xvar:double,yvar:double,covar:double>), _col4 (type: struct<count:bigint,xavg:double,yavg:double,covar:double>), _col5 (type: struct<count:bigint,xavg:double,yavg:double,covar:double>), _col6 (type: bigint), _col7 (type: struct<count:bigint,xavg:double,yavg:double,xvar:do [...]
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: var_pop(VALUE._col0), var_pop(VALUE._col1), corr(VALUE._col2), covar_samp(VALUE._col3), covar_pop(VALUE._col4), regr_count(VALUE._col5), regr_slope(VALUE._col6), regr_intercept(VALUE._col7), regr_r2(VALUE._col8), regr_sxx(VALUE._col9), regr_syy(VALUE._col10), regr_sxy(VALUE._col11), regr_avgx(VALUE._col12), regr_avgy(VALUE._col13)
-          keys: KEY._col0 (type: int)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
-          Statistics: Num rows: 7 Data size: 2268 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: int)
-              null sort order: z
-              sort order: +
-              Statistics: Num rows: 7 Data size: 2268 Basic stats: COMPLETE Column stats: COMPLETE
-              value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: bigint), _col7 (type: double), _col8 (type: double), _col9 (type: double), _col10 (type: double), _col11 (type: double), _col12 (type: double), _col13 (type: decimal(14,4)), _col14 (type: decimal(14,4))
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: double), VALUE._col3 (type: double), VALUE._col4 (type: double), VALUE._col5 (type: bigint), VALUE._col6 (type: double), VALUE._col7 (type: double), VALUE._col8 (type: double), VALUE._col9 (type: double), VALUE._col10 (type: double), VALUE._col11 (type: double), VALUE._col12 (type: decimal(14,4)), VALUE._col13 (type: decimal(14,4)), VALUE._col5 (type: bigint)
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15
-          Statistics: Num rows: 7 Data size: 2324 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 7 Data size: 2324 Basic stats: COMPLETE Column stats: COMPLETE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  Statistics: Num rows: 7 Data size: 2268 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: bigint), _col7 (type: double), _col8 (type: double), _col9 (type: double), _col10 (type: double), _col11 (type: double), _col12 (type: double), _col13 (type: decimal(14,4)), _col14 (type: decimal(14,4))
+        Reducer 3 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: double), VALUE._col3 (type: double), VALUE._col4 (type: double), VALUE._col5 (type: bigint), VALUE._col6 (type: double), VALUE._col7 (type: double), VALUE._col8 (type: double), VALUE._col9 (type: double), VALUE._col10 (type: double), VALUE._col11 (type: double), VALUE._col12 (type: decimal(14,4)), VALUE._col13 (type: decimal(14,4)), VALUE._col5 (type: bigint)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15
+                Statistics: Num rows: 7 Data size: 2324 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 7 Data size: 2324 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
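
The _no_cbo variant shows the same migration with the cost-based optimizer off, presumably toggled in the q file with something like the setting below (an assumption; the setup lines are not part of this hunk):

    set hive.cbo.enable=false;

The visible difference is that the map-side Select projects the raw columns (px, y, x) and leaves all the work to the UDAFs, whereas the CBO plan above pre-computes UDFToDouble(x), x*x, and similar expressions on the map side.
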
diff --git a/ql/src/test/results/clientpositive/llap/udaf_number_format.q.out b/ql/src/test/results/clientpositive/llap/udaf_number_format.q.out
new file mode 100644
index 0000000..a1ae4cc
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/udaf_number_format.q.out
@@ -0,0 +1,95 @@
+PREHOOK: query: EXPLAIN SELECT
+  sum('a'),
+  avg('a'),
+  variance('a'),
+  std('a')
+FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: EXPLAIN SELECT
+  sum('a'),
+  avg('a'),
+  variance('a'),
+  std('a')
+FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: sum('a'), count(), sum(null), sum(null)
+                      minReductionHashAggr: 0.99
+                      mode: hash
+                      outputColumnNames: _col0, _col1, _col2, _col3
+                      Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        null sort order: 
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col0 (type: double), _col1 (type: bigint), _col2 (type: double), _col3 (type: double)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0), count(VALUE._col1), sum(VALUE._col2), sum(VALUE._col3)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col0 (type: double), (_col0 / _col1) (type: double), ((_col2 - ((_col3 * _col3) / _col1)) / _col1) (type: double), power(((_col2 - ((_col3 * _col3) / _col1)) / _col1), 0.5) (type: double)
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT
+  sum('a'),
+  avg('a'),
+  variance('a'),
+  std('a')
+FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT
+  sum('a'),
+  avg('a'),
+  variance('a'),
+  std('a')
+FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0.0	0.0	NULL	NULL
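
The interesting part of this new golden file is the result row: 'a' is not parseable as a number, so the compiler rewrites the variance/std partials to sum(null) (visible in the Group By Operator) and both come back NULL, while sum('a') and the derived avg (_col0 / _col1) evaluate to 0.0. A quick hedged way to observe the underlying conversion behavior:

    SELECT CAST('a' AS DOUBLE) FROM src LIMIT 1;   -- returns NULL
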
diff --git a/ql/src/test/results/clientpositive/udaf_percentile_cont.q.out b/ql/src/test/results/clientpositive/llap/udaf_percentile_cont.q.out
similarity index 86%
rename from ql/src/test/results/clientpositive/udaf_percentile_cont.q.out
rename to ql/src/test/results/clientpositive/llap/udaf_percentile_cont.q.out
index 509ae7b..8810a4c 100644
--- a/ql/src/test/results/clientpositive/udaf_percentile_cont.q.out
+++ b/ql/src/test/results/clientpositive/llap/udaf_percentile_cont.q.out
@@ -498,43 +498,53 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t_test
-            Statistics: Num rows: 17 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: value (type: int)
-              outputColumnNames: _col0
-              Statistics: Num rows: 17 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t_test
+                  Statistics: Num rows: 17 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: value (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 17 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: percentile_cont(_col0, 0), percentile_cont(_col0, 0.2), percentile_cont(0.2, _col0, 1, 0), percentile_cont(0.2, _col0, 1, 1), percentile_cont(0.2, _col0, 0, 0), percentile_cont(0.2, _col0, 0, 1)
+                      minReductionHashAggr: 0.9411765
+                      mode: hash
+                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                      Statistics: Num rows: 1 Data size: 4728 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        null sort order: 
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 4728 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col0 (type: struct<counts:map<bigint,bigint>,percentiles:array<double>,isascending:boolean>), _col1 (type: struct<counts:map<bigint,bigint>,percentiles:array<double>,isascending:boolean>), _col2 (type: struct<counts:map<bigint,bigint>,percentiles:array<double>,isascending:boolean>), _col3 (type: struct<counts:map<bigint,bigint>,percentiles:array<double>,isascending:boolean>), _col4 (type: struct<counts:map<bigint,bigint>,percentiles:array<doubl [...]
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
               Group By Operator
-                aggregations: percentile_cont(_col0, 0), percentile_cont(_col0, 0.2), percentile_cont(0.2, _col0, 1, 0), percentile_cont(0.2, _col0, 1, 1), percentile_cont(0.2, _col0, 0, 0), percentile_cont(0.2, _col0, 0, 1)
-                minReductionHashAggr: 0.99
-                mode: hash
+                aggregations: percentile_cont(VALUE._col0), percentile_cont(VALUE._col1), percentile_cont(VALUE._col2), percentile_cont(VALUE._col3), percentile_cont(VALUE._col4), percentile_cont(VALUE._col5)
+                mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                Statistics: Num rows: 1 Data size: 4728 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  null sort order: 
-                  sort order: 
-                  Statistics: Num rows: 1 Data size: 4728 Basic stats: COMPLETE Column stats: COMPLETE
-                  value expressions: _col0 (type: struct<counts:map<bigint,bigint>,percentiles:array<double>,isascending:boolean>), _col1 (type: struct<counts:map<bigint,bigint>,percentiles:array<double>,isascending:boolean>), _col2 (type: struct<counts:map<bigint,bigint>,percentiles:array<double>,isascending:boolean>), _col3 (type: struct<counts:map<bigint,bigint>,percentiles:array<double>,isascending:boolean>), _col4 (type: struct<counts:map<bigint,bigint>,percentiles:array<double>,isa [...]
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: percentile_cont(VALUE._col0), percentile_cont(VALUE._col1), percentile_cont(VALUE._col2), percentile_cont(VALUE._col3), percentile_cont(VALUE._col4), percentile_cont(VALUE._col5)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-          Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
-          Select Operator
-            expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), _col3 (type: double), _col2 (type: double), (_col2 = _col1) (type: boolean), _col2 (type: double), (_col2 = _col1) (type: boolean), _col4 (type: double), _col5 (type: double), _col4 (type: double)
-            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
-            Statistics: Num rows: 1 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 1 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), _col3 (type: double), _col2 (type: double), (_col2 = _col1) (type: boolean), _col2 (type: double), (_col2 = _col1) (type: boolean), _col4 (type: double), _col5 (type: double), _col4 (type: double)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
+                  Statistics: Num rows: 1 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
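
The percentile_cont plan ships a histogram-like partial state per group (counts:map<bigint,bigint> plus the requested percentiles and sort direction) and merges it in Reducer 2. The compiled forms percentile_cont(_col0, 0.2) and percentile_cont(0.2, _col0, 1, 0) presumably correspond to the standard-SQL invocation in the q file, along the lines of (an assumed shape, not quoted from the test):

    SELECT percentile_cont(0.2) WITHIN GROUP (ORDER BY value)
    FROM t_test;

with the extra integer arguments encoding the ORDER BY direction and NULLS placement.
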
diff --git a/ql/src/test/results/clientpositive/udaf_percentile_disc.q.out b/ql/src/test/results/clientpositive/llap/udaf_percentile_disc.q.out
similarity index 86%
rename from ql/src/test/results/clientpositive/udaf_percentile_disc.q.out
rename to ql/src/test/results/clientpositive/llap/udaf_percentile_disc.q.out
index e7efcf9..ddb46ac 100644
--- a/ql/src/test/results/clientpositive/udaf_percentile_disc.q.out
+++ b/ql/src/test/results/clientpositive/llap/udaf_percentile_disc.q.out
@@ -498,43 +498,53 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t_test
-            Statistics: Num rows: 17 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: value (type: int)
-              outputColumnNames: _col0
-              Statistics: Num rows: 17 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t_test
+                  Statistics: Num rows: 17 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: value (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 17 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: percentile_disc(_col0, 0), percentile_disc(_col0, 0.2), percentile_disc(0.2, _col0, 1, 0), percentile_disc(0.2, _col0, 1, 1), percentile_disc(0.2, _col0, 0, 0), percentile_disc(0.2, _col0, 0, 1)
+                      minReductionHashAggr: 0.9411765
+                      mode: hash
+                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                      Statistics: Num rows: 1 Data size: 4728 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        null sort order: 
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 4728 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col0 (type: struct<counts:map<bigint,bigint>,percentiles:array<double>,isascending:boolean>), _col1 (type: struct<counts:map<bigint,bigint>,percentiles:array<double>,isascending:boolean>), _col2 (type: struct<counts:map<bigint,bigint>,percentiles:array<double>,isascending:boolean>), _col3 (type: struct<counts:map<bigint,bigint>,percentiles:array<double>,isascending:boolean>), _col4 (type: struct<counts:map<bigint,bigint>,percentiles:array<doubl [...]
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
               Group By Operator
-                aggregations: percentile_disc(_col0, 0), percentile_disc(_col0, 0.2), percentile_disc(0.2, _col0, 1, 0), percentile_disc(0.2, _col0, 1, 1), percentile_disc(0.2, _col0, 0, 0), percentile_disc(0.2, _col0, 0, 1)
-                minReductionHashAggr: 0.99
-                mode: hash
+                aggregations: percentile_disc(VALUE._col0), percentile_disc(VALUE._col1), percentile_disc(VALUE._col2), percentile_disc(VALUE._col3), percentile_disc(VALUE._col4), percentile_disc(VALUE._col5)
+                mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                Statistics: Num rows: 1 Data size: 4728 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  null sort order: 
-                  sort order: 
-                  Statistics: Num rows: 1 Data size: 4728 Basic stats: COMPLETE Column stats: COMPLETE
-                  value expressions: _col0 (type: struct<counts:map<bigint,bigint>,percentiles:array<double>,isascending:boolean>), _col1 (type: struct<counts:map<bigint,bigint>,percentiles:array<double>,isascending:boolean>), _col2 (type: struct<counts:map<bigint,bigint>,percentiles:array<double>,isascending:boolean>), _col3 (type: struct<counts:map<bigint,bigint>,percentiles:array<double>,isascending:boolean>), _col4 (type: struct<counts:map<bigint,bigint>,percentiles:array<double>,isa [...]
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: percentile_disc(VALUE._col0), percentile_disc(VALUE._col1), percentile_disc(VALUE._col2), percentile_disc(VALUE._col3), percentile_disc(VALUE._col4), percentile_disc(VALUE._col5)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-          Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
-          Select Operator
-            expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), _col3 (type: double), _col2 (type: double), (_col2 = _col1) (type: boolean), _col2 (type: double), (_col2 = _col1) (type: boolean), _col4 (type: double), _col5 (type: double), _col4 (type: double)
-            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
-            Statistics: Num rows: 1 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 1 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), _col3 (type: double), _col2 (type: double), (_col2 = _col1) (type: boolean), _col2 (type: double), (_col2 = _col1) (type: boolean), _col4 (type: double), _col5 (type: double), _col4 (type: double)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
+                  Statistics: Num rows: 1 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
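
The percentile_disc migration is structurally identical to percentile_cont above; the two functions differ only in semantics: percentile_disc returns an actual value from the input at the requested cumulative rank, while percentile_cont interpolates between neighbors. Assuming the same t_test table, they can be contrasted directly:

    SELECT percentile_disc(0.2) WITHIN GROUP (ORDER BY value),
           percentile_cont(0.2) WITHIN GROUP (ORDER BY value)
    FROM t_test;
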
diff --git a/ql/src/test/results/clientpositive/llap/udf1.q.out b/ql/src/test/results/clientpositive/llap/udf1.q.out
new file mode 100644
index 0000000..05e23f0
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/udf1.q.out
@@ -0,0 +1,176 @@
+PREHOOK: query: CREATE TABLE dest1_n1(c1 STRING, c2 STRING, c3 STRING, c4 STRING,
+  c5 STRING, c6 STRING, c7 STRING, c8 STRING,
+  c9 STRING, c10 STRING, c11 STRING, c12 STRING, c13 STRING,
+  c14 STRING, c15 STRING, c16 STRING, c17 STRING,
+  c18 STRING, c19 STRING, c20 STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest1_n1
+POSTHOOK: query: CREATE TABLE dest1_n1(c1 STRING, c2 STRING, c3 STRING, c4 STRING,
+  c5 STRING, c6 STRING, c7 STRING, c8 STRING,
+  c9 STRING, c10 STRING, c11 STRING, c12 STRING, c13 STRING,
+  c14 STRING, c15 STRING, c16 STRING, c17 STRING,
+  c18 STRING, c19 STRING, c20 STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest1_n1
+PREHOOK: query: EXPLAIN
+FROM src INSERT OVERWRITE TABLE dest1_n1 SELECT 'a' LIKE '%a%', 'b' LIKE '%a%', 'ab' LIKE '%a%', 'ab' LIKE '%a_',
+  '%_' LIKE '\%\_', 'ab' LIKE '\%\_', 'ab' LIKE '_a%', 'ab' LIKE 'a',
+  '' RLIKE '.*', 'a' RLIKE '[ab]', '' RLIKE '[ab]', 'hadoop' RLIKE '[a-z]*', 'hadoop' RLIKE 'o*',
+  REGEXP_REPLACE('abc', 'b', 'c'), REGEXP_REPLACE('abc', 'z', 'a'), REGEXP_REPLACE('abbbb', 'bb', 'b'),
+  REGEXP_REPLACE('hadoop', '(.)[a-z]*', '$1ive'), REGEXP_REPLACE('hadoopAAA','A.*',''),
+  REGEXP_REPLACE('abc', '', 'A'), 'abc' RLIKE ''
+  WHERE src.key = 86
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@dest1_n1
+POSTHOOK: query: EXPLAIN
+FROM src INSERT OVERWRITE TABLE dest1_n1 SELECT 'a' LIKE '%a%', 'b' LIKE '%a%', 'ab' LIKE '%a%', 'ab' LIKE '%a_',
+  '%_' LIKE '\%\_', 'ab' LIKE '\%\_', 'ab' LIKE '_a%', 'ab' LIKE 'a',
+  '' RLIKE '.*', 'a' RLIKE '[ab]', '' RLIKE '[ab]', 'hadoop' RLIKE '[a-z]*', 'hadoop' RLIKE 'o*',
+  REGEXP_REPLACE('abc', 'b', 'c'), REGEXP_REPLACE('abc', 'z', 'a'), REGEXP_REPLACE('abbbb', 'bb', 'b'),
+  REGEXP_REPLACE('hadoop', '(.)[a-z]*', '$1ive'), REGEXP_REPLACE('hadoopAAA','A.*',''),
+  REGEXP_REPLACE('abc', '', 'A'), 'abc' RLIKE ''
+  WHERE src.key = 86
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@dest1_n1
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  filterExpr: (UDFToDouble(key) = 86.0D) (type: boolean)
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (UDFToDouble(key) = 86.0D) (type: boolean)
+                    Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: 'TRUE' (type: string), 'FALSE' (type: string), 'TRUE' (type: string), 'TRUE' (type: string), 'TRUE' (type: string), 'FALSE' (type: string), 'FALSE' (type: string), 'FALSE' (type: string), 'TRUE' (type: string), 'TRUE' (type: string), 'FALSE' (type: string), 'TRUE' (type: string), 'TRUE' (type: string), 'acc' (type: string), 'abc' (type: string), 'abb' (type: string), 'hive' (type: string), 'hadoop' (type: string), 'AaAbAcA' (type: string), 'FALSE' (type [...]
+                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19
+                      Statistics: Num rows: 250 Data size: 442000 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 250 Data size: 442000 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.dest1_n1
+                      Select Operator
+                        expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: string), _col6 (type: string), _col7 (type: string), _col8 (type: string), _col9 (type: string), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: string), _col18 (type: string), _col19 (type: string)
+                        outputColumnNames: c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16, c17, c18, c19, c20
+                        Statistics: Num rows: 250 Data size: 442000 Basic stats: COMPLETE Column stats: COMPLETE
+                        Group By Operator
+                          aggregations: compute_stats(c1, 'hll'), compute_stats(c2, 'hll'), compute_stats(c3, 'hll'), compute_stats(c4, 'hll'), compute_stats(c5, 'hll'), compute_stats(c6, 'hll'), compute_stats(c7, 'hll'), compute_stats(c8, 'hll'), compute_stats(c9, 'hll'), compute_stats(c10, 'hll'), compute_stats(c11, 'hll'), compute_stats(c12, 'hll'), compute_stats(c13, 'hll'), compute_stats(c14, 'hll'), compute_stats(c15, 'hll'), compute_stats(c16, 'hll'), compute_stats(c17, 'hll'), co [...]
+                          minReductionHashAggr: 0.99
+                          mode: hash
+                          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19
+                          Statistics: Num rows: 1 Data size: 8800 Basic stats: COMPLETE Column stats: COMPLETE
+                          Reduce Output Operator
+                            null sort order: 
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 8800 Basic stats: COMPLETE Column stats: COMPLETE
+                            value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,coun [...]
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2), compute_stats(VALUE._col3), compute_stats(VALUE._col4), compute_stats(VALUE._col5), compute_stats(VALUE._col6), compute_stats(VALUE._col7), compute_stats(VALUE._col8), compute_stats(VALUE._col9), compute_stats(VALUE._col10), compute_stats(VALUE._col11), compute_stats(VALUE._col12), compute_stats(VALUE._col13), compute_stats(VALUE._col14), compute_stats(VALUE._col15), compute_ [...]
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19
+                Statistics: Num rows: 1 Data size: 8800 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8800 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.dest1_n1
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16, c17, c18, c19, c20
+          Column Types: string, string, string, string, string, string, string, string, string, string, string, string, string, string, string, string, string, string, string, string
+          Table: default.dest1_n1
+
+PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n1 SELECT 'a' LIKE '%a%', 'b' LIKE '%a%', 'ab' LIKE '%a%', 'ab' LIKE '%a_',
+  '%_' LIKE '\%\_', 'ab' LIKE '\%\_', 'ab' LIKE '_a%', 'ab' LIKE 'a',
+  '' RLIKE '.*', 'a' RLIKE '[ab]', '' RLIKE '[ab]', 'hadoop' RLIKE '[a-z]*', 'hadoop' RLIKE 'o*',
+  REGEXP_REPLACE('abc', 'b', 'c'), REGEXP_REPLACE('abc', 'z', 'a'), REGEXP_REPLACE('abbbb', 'bb', 'b'),
+  REGEXP_REPLACE('hadoop', '(.)[a-z]*', '$1ive'), REGEXP_REPLACE('hadoopAAA','A.*',''),
+  REGEXP_REPLACE('abc', '', 'A'), 'abc' RLIKE ''
+  WHERE src.key = 86
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@dest1_n1
+POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n1 SELECT 'a' LIKE '%a%', 'b' LIKE '%a%', 'ab' LIKE '%a%', 'ab' LIKE '%a_',
+  '%_' LIKE '\%\_', 'ab' LIKE '\%\_', 'ab' LIKE '_a%', 'ab' LIKE 'a',
+  '' RLIKE '.*', 'a' RLIKE '[ab]', '' RLIKE '[ab]', 'hadoop' RLIKE '[a-z]*', 'hadoop' RLIKE 'o*',
+  REGEXP_REPLACE('abc', 'b', 'c'), REGEXP_REPLACE('abc', 'z', 'a'), REGEXP_REPLACE('abbbb', 'bb', 'b'),
+  REGEXP_REPLACE('hadoop', '(.)[a-z]*', '$1ive'), REGEXP_REPLACE('hadoopAAA','A.*',''),
+  REGEXP_REPLACE('abc', '', 'A'), 'abc' RLIKE ''
+  WHERE src.key = 86
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@dest1_n1
+POSTHOOK: Lineage: dest1_n1.c1 EXPRESSION []
+POSTHOOK: Lineage: dest1_n1.c10 EXPRESSION []
+POSTHOOK: Lineage: dest1_n1.c11 EXPRESSION []
+POSTHOOK: Lineage: dest1_n1.c12 EXPRESSION []
+POSTHOOK: Lineage: dest1_n1.c13 EXPRESSION []
+POSTHOOK: Lineage: dest1_n1.c14 SIMPLE []
+POSTHOOK: Lineage: dest1_n1.c15 SIMPLE []
+POSTHOOK: Lineage: dest1_n1.c16 SIMPLE []
+POSTHOOK: Lineage: dest1_n1.c17 SIMPLE []
+POSTHOOK: Lineage: dest1_n1.c18 SIMPLE []
+POSTHOOK: Lineage: dest1_n1.c19 SIMPLE []
+POSTHOOK: Lineage: dest1_n1.c2 EXPRESSION []
+POSTHOOK: Lineage: dest1_n1.c20 EXPRESSION []
+POSTHOOK: Lineage: dest1_n1.c3 EXPRESSION []
+POSTHOOK: Lineage: dest1_n1.c4 EXPRESSION []
+POSTHOOK: Lineage: dest1_n1.c5 EXPRESSION []
+POSTHOOK: Lineage: dest1_n1.c6 EXPRESSION []
+POSTHOOK: Lineage: dest1_n1.c7 EXPRESSION []
+POSTHOOK: Lineage: dest1_n1.c8 EXPRESSION []
+POSTHOOK: Lineage: dest1_n1.c9 EXPRESSION []
+PREHOOK: query: SELECT dest1_n1.* FROM dest1_n1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest1_n1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT dest1_n1.* FROM dest1_n1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest1_n1
+#### A masked pattern was here ####
+TRUE	FALSE	TRUE	TRUE	TRUE	FALSE	FALSE	FALSE	TRUE	TRUE	FALSE	TRUE	TRUE	acc	abc	abb	hive	hadoop	AaAbAcA	FALSE
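A minimal illustration of the escape and empty-pattern semantics exercised above (results match the expected row; assuming a default Hive session):

    SELECT '%_' LIKE '\%\_',  -- TRUE:  \% and \_ match the literal characters % and _
           'ab' LIKE '\%\_',  -- FALSE: 'a' is not a literal '%'
           'abc' RLIKE '';    -- FALSE: Hive treats an empty RLIKE pattern as no match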
diff --git a/ql/src/test/results/clientpositive/udf2.q.out b/ql/src/test/results/clientpositive/llap/udf2.q.out
similarity index 60%
rename from ql/src/test/results/clientpositive/udf2.q.out
rename to ql/src/test/results/clientpositive/llap/udf2.q.out
index bcc2faa..be6a35f 100644
--- a/ql/src/test/results/clientpositive/udf2.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf2.q.out
@@ -26,34 +26,19 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@dest1_n55
 #### A masked pattern was here ####
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: dest1_n55
-            Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: '|' (type: string), trim(c1) (type: string), '|' (type: string), rtrim(c1) (type: string), '|' (type: string), ltrim(c1) (type: string), '|' (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-              Statistics: Num rows: 1 Data size: 892 Basic stats: COMPLETE Column stats: COMPLETE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 1 Data size: 892 Basic stats: COMPLETE Column stats: COMPLETE
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-      Execution mode: vectorized
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: dest1_n55
+          Select Operator
+            expressions: '|' (type: string), trim(c1) (type: string), '|' (type: string), rtrim(c1) (type: string), '|' (type: string), ltrim(c1) (type: string), '|' (type: string)
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+            ListSink
 
 PREHOOK: query: SELECT '|', trim(dest1_n55.c1), '|', rtrim(dest1_n55.c1), '|', ltrim(dest1_n55.c1), '|' FROM dest1_n55
 PREHOOK: type: QUERY
diff --git a/ql/src/test/results/clientpositive/llap/udf3.q.out b/ql/src/test/results/clientpositive/llap/udf3.q.out
new file mode 100644
index 0000000..a3c993b
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/udf3.q.out
@@ -0,0 +1,136 @@
+PREHOOK: query: CREATE TABLE dest1_n104(c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest1_n104
+POSTHOOK: query: CREATE TABLE dest1_n104(c1 STRING, c2 STRING, c3 STRING, c4 STRING, c5 STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest1_n104
+PREHOOK: query: EXPLAIN
+FROM src INSERT OVERWRITE TABLE dest1_n104 SELECT count(CAST('' AS INT)), sum(CAST('' AS INT)), avg(CAST('' AS INT)), 
+min(CAST('' AS INT)), max(CAST('' AS INT))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@dest1_n104
+POSTHOOK: query: EXPLAIN
+FROM src INSERT OVERWRITE TABLE dest1_n104 SELECT count(CAST('' AS INT)), sum(CAST('' AS INT)), avg(CAST('' AS INT)), 
+min(CAST('' AS INT)), max(CAST('' AS INT))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@dest1_n104
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: count(null), sum(null), min(null), max(null)
+                      minReductionHashAggr: 0.99
+                      mode: hash
+                      outputColumnNames: _col0, _col1, _col2, _col3
+                      Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        null sort order: 
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: int), _col3 (type: int)
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0), sum(VALUE._col1), min(VALUE._col2), max(VALUE._col3)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: CAST( _col0 AS STRING) (type: string), CAST( _col1 AS STRING) (type: string), CAST( (UDFToDouble(_col1) / _col0) AS STRING) (type: string), CAST( _col2 AS STRING) (type: string), CAST( _col3 AS STRING) (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                  Statistics: Num rows: 1 Data size: 920 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 920 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.dest1_n104
+                  Select Operator
+                    expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string)
+                    outputColumnNames: c1, c2, c3, c4, c5
+                    Statistics: Num rows: 1 Data size: 920 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: compute_stats(c1, 'hll'), compute_stats(c2, 'hll'), compute_stats(c3, 'hll'), compute_stats(c4, 'hll'), compute_stats(c5, 'hll')
+                      mode: complete
+                      outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                      Statistics: Num rows: 1 Data size: 2200 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 1 Data size: 2200 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.dest1_n104
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: c1, c2, c3, c4, c5
+          Column Types: string, string, string, string, string
+          Table: default.dest1_n104
+
+PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n104 SELECT count(CAST('' AS INT)), sum(CAST('' AS INT)), avg(CAST('' AS INT)), 
+min(CAST('' AS INT)), max(CAST('' AS INT))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@dest1_n104
+POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n104 SELECT count(CAST('' AS INT)), sum(CAST('' AS INT)), avg(CAST('' AS INT)), 
+min(CAST('' AS INT)), max(CAST('' AS INT))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@dest1_n104
+POSTHOOK: Lineage: dest1_n104.c1 EXPRESSION []
+POSTHOOK: Lineage: dest1_n104.c2 EXPRESSION []
+POSTHOOK: Lineage: dest1_n104.c3 EXPRESSION []
+POSTHOOK: Lineage: dest1_n104.c4 EXPRESSION []
+POSTHOOK: Lineage: dest1_n104.c5 EXPRESSION []
+PREHOOK: query: SELECT dest1_n104.* FROM dest1_n104
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest1_n104
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT dest1_n104.* FROM dest1_n104
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest1_n104
+#### A masked pattern was here ####
+0	NULL	NULL	NULL	NULL
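A minimal sketch of the NULL handling behind the row above (any non-empty table works; the aggregates follow standard SQL NULL rules):

    SELECT count(CAST('' AS INT)),  -- 0:    '' casts to NULL, and count skips NULLs
           sum(CAST('' AS INT)),    -- NULL: sum over only NULLs is NULL
           min(CAST('' AS INT))     -- NULL: likewise for avg/min/max
    FROM src;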
diff --git a/ql/src/test/results/clientpositive/udf4.q.out b/ql/src/test/results/clientpositive/llap/udf4.q.out
similarity index 68%
rename from ql/src/test/results/clientpositive/udf4.q.out
rename to ql/src/test/results/clientpositive/llap/udf4.q.out
index d9b841a..8d3a86f 100644
--- a/ql/src/test/results/clientpositive/udf4.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf4.q.out
@@ -70,34 +70,19 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@dest1_n149
 #### A masked pattern was here ####
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: dest1_n149
-            Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: 1 (type: decimal(1,0)), 2 (type: decimal(2,0)), -2 (type: decimal(2,0)), 1 (type: decimal(2,0)), 1 (type: decimal(2,0)), -2 (type: decimal(2,0)), 1.0D (type: double), null (type: double), 0.0D (type: double), 1 (type: decimal(2,0)), 2 (type: decimal(2,0)), -1 (type: decimal(2,0)), 1 (type: decimal(2,0)), rand(3) (type: double), 3 (type: int), -3 (type: int), 3 (type: int), -1 (type: int), -2 (type: int), -2Y (type: tinyint), -2S (type: smallint), -2L (type: big [...]
-              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33
-              Statistics: Num rows: 1 Data size: 1248 Basic stats: COMPLETE Column stats: COMPLETE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 1 Data size: 1248 Basic stats: COMPLETE Column stats: COMPLETE
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-      Execution mode: vectorized
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: dest1_n149
+          Select Operator
+            expressions: 1 (type: decimal(1,0)), 2 (type: decimal(2,0)), -2 (type: decimal(2,0)), 1 (type: decimal(2,0)), 1 (type: decimal(2,0)), -2 (type: decimal(2,0)), 1.0D (type: double), null (type: double), 0.0D (type: double), 1 (type: decimal(2,0)), 2 (type: decimal(2,0)), -1 (type: decimal(2,0)), 1 (type: decimal(2,0)), rand(3) (type: double), 3 (type: int), -3 (type: int), 3 (type: int), -1 (type: int), -2 (type: int), -2Y (type: tinyint), -2S (type: smallint), -2L (type: bigin [...]
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33
+            ListSink
 
 PREHOOK: query: SELECT round(1.0), round(1.5), round(-1.5), floor(1.0), floor(1.5), floor(-1.5), sqrt(1.0), sqrt(-1.0), sqrt(0.0), ceil(1.0), ceil(1.5), ceil(-1.5), ceiling(1.0), rand(3), +3, -3, 1++2, 1+-2, 
 ~1, 
diff --git a/ql/src/test/results/clientpositive/udf5.q.out b/ql/src/test/results/clientpositive/llap/udf5.q.out
similarity index 92%
rename from ql/src/test/results/clientpositive/udf5.q.out
rename to ql/src/test/results/clientpositive/llap/udf5.q.out
index 58a1dab..21ee1b6 100644
--- a/ql/src/test/results/clientpositive/udf5.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf5.q.out
@@ -35,11 +35,9 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: dest1_n14
-          Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: '2008-11-11 15:32:20' (type: string), DATE'2008-11-11' (type: date), 1 (type: int), 11 (type: int), 2008 (type: int), 1 (type: int), 11 (type: int), 2008 (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
-            Statistics: Num rows: 1 Data size: 183 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: SELECT from_unixtime(1226446340), to_date(from_unixtime(1226446340)), day('2008-11-01'), month('2008-11-01'), year('2008-11-01'), day('2008-11-01 15:32:20'), month('2008-11-01 15:32:20'), year('2008-11-01 15:32:20') FROM dest1_n14
@@ -71,11 +69,9 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: dest1_n14
-          Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: '01/13/10 11:57:40' (type: string), '2010-01-13 11:57:40' (type: string)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 1 Data size: 204 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: SELECT from_unixtime(unix_timestamp('2010-01-13 11:57:40', 'yyyy-MM-dd HH:mm:ss'), 'MM/dd/yy HH:mm:ss'), from_unixtime(unix_timestamp('2010-01-13 11:57:40')) from dest1_n14
diff --git a/ql/src/test/results/clientpositive/udf6.q.out b/ql/src/test/results/clientpositive/llap/udf6.q.out
similarity index 91%
rename from ql/src/test/results/clientpositive/udf6.q.out
rename to ql/src/test/results/clientpositive/llap/udf6.q.out
index e6d5832..81d429e 100644
--- a/ql/src/test/results/clientpositive/udf6.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf6.q.out
@@ -35,11 +35,9 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: dest1_n60
-          Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 1 (type: int)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: SELECT IF(TRUE, 1, 2) FROM dest1_n60
@@ -81,11 +79,9 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: dest1_n60
-          Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 1 (type: int), 2 (type: int), 2 (type: int), 'a' (type: string), 0.1 (type: decimal(1,1)), 2L (type: bigint), 126Y (type: tinyint), 128S (type: smallint), 128 (type: int), 1.0D (type: double), '128' (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
-            Statistics: Num rows: 1 Data size: 324 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: SELECT IF(TRUE, 1, 2), IF(FALSE, 1, 2), IF(NULL, 1, 2), IF(TRUE, "a", "b"),
diff --git a/ql/src/test/results/clientpositive/udf7.q.out b/ql/src/test/results/clientpositive/llap/udf7.q.out
similarity index 96%
rename from ql/src/test/results/clientpositive/udf7.q.out
rename to ql/src/test/results/clientpositive/llap/udf7.q.out
index 44b282f..b9e91ab 100644
--- a/ql/src/test/results/clientpositive/udf7.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf7.q.out
@@ -49,11 +49,9 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: dest1_n111
-          Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 1.098612288668D (type: double), null (type: double), null (type: double), 1.098612288668D (type: double), null (type: double), null (type: double), 1.584962500721D (type: double), null (type: double), null (type: double), 0.47712125472D (type: double), null (type: double), null (type: double), 1.584962500721D (type: double), null (type: double), null (type: double), null (type: double), -1.0D (type: double), 7.389056098931D (type: double), 8.0D (type: double), 8. [...]
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27
-            Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), LOG(0.0),
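The folded constants in the plan above show Hive's convention for logarithms of non-positive arguments: they evaluate to NULL rather than -Infinity or NaN. A minimal sketch (result values taken from the folded expressions in the plan):

    SELECT ROUND(LN(3.0), 12),  -- 1.098612288668
           LN(0.0),             -- NULL
           LN(-1);              -- NULL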
diff --git a/ql/src/test/results/clientpositive/llap/udf8.q.out b/ql/src/test/results/clientpositive/llap/udf8.q.out
new file mode 100644
index 0000000..db9dd1f
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/udf8.q.out
@@ -0,0 +1,105 @@
+PREHOOK: query: CREATE TABLE dest1_n54(c1 STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest1_n54
+POSTHOOK: query: CREATE TABLE dest1_n54(c1 STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest1_n54
+PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n54 SELECT '' WHERE src.key = 86
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@dest1_n54
+POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n54 SELECT '' WHERE src.key = 86
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@dest1_n54
+POSTHOOK: Lineage: dest1_n54.c1 SIMPLE []
+PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n54 SELECT '1' WHERE src.key = 86
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@dest1_n54
+POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest1_n54 SELECT '1' WHERE src.key = 86
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@dest1_n54
+POSTHOOK: Lineage: dest1_n54.c1 SIMPLE []
+PREHOOK: query: EXPLAIN
+SELECT avg(c1), sum(c1), count(c1) FROM dest1_n54
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest1_n54
+#### A masked pattern was here ####
+POSTHOOK: query: EXPLAIN
+SELECT avg(c1), sum(c1), count(c1) FROM dest1_n54
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest1_n54
+#### A masked pattern was here ####
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: dest1_n54
+                  Statistics: Num rows: 1 Data size: 85 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: c1 (type: string)
+                    outputColumnNames: c1
+                    Statistics: Num rows: 1 Data size: 85 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: sum(c1), count(c1)
+                      minReductionHashAggr: 0.0
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        null sort order: 
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col0 (type: double), _col1 (type: bigint)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0), count(VALUE._col1)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: (_col0 / _col1) (type: double), _col0 (type: double), _col1 (type: bigint)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT avg(c1), sum(c1), count(c1) FROM dest1_n54
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest1_n54
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT avg(c1), sum(c1), count(c1) FROM dest1_n54
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest1_n54
+#### A masked pattern was here ####
+1.0	1.0	1
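The Reducer 2 plan above also shows how avg over the STRING column is evaluated: c1 is implicitly coerced to double, sum and count are aggregated, and avg is derived as (_col0 / _col1). A hedged equivalent of that rewrite (illustrative only):

    -- avg(c1) over a STRING column behaves like sum(double(c1)) / count(c1):
    SELECT sum(c1) / count(c1),  -- 1.0, same as avg(c1)
           count(c1)             -- 1: only the '1' row remains after the second OVERWRITE
    FROM dest1_n54;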
diff --git a/ql/src/test/results/clientpositive/udf9.q.out b/ql/src/test/results/clientpositive/llap/udf9.q.out
similarity index 63%
rename from ql/src/test/results/clientpositive/udf9.q.out
rename to ql/src/test/results/clientpositive/llap/udf9.q.out
index a55b3cd..7d1d4f7 100644
--- a/ql/src/test/results/clientpositive/udf9.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf9.q.out
@@ -25,38 +25,22 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            filterExpr: (UDFToDouble(key) = 86.0D) (type: boolean)
-            Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: (UDFToDouble(key) = 86.0D) (type: boolean)
-              Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: -1 (type: int), 2 (type: int), 32 (type: int), -1 (type: int), DATE'2009-01-01' (type: date), DATE'2009-12-31' (type: date), DATE'2008-03-01' (type: date), DATE'2009-03-02' (type: date), DATE'2008-02-28' (type: date), DATE'2009-02-27' (type: date), DATE'2008-12-31' (type: date), DATE'2008-01-02' (type: date), DATE'2008-02-26' (type: date), DATE'2009-02-26' (type: date), DATE'2006-02-28' (type: date), DATE'2005-02-28' (type: date)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15
-                Statistics: Num rows: 250 Data size: 172000 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 250 Data size: 172000 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-      Execution mode: vectorized
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: src
+          filterExpr: (UDFToDouble(key) = 86.0D) (type: boolean)
+          Filter Operator
+            predicate: (UDFToDouble(key) = 86.0D) (type: boolean)
+            Select Operator
+              expressions: -1 (type: int), 2 (type: int), 32 (type: int), -1 (type: int), DATE'2009-01-01' (type: date), DATE'2009-12-31' (type: date), DATE'2008-03-01' (type: date), DATE'2009-03-02' (type: date), DATE'2008-02-28' (type: date), DATE'2009-02-27' (type: date), DATE'2008-12-31' (type: date), DATE'2008-01-02' (type: date), DATE'2008-02-26' (type: date), DATE'2009-02-26' (type: date), DATE'2006-02-28' (type: date), DATE'2005-02-28' (type: date)
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15
+              ListSink
 
 PREHOOK: query: SELECT DATEDIFF('2008-12-31', '2009-01-01'), DATEDIFF('2008-03-01', '2008-02-28'),
        DATEDIFF('2007-03-01', '2007-01-28'), DATEDIFF('2008-03-01 23:59:59', '2008-03-02 00:00:00'),
diff --git a/ql/src/test/results/clientpositive/llap/udf_10_trims.q.out b/ql/src/test/results/clientpositive/llap/udf_10_trims.q.out
new file mode 100644
index 0000000..ddf2264
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/udf_10_trims.q.out
@@ -0,0 +1,128 @@
+PREHOOK: query: CREATE TABLE dest1_n5(c1 STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest1_n5
+POSTHOOK: query: CREATE TABLE dest1_n5(c1 STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest1_n5
+PREHOOK: query: EXPLAIN
+INSERT OVERWRITE TABLE dest1_n5
+SELECT trim(trim(trim(trim(trim(trim(trim(trim(trim(trim( '  abc  '))))))))))
+FROM src
+WHERE src.key = 86
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@dest1_n5
+POSTHOOK: query: EXPLAIN
+INSERT OVERWRITE TABLE dest1_n5
+SELECT trim(trim(trim(trim(trim(trim(trim(trim(trim(trim( '  abc  '))))))))))
+FROM src
+WHERE src.key = 86
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@dest1_n5
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  filterExpr: (UDFToDouble(key) = 86.0D) (type: boolean)
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (UDFToDouble(key) = 86.0D) (type: boolean)
+                    Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: 'abc' (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.dest1_n5
+                      Select Operator
+                        expressions: _col0 (type: string)
+                        outputColumnNames: c1
+                        Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
+                        Group By Operator
+                          aggregations: compute_stats(c1, 'hll')
+                          minReductionHashAggr: 0.99
+                          mode: hash
+                          outputColumnNames: _col0
+                          Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: COMPLETE
+                          Reduce Output Operator
+                            null sort order: 
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: COMPLETE
+                            value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.dest1_n5
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: c1
+          Column Types: string
+          Table: default.dest1_n5
+
+PREHOOK: query: INSERT OVERWRITE TABLE dest1_n5
+SELECT trim(trim(trim(trim(trim(trim(trim(trim(trim(trim( '  abc  '))))))))))
+FROM src
+WHERE src.key = 86
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@dest1_n5
+POSTHOOK: query: INSERT OVERWRITE TABLE dest1_n5
+SELECT trim(trim(trim(trim(trim(trim(trim(trim(trim(trim( '  abc  '))))))))))
+FROM src
+WHERE src.key = 86
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@dest1_n5
+POSTHOOK: Lineage: dest1_n5.c1 SIMPLE []
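Note that the plan above never evaluates trim at run time: the ten nested trim calls over '  abc  ' are constant-folded, so the Select Operator emits the literal 'abc'. A minimal sketch (assuming constant propagation is enabled, as in these tests):

    -- Both fold to the same constant expression 'abc' in the plan:
    SELECT trim('  abc  ') FROM src WHERE src.key = 86;
    SELECT trim(trim(trim('  abc  '))) FROM src WHERE src.key = 86;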
diff --git a/ql/src/test/results/clientpositive/udf_E.q.out b/ql/src/test/results/clientpositive/llap/udf_E.q.out
similarity index 87%
rename from ql/src/test/results/clientpositive/udf_E.q.out
rename to ql/src/test/results/clientpositive/llap/udf_E.q.out
index 469f396..8a67e1d 100644
--- a/ql/src/test/results/clientpositive/udf_E.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_E.q.out
@@ -19,11 +19,9 @@ STAGE PLANS:
         TableScan
           alias: src
           Row Limit Per Split: 1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 2.718281828459045D (type: double)
             outputColumnNames: _col0
-            Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select E() FROM src tablesample (1 rows)
@@ -72,11 +70,9 @@ STAGE PLANS:
         TableScan
           alias: src
           Row Limit Per Split: 1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 2.718281828459045D (type: double)
             outputColumnNames: _col0
-            Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select E() FROM src tablesample (1 rows)
diff --git a/ql/src/test/results/clientpositive/udf_PI.q.out b/ql/src/test/results/clientpositive/llap/udf_PI.q.out
similarity index 87%
rename from ql/src/test/results/clientpositive/udf_PI.q.out
rename to ql/src/test/results/clientpositive/llap/udf_PI.q.out
index a9ec8c1..f7b8f7c 100644
--- a/ql/src/test/results/clientpositive/udf_PI.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_PI.q.out
@@ -19,11 +19,9 @@ STAGE PLANS:
         TableScan
           alias: src
           Row Limit Per Split: 1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 3.141592653589793D (type: double)
             outputColumnNames: _col0
-            Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select PI() FROM src tablesample (1 rows)
@@ -72,11 +70,9 @@ STAGE PLANS:
         TableScan
           alias: src
           Row Limit Per Split: 1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 3.141592653589793D (type: double)
             outputColumnNames: _col0
-            Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select PI() FROM src tablesample (1 rows)
diff --git a/ql/src/test/results/clientpositive/udf_abs.q.out b/ql/src/test/results/clientpositive/llap/udf_abs.q.out
similarity index 89%
rename from ql/src/test/results/clientpositive/udf_abs.q.out
rename to ql/src/test/results/clientpositive/llap/udf_abs.q.out
index fee7592..9ba23cf 100644
--- a/ql/src/test/results/clientpositive/udf_abs.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_abs.q.out
@@ -46,11 +46,9 @@ STAGE PLANS:
         TableScan
           alias: src
           Row Limit Per Split: 1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 0 (type: int), 1 (type: int), 123 (type: int), 9223372036854775807L (type: bigint), 9223372036854775807L (type: bigint)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4
-            Statistics: Num rows: 500 Data size: 14000 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: SELECT
@@ -101,11 +99,9 @@ STAGE PLANS:
         TableScan
           alias: src
           Row Limit Per Split: 1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 0 (type: decimal(1,0)), 3.14159265 (type: decimal(9,8)), 3.14159265 (type: decimal(9,8))
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 500 Data size: 168000 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: SELECT
diff --git a/ql/src/test/results/clientpositive/udf_add_months.q.out b/ql/src/test/results/clientpositive/llap/udf_add_months.q.out
similarity index 96%
rename from ql/src/test/results/clientpositive/udf_add_months.q.out
rename to ql/src/test/results/clientpositive/llap/udf_add_months.q.out
index 7150f59..61f9e07 100644
--- a/ql/src/test/results/clientpositive/udf_add_months.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_add_months.q.out
@@ -37,11 +37,9 @@ STAGE PLANS:
         TableScan
           alias: _dummy_table
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: '2014-02-14' (type: string)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select
diff --git a/ql/src/test/results/clientpositive/udf_aes_decrypt.q.out b/ql/src/test/results/clientpositive/llap/udf_aes_decrypt.q.out
similarity index 95%
rename from ql/src/test/results/clientpositive/udf_aes_decrypt.q.out
rename to ql/src/test/results/clientpositive/llap/udf_aes_decrypt.q.out
index 89caf99..c4d983c 100644
--- a/ql/src/test/results/clientpositive/udf_aes_decrypt.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_aes_decrypt.q.out
@@ -32,11 +32,9 @@ STAGE PLANS:
         TableScan
           alias: _dummy_table
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 414243 (type: binary)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select
diff --git a/ql/src/test/results/clientpositive/udf_aes_encrypt.q.out b/ql/src/test/results/clientpositive/llap/udf_aes_encrypt.q.out
similarity index 94%
rename from ql/src/test/results/clientpositive/udf_aes_encrypt.q.out
rename to ql/src/test/results/clientpositive/llap/udf_aes_encrypt.q.out
index 3a96c4b..4879bf7 100644
--- a/ql/src/test/results/clientpositive/udf_aes_encrypt.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_aes_encrypt.q.out
@@ -32,11 +32,9 @@ STAGE PLANS:
         TableScan
           alias: _dummy_table
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: CBA4ACFB309839BA426E07D67F23564F (type: binary)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select
diff --git a/ql/src/test/results/clientpositive/udf_array.q.out b/ql/src/test/results/clientpositive/llap/udf_array.q.out
similarity index 92%
rename from ql/src/test/results/clientpositive/udf_array.q.out
rename to ql/src/test/results/clientpositive/llap/udf_array.q.out
index a2ebaba..d218f41 100644
--- a/ql/src/test/results/clientpositive/udf_array.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_array.q.out
@@ -31,11 +31,9 @@ STAGE PLANS:
         TableScan
           alias: src
           Row Limit Per Split: 1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: array() (type: array<string>), array()[1] (type: string), array(1,2,3) (type: array<int>), array(1,2,3)[2] (type: int), array(1,'a',2,3) (type: array<string>), array(1,'a',2,3)[2] (type: string), array(array(1),array(2),array(3),array(4))[1][0] (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-            Statistics: Num rows: 500 Data size: 624000 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: SELECT array(), array()[1], array(1, 2, 3), array(1, 2, 3)[2], array(1,"a", 2, 3), array(1,"a", 2, 3)[2],
diff --git a/ql/src/test/results/clientpositive/udf_ascii.q.out b/ql/src/test/results/clientpositive/llap/udf_ascii.q.out
similarity index 90%
rename from ql/src/test/results/clientpositive/udf_ascii.q.out
rename to ql/src/test/results/clientpositive/llap/udf_ascii.q.out
index 42dd231..d0dd7a6 100644
--- a/ql/src/test/results/clientpositive/udf_ascii.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_ascii.q.out
@@ -42,11 +42,9 @@ STAGE PLANS:
         TableScan
           alias: src
           Row Limit Per Split: 1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 70 (type: int), 0 (type: int), 33 (type: int)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 500 Data size: 6000 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: SELECT
diff --git a/ql/src/test/results/clientpositive/udf_between.q.out b/ql/src/test/results/clientpositive/llap/udf_between.q.out
similarity index 82%
rename from ql/src/test/results/clientpositive/udf_between.q.out
rename to ql/src/test/results/clientpositive/llap/udf_between.q.out
index 1a4eb07..e8fb4a8 100644
--- a/ql/src/test/results/clientpositive/udf_between.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_between.q.out
@@ -29,17 +29,13 @@ STAGE PLANS:
         TableScan
           alias: src
           filterExpr: (UDFToDouble(key) + 100.0D) BETWEEN 100.0D AND 200.0D (type: boolean)
-          Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
           Filter Operator
             predicate: (UDFToDouble(key) + 100.0D) BETWEEN 100.0D AND 200.0D (type: boolean)
-            Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE
               Limit
                 Number of rows: 20
-                Statistics: Num rows: 20 Data size: 3560 Basic stats: COMPLETE Column stats: COMPLETE
                 ListSink
 
 PREHOOK: query: SELECT * FROM src where key + 100 between (150 + -50) AND (150 + 50) LIMIT 20
@@ -89,17 +85,13 @@ STAGE PLANS:
         TableScan
           alias: src
           filterExpr: (UDFToDouble(key) + 100.0D) NOT BETWEEN 100.0D AND 200.0D (type: boolean)
-          Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
           Filter Operator
             predicate: (UDFToDouble(key) + 100.0D) NOT BETWEEN 100.0D AND 200.0D (type: boolean)
-            Statistics: Num rows: 445 Data size: 79210 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 445 Data size: 79210 Basic stats: COMPLETE Column stats: COMPLETE
               Limit
                 Number of rows: 20
-                Statistics: Num rows: 20 Data size: 3560 Basic stats: COMPLETE Column stats: COMPLETE
                 ListSink
 
 PREHOOK: query: SELECT * FROM src where key + 100 not between (150 + -50) AND (150 + 50) LIMIT 20
@@ -148,14 +140,11 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: src
-          Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: key (type: string), value (type: string)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
             Limit
               Number of rows: 1
-              Statistics: Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
               ListSink
 
 PREHOOK: query: SELECT * FROM src where 'b' between 'a' AND 'c' LIMIT 1
@@ -185,14 +174,11 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: src
-          Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: key (type: string), value (type: string)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
             Limit
               Number of rows: 1
-              Statistics: Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
               ListSink
 
 PREHOOK: query: SELECT * FROM src where 2 between 2 AND '3' LIMIT 1
@@ -257,14 +243,11 @@ STAGE PLANS:
         TableScan
           alias: t
           filterExpr: (i BETWEEN 8 AND 9 or i BETWEEN 9 AND 10) (type: boolean)
-          Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
           Filter Operator
             predicate: (i BETWEEN 8 AND 9 or i BETWEEN 9 AND 10) (type: boolean)
-            Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: i (type: int)
               outputColumnNames: _col0
-              Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
               ListSink
 
 PREHOOK: query: SELECT * FROM t	where	i between 8 and 9
@@ -303,14 +286,11 @@ STAGE PLANS:
         TableScan
           alias: t
           filterExpr: (i BETWEEN 6 AND 7 or i BETWEEN 9 AND 10) (type: boolean)
-          Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
           Filter Operator
             predicate: (i BETWEEN 6 AND 7 or i BETWEEN 9 AND 10) (type: boolean)
-            Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: i (type: int)
               outputColumnNames: _col0
-              Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
               ListSink
 
 PREHOOK: query: SELECT * FROM t	where	i between 6 and 7
@@ -350,14 +330,11 @@ STAGE PLANS:
         TableScan
           alias: t
           filterExpr: (i NOT BETWEEN 6 AND 7 and i NOT BETWEEN 9 AND 10) (type: boolean)
-          Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
           Filter Operator
             predicate: (i NOT BETWEEN 6 AND 7 and i NOT BETWEEN 9 AND 10) (type: boolean)
-            Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: i (type: int)
               outputColumnNames: _col0
-              Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
               ListSink
 
 PREHOOK: query: SELECT * FROM t	where	i not between 6 and 7 
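A short note on the first predicate above: the constant bounds are folded ((150 + -50) to 100.0D, (150 + 50) to 200.0D) and key is widened via UDFToDouble before the comparison, which is exactly the filterExpr shown in the plan. Sketch of the original query shape:

    -- Rewritten by the optimizer to
    -- (UDFToDouble(key) + 100.0D) BETWEEN 100.0D AND 200.0D:
    SELECT * FROM src
    WHERE key + 100 BETWEEN (150 + -50) AND (150 + 50)
    LIMIT 20;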
diff --git a/ql/src/test/results/clientpositive/udf_bitwise_shiftleft.q.out b/ql/src/test/results/clientpositive/llap/udf_bitwise_shiftleft.q.out
similarity index 96%
rename from ql/src/test/results/clientpositive/udf_bitwise_shiftleft.q.out
rename to ql/src/test/results/clientpositive/llap/udf_bitwise_shiftleft.q.out
index 0dd68b7..c81eb0a 100644
--- a/ql/src/test/results/clientpositive/udf_bitwise_shiftleft.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_bitwise_shiftleft.q.out
@@ -33,11 +33,9 @@ STAGE PLANS:
         TableScan
           alias: _dummy_table
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 8 (type: int)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select
diff --git a/ql/src/test/results/clientpositive/udf_bitwise_shiftright.q.out b/ql/src/test/results/clientpositive/llap/udf_bitwise_shiftright.q.out
similarity index 96%
rename from ql/src/test/results/clientpositive/udf_bitwise_shiftright.q.out
rename to ql/src/test/results/clientpositive/llap/udf_bitwise_shiftright.q.out
index 9b3b17d..6a7db86 100644
--- a/ql/src/test/results/clientpositive/udf_bitwise_shiftright.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_bitwise_shiftright.q.out
@@ -33,11 +33,9 @@ STAGE PLANS:
         TableScan
           alias: _dummy_table
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 2 (type: int)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select
diff --git a/ql/src/test/results/clientpositive/udf_bitwise_shiftrightunsigned.q.out b/ql/src/test/results/clientpositive/llap/udf_bitwise_shiftrightunsigned.q.out
similarity index 96%
rename from ql/src/test/results/clientpositive/udf_bitwise_shiftrightunsigned.q.out
rename to ql/src/test/results/clientpositive/llap/udf_bitwise_shiftrightunsigned.q.out
index be49958..5a150df 100644
--- a/ql/src/test/results/clientpositive/udf_bitwise_shiftrightunsigned.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_bitwise_shiftrightunsigned.q.out
@@ -33,11 +33,9 @@ STAGE PLANS:
         TableScan
           alias: _dummy_table
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 2 (type: int)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select
diff --git a/ql/src/test/results/clientpositive/udf_case.q.out b/ql/src/test/results/clientpositive/llap/udf_case.q.out
similarity index 96%
rename from ql/src/test/results/clientpositive/udf_case.q.out
rename to ql/src/test/results/clientpositive/llap/udf_case.q.out
index 60a348e..49fc66c 100644
--- a/ql/src/test/results/clientpositive/udf_case.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_case.q.out
@@ -93,11 +93,9 @@ STAGE PLANS:
         TableScan
           alias: src
           Row Limit Per Split: 1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 2 (type: int), 5 (type: int), 15 (type: int), null (type: int), 20 (type: int), 24 (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-            Statistics: Num rows: 500 Data size: 10004 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: SELECT CASE 1
diff --git a/ql/src/test/results/clientpositive/llap/udf_case_column_pruning.q.out b/ql/src/test/results/clientpositive/llap/udf_case_column_pruning.q.out
new file mode 100644
index 0000000..770c231
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/udf_case_column_pruning.q.out
@@ -0,0 +1,163 @@
+PREHOOK: query: EXPLAIN
+SELECT CASE a.key
+        WHEN '1' THEN 2
+        WHEN '3' THEN 4
+        ELSE 5
+       END as key
+FROM src a JOIN src b
+ON a.key = b.key
+ORDER BY key LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: EXPLAIN
+SELECT CASE a.key
+        WHEN '1' THEN 2
+        WHEN '3' THEN 4
+        ELSE 5
+       END as key
+FROM src a JOIN src b
+ON a.key = b.key
+ORDER BY key LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  filterExpr: key is not null (type: boolean)
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), CASE WHEN ((key = '1')) THEN (2) WHEN ((key = '3')) THEN (4) ELSE (5) END (type: int)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        null sort order: z
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: int)
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  filterExpr: key is not null (type: boolean)
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        null sort order: z
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col1
+                Statistics: Num rows: 791 Data size: 3164 Basic stats: COMPLETE Column stats: COMPLETE
+                Top N Key Operator
+                  sort order: +
+                  keys: _col1 (type: int)
+                  null sort order: z
+                  Statistics: Num rows: 791 Data size: 3164 Basic stats: COMPLETE Column stats: COMPLETE
+                  top n: 10
+                  Select Operator
+                    expressions: _col1 (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 791 Data size: 3164 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: int)
+                      null sort order: z
+                      sort order: +
+                      Statistics: Num rows: 791 Data size: 3164 Basic stats: COMPLETE Column stats: COMPLETE
+                      TopN Hash Memory Usage: 0.1
+        Reducer 3 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 791 Data size: 3164 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 10
+                  Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 10
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT CASE a.key
+        WHEN '1' THEN 2
+        WHEN '3' THEN 4
+        ELSE 5
+       END as key
+FROM src a JOIN src b
+ON a.key = b.key
+ORDER BY key LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT CASE a.key
+        WHEN '1' THEN 2
+        WHEN '3' THEN 4
+        ELSE 5
+       END as key
+FROM src a JOIN src b
+ON a.key = b.key
+ORDER BY key LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+5
+5
+5
+5
+5
+5
+5
+5
+5
+5
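
The new golden file above captures the point of the test: the CASE expression is evaluated inside Map 1's Select Operator, before the Merge Join, so column pruning lets only the resulting int travel through the shuffle. A sketch for observing the same behavior on any LLAP-enabled session (not part of this patch; the plan text will vary by version):

  EXPLAIN
  SELECT CASE a.key WHEN '1' THEN 2 ELSE 5 END AS key
  FROM src a JOIN src b ON a.key = b.key;
  -- the CASE should appear under Map 1, ahead of the Merge Join Operator
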
diff --git a/ql/src/test/results/clientpositive/udf_case_thrift.q.out b/ql/src/test/results/clientpositive/llap/udf_case_thrift.q.out
similarity index 94%
rename from ql/src/test/results/clientpositive/udf_case_thrift.q.out
rename to ql/src/test/results/clientpositive/llap/udf_case_thrift.q.out
index e540ff9..3a70d8b 100644
--- a/ql/src/test/results/clientpositive/udf_case_thrift.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_case_thrift.q.out
@@ -49,11 +49,9 @@ STAGE PLANS:
         TableScan
           alias: src_thrift
           Row Limit Per Split: 3
-          Statistics: Num rows: 11 Data size: 22440 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: CASE (lint[0]) WHEN (0) THEN ((lint[0] + 1)) WHEN (1) THEN ((lint[0] + 2)) WHEN (2) THEN (100) ELSE (5) END (type: int), CASE (lstring[0]) WHEN ('0') THEN ('zero') WHEN ('10') THEN (concat(lstring[0], ' is ten')) ELSE ('default') END (type: string), CASE (lstring[0]) WHEN ('0') THEN (lstring) ELSE (null) END[0] (type: string)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 11 Data size: 22440 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: SELECT CASE src_thrift.lint[0]
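
The retained hunk above shows the interesting expression: a CASE that returns an array (lstring) and is indexed in place with END[0]. A self-contained sketch of the same construct, using an array literal in place of the thrift-backed column (illustrative only):

  SELECT CASE '0' WHEN '0' THEN array('zero','one') ELSE NULL END[0];
  -- expected: zero
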
diff --git a/ql/src/test/results/clientpositive/udf_cbrt.q.out b/ql/src/test/results/clientpositive/llap/udf_cbrt.q.out
similarity index 89%
rename from ql/src/test/results/clientpositive/udf_cbrt.q.out
rename to ql/src/test/results/clientpositive/llap/udf_cbrt.q.out
index fd02051..45461af 100644
--- a/ql/src/test/results/clientpositive/udf_cbrt.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_cbrt.q.out
@@ -32,11 +32,9 @@ STAGE PLANS:
         TableScan
           alias: _dummy_table
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 3.0D (type: double)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select
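
The plan above shows the cube root already folded to the literal 3.0D, i.e. the optimizer evaluated a call such as cbrt(27.0) at compile time (the exact input literal is in the q file, not this hunk). A standalone check:

  SELECT cbrt(27.0);  -- 3.0
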
diff --git a/ql/src/test/results/clientpositive/udf_character_length.q.out b/ql/src/test/results/clientpositive/llap/udf_character_length.q.out
similarity index 64%
rename from ql/src/test/results/clientpositive/udf_character_length.q.out
rename to ql/src/test/results/clientpositive/llap/udf_character_length.q.out
index e854005..e4c22c6 100644
--- a/ql/src/test/results/clientpositive/udf_character_length.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_character_length.q.out
@@ -48,70 +48,70 @@ POSTHOOK: Input: default@src1
 POSTHOOK: Output: default@dest1_n97
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
-  Stage-4
-  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
-  Stage-2 depends on stages: Stage-0
-  Stage-3
-  Stage-5
-  Stage-6 depends on stages: Stage-5
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
 
 STAGE PLANS:
   Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src1
-            Statistics: Num rows: 25 Data size: 2225 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: character_length(value) (type: int)
-              outputColumnNames: _col0
-              Statistics: Num rows: 25 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 25 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.dest1_n97
-              Select Operator
-                expressions: _col0 (type: int)
-                outputColumnNames: len
-                Statistics: Num rows: 25 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
-                Group By Operator
-                  aggregations: compute_stats(len, 'hll')
-                  minReductionHashAggr: 0.99
-                  mode: hash
-                  outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 424 Basic stats: COMPLETE Column stats: COMPLETE
-                  Reduce Output Operator
-                    null sort order: 
-                    sort order: 
-                    Statistics: Num rows: 1 Data size: 424 Basic stats: COMPLETE Column stats: COMPLETE
-                    value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: compute_stats(VALUE._col0)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: COMPLETE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-7
-    Conditional Operator
-
-  Stage: Stage-4
-    Move Operator
-      files:
-          hdfs directory: true
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src1
+                  Statistics: Num rows: 25 Data size: 2225 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: character_length(value) (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 25 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 25 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.dest1_n97
+                    Select Operator
+                      expressions: _col0 (type: int)
+                      outputColumnNames: len
+                      Statistics: Num rows: 25 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
+                      Group By Operator
+                        aggregations: compute_stats(len, 'hll')
+                        minReductionHashAggr: 0.96
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 424 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          null sort order: 
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 424 Basic stats: COMPLETE Column stats: COMPLETE
+                          value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-2
+    Dependency Collection
 
   Stage: Stage-0
     Move Operator
@@ -123,7 +123,7 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.dest1_n97
 
-  Stage: Stage-2
+  Stage: Stage-3
     Stats Work
       Basic Stats Work:
       Column Stats Desc:
@@ -131,36 +131,6 @@ STAGE PLANS:
           Column Types: int
           Table: default.dest1_n97
 
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.dest1_n97
-
-  Stage: Stage-5
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.dest1_n97
-
-  Stage: Stage-6
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
 PREHOOK: query: FROM src1 INSERT OVERWRITE TABLE dest1_n97 SELECT character_length(src1.value)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
@@ -254,11 +224,9 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: dest1_n97
-          Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: character_length(name) (type: int)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: SELECT character_length(dest1_n97.name) FROM dest1_n97
@@ -289,11 +257,9 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: dest1_n97
-          Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: character_length(name) (type: int)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: SELECT char_length(dest1_n97.name) FROM dest1_n97
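
As the last query above suggests, char_length is an alias of character_length; both count characters rather than bytes. A quick sanity check (a sketch, not taken from the patch):

  SELECT character_length('hive'), char_length('hive');  -- 4	4
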
diff --git a/ql/src/test/results/clientpositive/udf_concat_insert1.q.out b/ql/src/test/results/clientpositive/llap/udf_concat_insert1.q.out
similarity index 100%
rename from ql/src/test/results/clientpositive/udf_concat_insert1.q.out
rename to ql/src/test/results/clientpositive/llap/udf_concat_insert1.q.out
diff --git a/ql/src/test/results/clientpositive/udf_concat_ws.q.out b/ql/src/test/results/clientpositive/llap/udf_concat_ws.q.out
similarity index 95%
rename from ql/src/test/results/clientpositive/udf_concat_ws.q.out
rename to ql/src/test/results/clientpositive/llap/udf_concat_ws.q.out
index 34bb9b3..71b5ace 100644
--- a/ql/src/test/results/clientpositive/udf_concat_ws.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_concat_ws.q.out
@@ -58,11 +58,9 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: dest1_n8
-          Statistics: Num rows: 1 Data size: 265 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: concat_ws(c1, c2, c3) (type: string), concat_ws(',', c1, c2, c3) (type: string), concat_ws(null, c1, c2, c3) (type: string), concat_ws('**', c1, null, c3) (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 1 Data size: 736 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: SELECT concat_ws(dest1_n8.c1, dest1_n8.c2, dest1_n8.c3),
@@ -113,11 +111,9 @@ STAGE PLANS:
         TableScan
           alias: dest1_n8
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 15 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: concat_ws('.', array('www','face','book','com'), '1234') (type: string), concat_ws('-', 'www', array('face','book','com'), '1234') (type: string), concat_ws('F', 'www', array('face','book','com','1234')) (type: string), concat_ws('_', array('www','face'), array('book','com','1234')) (type: string), concat_ws('**', 'www', array('face'), array('book','com','1234')) (type: string), concat_ws('[]', array('www'), 'face', array('book','com','1234')) (type: string), con [...]
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-            Statistics: Num rows: 1 Data size: 758 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: SELECT concat_ws('.', array('www', 'face', 'book', 'com'), '1234'),
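
The folded expressions above demonstrate that concat_ws accepts any mix of strings and arrays of strings after the separator. Reproducing the first of them standalone:

  SELECT concat_ws('.', array('www','face','book','com'), '1234');
  -- www.face.book.com.1234
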
diff --git a/ql/src/test/results/clientpositive/udf_crc32.q.out b/ql/src/test/results/clientpositive/llap/udf_crc32.q.out
similarity index 91%
rename from ql/src/test/results/clientpositive/udf_crc32.q.out
rename to ql/src/test/results/clientpositive/llap/udf_crc32.q.out
index ef48075..fa6e4c3 100644
--- a/ql/src/test/results/clientpositive/udf_crc32.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_crc32.q.out
@@ -34,11 +34,9 @@ STAGE PLANS:
         TableScan
           alias: _dummy_table
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 2743272264L (type: bigint)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select
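
Here the checksum is folded to the bigint literal 2743272264L at compile time; the input string itself sits in the q file rather than this hunk. A minimal call (any string or binary argument works):

  SELECT crc32('ABC');  -- a bigint checksum, constant-folded as above
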
diff --git a/ql/src/test/results/clientpositive/udf_current_database.q.out b/ql/src/test/results/clientpositive/llap/udf_current_database.q.out
similarity index 86%
rename from ql/src/test/results/clientpositive/udf_current_database.q.out
rename to ql/src/test/results/clientpositive/llap/udf_current_database.q.out
index 96561e6..7597a88 100644
--- a/ql/src/test/results/clientpositive/udf_current_database.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_current_database.q.out
@@ -24,11 +24,9 @@ STAGE PLANS:
         TableScan
           alias: _dummy_table
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 'default' (type: string)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select current_database()
@@ -73,11 +71,9 @@ STAGE PLANS:
         TableScan
           alias: _dummy_table
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 'xxx' (type: string)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select current_database()
@@ -116,11 +112,9 @@ STAGE PLANS:
         TableScan
           alias: _dummy_table
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 'default' (type: string)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select current_database()
@@ -159,11 +153,9 @@ STAGE PLANS:
         TableScan
           alias: _dummy_table
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 'xxx' (type: string)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select current_database()
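
The four plans above alternate between 'default' and 'xxx' because current_database() is resolved to a constant at compile time against the session's current database. The flow, condensed (database name taken from the output above; assumes xxx does not already exist):

  SELECT current_database();   -- default
  CREATE DATABASE xxx;
  USE xxx;
  SELECT current_database();   -- xxx
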
diff --git a/ql/src/test/results/clientpositive/udf_date_format.q.out b/ql/src/test/results/clientpositive/llap/udf_date_format.q.out
similarity index 97%
rename from ql/src/test/results/clientpositive/udf_date_format.q.out
rename to ql/src/test/results/clientpositive/llap/udf_date_format.q.out
index 928948c..034bfe3 100644
--- a/ql/src/test/results/clientpositive/udf_date_format.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_date_format.q.out
@@ -32,11 +32,9 @@ STAGE PLANS:
         TableScan
           alias: _dummy_table
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 'Wednesday' (type: string)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select
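
'Wednesday' in the folded plan is the output of a full day-name pattern; the source literal lives in the q file. A sketch using a date known to fall on a Wednesday (pattern letters follow java.text.SimpleDateFormat):

  SELECT date_format('2015-04-08', 'EEEE');  -- Wednesday
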
diff --git a/ql/src/test/results/clientpositive/udf_datetime_legacy_hybrid_calendar.q.out b/ql/src/test/results/clientpositive/llap/udf_datetime_legacy_hybrid_calendar.q.out
similarity index 83%
rename from ql/src/test/results/clientpositive/udf_datetime_legacy_hybrid_calendar.q.out
rename to ql/src/test/results/clientpositive/llap/udf_datetime_legacy_hybrid_calendar.q.out
index bd22442..a505a25 100644
--- a/ql/src/test/results/clientpositive/udf_datetime_legacy_hybrid_calendar.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_datetime_legacy_hybrid_calendar.q.out
@@ -109,23 +109,27 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: datetime_legacy_hybrid_calendar
-            Statistics: Num rows: 2 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: dt (type: date), datetime_legacy_hybrid_calendar(dt) (type: date), ts (type: timestamp), datetime_legacy_hybrid_calendar(ts) (type: timestamp)
-              outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 2 Data size: 384 Basic stats: COMPLETE Column stats: COMPLETE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 2 Data size: 384 Basic stats: COMPLETE Column stats: COMPLETE
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-      Execution mode: vectorized
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: datetime_legacy_hybrid_calendar
+                  Statistics: Num rows: 2 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: dt (type: date), datetime_legacy_hybrid_calendar(dt) (type: date), ts (type: timestamp), datetime_legacy_hybrid_calendar(ts) (type: timestamp)
+                    outputColumnNames: _col0, _col1, _col2, _col3
+                    Statistics: Num rows: 2 Data size: 384 Basic stats: COMPLETE Column stats: COMPLETE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 2 Data size: 384 Basic stats: COMPLETE Column stats: COMPLETE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
 
   Stage: Stage-0
     Fetch Operator
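
datetime_legacy_hybrid_calendar reinterprets date/timestamp values between the proleptic Gregorian calendar and the legacy hybrid Julian/Gregorian one, which only affects values around or before the 1582 cutover. The scan above pairs each column with its converted form; equivalently (table and column names taken from the plan above):

  SELECT dt, datetime_legacy_hybrid_calendar(dt),
         ts, datetime_legacy_hybrid_calendar(ts)
  FROM datetime_legacy_hybrid_calendar;
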
diff --git a/ql/src/test/results/clientpositive/udf_decode.q.out b/ql/src/test/results/clientpositive/llap/udf_decode.q.out
similarity index 91%
rename from ql/src/test/results/clientpositive/udf_decode.q.out
rename to ql/src/test/results/clientpositive/llap/udf_decode.q.out
index bb6cd78..6643309 100644
--- a/ql/src/test/results/clientpositive/udf_decode.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_decode.q.out
@@ -32,11 +32,9 @@ STAGE PLANS:
         TableScan
           alias: _dummy_table
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 'TestDecode1' (type: string)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 95 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select
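
decode here is the charset UDF (binary to string), not Oracle-style decode; the folded 'TestDecode1' is a decoded binary literal. Round-tripping through its counterpart encode (a sketch, assuming a default session):

  SELECT decode(encode('TestDecode1', 'UTF-8'), 'UTF-8');  -- TestDecode1
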
diff --git a/ql/src/test/results/clientpositive/udf_degrees.q.out b/ql/src/test/results/clientpositive/llap/udf_degrees.q.out
similarity index 88%
rename from ql/src/test/results/clientpositive/udf_degrees.q.out
rename to ql/src/test/results/clientpositive/llap/udf_degrees.q.out
index 1440c5f..b556224 100644
--- a/ql/src/test/results/clientpositive/udf_degrees.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_degrees.q.out
@@ -19,11 +19,9 @@ STAGE PLANS:
         TableScan
           alias: src
           Row Limit Per Split: 1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 180.0D (type: double)
             outputColumnNames: _col0
-            Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select degrees(PI()) FROM src tablesample (1 rows)
@@ -72,11 +70,9 @@ STAGE PLANS:
         TableScan
           alias: src
           Row Limit Per Split: 1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 180.0D (type: double)
             outputColumnNames: _col0
-            Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select degrees(PI()) FROM src tablesample (1 rows)
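
Both plans fold degrees(PI()) to 180.0D at compile time, as the repeated query shows. Standalone:

  SELECT degrees(pi());  -- 180.0
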
diff --git a/ql/src/test/results/clientpositive/udf_elt.q.out b/ql/src/test/results/clientpositive/llap/udf_elt.q.out
similarity index 94%
rename from ql/src/test/results/clientpositive/udf_elt.q.out
rename to ql/src/test/results/clientpositive/llap/udf_elt.q.out
index 2e7fb9f..c17af3d 100644
--- a/ql/src/test/results/clientpositive/udf_elt.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_elt.q.out
@@ -56,11 +56,9 @@ STAGE PLANS:
         TableScan
           alias: src
           Row Limit Per Split: 1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 'defg' (type: string), 'cc' (type: string), 'abc' (type: string), '2' (type: string), '12345' (type: string), '123456789012' (type: string), '1.25' (type: string), '16.0' (type: string), null (type: string), null (type: string), null (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
-            Statistics: Num rows: 500 Data size: 353752 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: SELECT elt(2, 'abc', 'defg'),
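
The folded row above documents elt's 1-based indexing ('defg' for index 2) and the trailing NULLs for out-of-range or NULL indexes. Minimal form:

  SELECT elt(2, 'abc', 'defg');  -- defg
  SELECT elt(0, 'abc');          -- NULL (index out of range)
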
diff --git a/ql/src/test/results/clientpositive/udf_example_add.q.out b/ql/src/test/results/clientpositive/llap/udf_example_add.q.out
similarity index 89%
rename from ql/src/test/results/clientpositive/udf_example_add.q.out
rename to ql/src/test/results/clientpositive/llap/udf_example_add.q.out
index 43b4edd..79c568c 100644
--- a/ql/src/test/results/clientpositive/udf_example_add.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_example_add.q.out
@@ -38,14 +38,11 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: src
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 3 (type: int), 6 (type: int), 10 (type: int), 3.3000000000000003D (type: double), 6.6D (type: double), 11.0D (type: double), 10.4D (type: double)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-            Statistics: Num rows: 500 Data size: 22000 Basic stats: COMPLETE Column stats: COMPLETE
             Limit
               Number of rows: 1
-              Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
               ListSink
 
 PREHOOK: query: SELECT example_add(1, 2),
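
example_add is not a built-in; it is the contrib example UDF that the q file registers before use, and the plan above shows its results (3, 6, 10, ...) constant-folded. A hedged sketch of the registration step (class name assumed from hive-contrib; the jar must already be on the classpath):

  CREATE TEMPORARY FUNCTION example_add
    AS 'org.apache.hadoop.hive.contrib.udf.example.UDFExampleAdd';
  SELECT example_add(1, 2);  -- 3
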
diff --git a/ql/src/test/results/clientpositive/llap/udf_explode.q.out b/ql/src/test/results/clientpositive/llap/udf_explode.q.out
new file mode 100644
index 0000000..0f9d91d
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/udf_explode.q.out
@@ -0,0 +1,655 @@
+PREHOOK: query: DESCRIBE FUNCTION explode
+PREHOOK: type: DESCFUNCTION
+POSTHOOK: query: DESCRIBE FUNCTION explode
+POSTHOOK: type: DESCFUNCTION
+explode(a) - separates the elements of array a into multiple rows, or the elements of a map into multiple rows and columns 
+PREHOOK: query: DESCRIBE FUNCTION EXTENDED explode
+PREHOOK: type: DESCFUNCTION
+POSTHOOK: query: DESCRIBE FUNCTION EXTENDED explode
+POSTHOOK: type: DESCFUNCTION
+explode(a) - separates the elements of array a into multiple rows, or the elements of a map into multiple rows and columns 
+Function class:org.apache.hadoop.hive.ql.udf.generic.GenericUDTFExplode
+Function type:BUILTIN
+PREHOOK: query: EXPLAIN EXTENDED SELECT explode(array(1, 2, 3)) AS myCol FROM src tablesample (1 rows) ORDER BY myCol
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: EXPLAIN EXTENDED SELECT explode(array(1, 2, 3)) AS myCol FROM src tablesample (1 rows) ORDER BY myCol
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Row Limit Per Split: 1
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  GatherStats: false
+                  Select Operator
+                    expressions: array(1,2,3) (type: array<int>)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 500 Data size: 28000 Basic stats: COMPLETE Column stats: COMPLETE
+                    UDTF Operator
+                      Statistics: Num rows: 500 Data size: 28000 Basic stats: COMPLETE Column stats: COMPLETE
+                      function name: explode
+                      Reduce Output Operator
+                        bucketingVersion: 2
+                        key expressions: col (type: int)
+                        null sort order: z
+                        numBuckets: -1
+                        sort order: +
+                        Statistics: Num rows: 500 Data size: 28000 Basic stats: COMPLETE Column stats: COMPLETE
+                        tag: -1
+                        auto parallelism: false
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: src
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    bucket_count -1
+                    bucketing_version 2
+                    column.name.delimiter ,
+                    columns key,value
+                    columns.types string:string
+#### A masked pattern was here ####
+                    name default.src
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucketing_version 2
+                      column.name.delimiter ,
+                      columns key,value
+                      columns.comments 'default','default'
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.src
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.src
+                  name: default.src
+            Truncated Path -> Alias:
+              /src [src]
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  bucketingVersion: 2
+                  compressed: false
+                  GlobalTableId: 0
+#### A masked pattern was here ####
+                  NumFilesPerFileSink: 1
+                  Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
+#### A masked pattern was here ####
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      properties:
+                        bucketing_version -1
+                        columns _col0
+                        columns.types int
+                        escape.delim \
+                        hive.serialization.extend.additional.nesting.levels true
+                        serialization.escape.crlf true
+                        serialization.format 1
+                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  TotalFiles: 1
+                  GatherStats: false
+                  MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN EXTENDED SELECT a.myCol, count(1) FROM (SELECT explode(array(1, 2, 3)) AS myCol FROM src tablesample (1 rows)) a GROUP BY a.myCol ORDER BY a.myCol
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: EXPLAIN EXTENDED SELECT a.myCol, count(1) FROM (SELECT explode(array(1, 2, 3)) AS myCol FROM src tablesample (1 rows)) a GROUP BY a.myCol ORDER BY a.myCol
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Row Limit Per Split: 1
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  GatherStats: false
+                  Select Operator
+                    expressions: array(1,2,3) (type: array<int>)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 500 Data size: 28000 Basic stats: COMPLETE Column stats: COMPLETE
+                    UDTF Operator
+                      Statistics: Num rows: 500 Data size: 28000 Basic stats: COMPLETE Column stats: COMPLETE
+                      function name: explode
+                      Group By Operator
+                        aggregations: count(1)
+                        keys: col (type: int)
+                        minReductionHashAggr: 0.99
+                        mode: hash
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          bucketingVersion: 2
+                          key expressions: _col0 (type: int)
+                          null sort order: z
+                          numBuckets: -1
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                          tag: -1
+                          value expressions: _col1 (type: bigint)
+                          auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: src
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    bucket_count -1
+                    bucketing_version 2
+                    column.name.delimiter ,
+                    columns key,value
+                    columns.types string:string
+#### A masked pattern was here ####
+                    name default.src
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucketing_version 2
+                      column.name.delimiter ,
+                      columns key,value
+                      columns.comments 'default','default'
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.src
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.src
+                  name: default.src
+            Truncated Path -> Alias:
+              /src [src]
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: int)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  bucketingVersion: 2
+                  key expressions: _col0 (type: int)
+                  null sort order: z
+                  numBuckets: -1
+                  sort order: +
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  tag: -1
+                  value expressions: _col1 (type: bigint)
+                  auto parallelism: false
+        Reducer 3 
+            Execution mode: vectorized, llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: bigint)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  bucketingVersion: 2
+                  compressed: false
+                  GlobalTableId: 0
+#### A masked pattern was here ####
+                  NumFilesPerFileSink: 1
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+#### A masked pattern was here ####
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      properties:
+                        bucketing_version -1
+                        columns _col0,_col1
+                        columns.types int:bigint
+                        escape.delim \
+                        hive.serialization.extend.additional.nesting.levels true
+                        serialization.escape.crlf true
+                        serialization.format 1
+                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  TotalFiles: 1
+                  GatherStats: false
+                  MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT explode(array(1, 2, 3)) AS myCol FROM src tablesample (1 rows) ORDER BY myCol
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT explode(array(1, 2, 3)) AS myCol FROM src tablesample (1 rows) ORDER BY myCol
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+1
+2
+3
+PREHOOK: query: SELECT explode(array(1, 2, 3)) AS (myCol) FROM src tablesample (1 rows) ORDER BY myCol
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT explode(array(1, 2, 3)) AS (myCol) FROM src tablesample (1 rows) ORDER BY myCol
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+1
+2
+3
+PREHOOK: query: SELECT a.myCol, count(1) FROM (SELECT explode(array(1, 2, 3)) AS myCol FROM src tablesample (1 rows)) a GROUP BY a.myCol ORDER BY a.myCol
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT a.myCol, count(1) FROM (SELECT explode(array(1, 2, 3)) AS myCol FROM src tablesample (1 rows)) a GROUP BY a.myCol ORDER BY a.myCol
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+1	1
+2	1
+3	1
+PREHOOK: query: EXPLAIN EXTENDED SELECT explode(map(1, 'one', 2, 'two', 3, 'three')) AS (key, val) FROM src tablesample (1 rows) ORDER BY key, val
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: EXPLAIN EXTENDED SELECT explode(map(1, 'one', 2, 'two', 3, 'three')) AS (key, val) FROM src tablesample (1 rows) ORDER BY key, val
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Row Limit Per Split: 1
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  GatherStats: false
+                  Select Operator
+                    expressions: map(1:'one',2:'two',3:'three') (type: map<int,string>)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 500 Data size: 259500 Basic stats: COMPLETE Column stats: COMPLETE
+                    UDTF Operator
+                      Statistics: Num rows: 500 Data size: 259500 Basic stats: COMPLETE Column stats: COMPLETE
+                      function name: explode
+                      Reduce Output Operator
+                        bucketingVersion: 2
+                        key expressions: key (type: int), value (type: string)
+                        null sort order: zz
+                        numBuckets: -1
+                        sort order: ++
+                        Statistics: Num rows: 500 Data size: 259500 Basic stats: COMPLETE Column stats: COMPLETE
+                        tag: -1
+                        auto parallelism: false
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: src
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    bucket_count -1
+                    bucketing_version 2
+                    column.name.delimiter ,
+                    columns key,value
+                    columns.types string:string
+#### A masked pattern was here ####
+                    name default.src
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucketing_version 2
+                      column.name.delimiter ,
+                      columns key,value
+                      columns.comments 'default','default'
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.src
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.src
+                  name: default.src
+            Truncated Path -> Alias:
+              /src [src]
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  bucketingVersion: 2
+                  compressed: false
+                  GlobalTableId: 0
+#### A masked pattern was here ####
+                  NumFilesPerFileSink: 1
+                  Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
+#### A masked pattern was here ####
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      properties:
+                        bucketing_version -1
+                        columns _col0,_col1
+                        columns.types int:string
+                        escape.delim \
+                        hive.serialization.extend.additional.nesting.levels true
+                        serialization.escape.crlf true
+                        serialization.format 1
+                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  TotalFiles: 1
+                  GatherStats: false
+                  MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN EXTENDED SELECT a.key, a.val, count(1) FROM (SELECT explode(map(1, 'one', 2, 'two', 3, 'three')) AS (key, val) FROM src tablesample (1 rows) ORDER BY key, value) a GROUP BY a.key, a.val ORDER BY a.key, a.val
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: EXPLAIN EXTENDED SELECT a.key, a.val, count(1) FROM (SELECT explode(map(1, 'one', 2, 'two', 3, 'three')) AS (key, val) FROM src tablesample (1 rows) ORDER BY key, value) a GROUP BY a.key, a.val ORDER BY a.key, a.val
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Row Limit Per Split: 1
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  GatherStats: false
+                  Select Operator
+                    expressions: map(1:'one',2:'two',3:'three') (type: map<int,string>)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 500 Data size: 259500 Basic stats: COMPLETE Column stats: COMPLETE
+                    UDTF Operator
+                      Statistics: Num rows: 500 Data size: 259500 Basic stats: COMPLETE Column stats: COMPLETE
+                      function name: explode
+                      Group By Operator
+                        aggregations: count(1)
+                        keys: key (type: int), value (type: string)
+                        minReductionHashAggr: 0.99
+                        mode: hash
+                        outputColumnNames: _col0, _col1, _col2
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          bucketingVersion: 2
+                          key expressions: _col0 (type: int), _col1 (type: string)
+                          null sort order: zz
+                          numBuckets: -1
+                          sort order: ++
+                          Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                          tag: -1
+                          value expressions: _col2 (type: bigint)
+                          auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: src
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    bucket_count -1
+                    bucketing_version 2
+                    column.name.delimiter ,
+                    columns key,value
+                    columns.types string:string
+#### A masked pattern was here ####
+                    name default.src
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucketing_version 2
+                      column.name.delimiter ,
+                      columns key,value
+                      columns.comments 'default','default'
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.src
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.src
+                  name: default.src
+            Truncated Path -> Alias:
+              /src [src]
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: int), KEY._col1 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  bucketingVersion: 2
+                  key expressions: _col0 (type: int), _col1 (type: string)
+                  null sort order: zz
+                  numBuckets: -1
+                  sort order: ++
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  tag: -1
+                  value expressions: _col2 (type: bigint)
+                  auto parallelism: false
+        Reducer 3 
+            Execution mode: vectorized, llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string), VALUE._col0 (type: bigint)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  bucketingVersion: 2
+                  compressed: false
+                  GlobalTableId: 0
+#### A masked pattern was here ####
+                  NumFilesPerFileSink: 1
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+#### A masked pattern was here ####
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      properties:
+                        bucketing_version -1
+                        columns _col0,_col1,_col2
+                        columns.types int:string:bigint
+                        escape.delim \
+                        hive.serialization.extend.additional.nesting.levels true
+                        serialization.escape.crlf true
+                        serialization.format 1
+                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  TotalFiles: 1
+                  GatherStats: false
+                  MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT explode(map(1, 'one', 2, 'two', 3, 'three')) AS (key, val) FROM src tablesample (1 rows) ORDER BY key, val
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT explode(map(1, 'one', 2, 'two', 3, 'three')) AS (key, val) FROM src tablesample (1 rows) ORDER BY key, val
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+1	one
+2	two
+3	three
+PREHOOK: query: SELECT a.key, a.val, count(1) FROM (SELECT explode(map(1, 'one', 2, 'two', 3, 'three')) AS (key, val) FROM src tablesample (1 rows) ORDER BY key, val) a GROUP BY a.key, a.val ORDER BY a.key, a.val
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT a.key, a.val, count(1) FROM (SELECT explode(map(1, 'one', 2, 'two', 3, 'three')) AS (key, val) FROM src tablesample (1 rows) ORDER BY key, val) a GROUP BY a.key, a.val ORDER BY a.key, a.val
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+1	one	1
+2	two	1
+3	three	1
+PREHOOK: query: drop table lazy_array_map
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table lazy_array_map
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table lazy_array_map (map_col map<int,string>, array_col array<string>)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@lazy_array_map
+POSTHOOK: query: create table lazy_array_map (map_col map<int,string>, array_col array<string>)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@lazy_array_map
+PREHOOK: query: INSERT OVERWRITE TABLE lazy_array_map select map(1, 'one', 2, 'two', 3, 'three'), array('100', '200', '300') FROM src tablesample (1 rows)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@lazy_array_map
+POSTHOOK: query: INSERT OVERWRITE TABLE lazy_array_map select map(1, 'one', 2, 'two', 3, 'three'), array('100', '200', '300') FROM src tablesample (1 rows)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@lazy_array_map
+POSTHOOK: Lineage: lazy_array_map.array_col EXPRESSION []
+POSTHOOK: Lineage: lazy_array_map.map_col EXPRESSION []
+PREHOOK: query: SELECT array_col, myCol FROM lazy_array_map lateral view explode(array_col) X AS myCol ORDER BY array_col, myCol
+PREHOOK: type: QUERY
+PREHOOK: Input: default@lazy_array_map
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT array_col, myCol FROM lazy_array_map lateral view explode(array_col) X AS myCol ORDER BY array_col, myCol
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@lazy_array_map
+#### A masked pattern was here ####
+["100","200","300"]	100
+["100","200","300"]	200
+["100","200","300"]	300
+PREHOOK: query: SELECT map_col, myKey, myValue FROM lazy_array_map lateral view explode(map_col) X AS myKey, myValue ORDER BY map_col, myKey, myValue
+PREHOOK: type: QUERY
+PREHOOK: Input: default@lazy_array_map
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT map_col, myKey, myValue FROM lazy_array_map lateral view explode(map_col) X AS myKey, myValue ORDER BY map_col, myKey, myValue
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@lazy_array_map
+#### A masked pattern was here ####
+{1:"one",2:"two",3:"three"}	1	one
+{1:"one",2:"two",3:"three"}	2	two
+{1:"one",2:"two",3:"three"}	3	three
diff --git a/ql/src/test/results/clientpositive/udf_factorial.q.out b/ql/src/test/results/clientpositive/llap/udf_factorial.q.out
similarity index 90%
rename from ql/src/test/results/clientpositive/udf_factorial.q.out
rename to ql/src/test/results/clientpositive/llap/udf_factorial.q.out
index d00f92c..74c8cd5 100644
--- a/ql/src/test/results/clientpositive/udf_factorial.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_factorial.q.out
@@ -33,11 +33,9 @@ STAGE PLANS:
         TableScan
           alias: _dummy_table
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 120L (type: bigint)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select
diff --git a/ql/src/test/results/clientpositive/udf_find_in_set.q.out b/ql/src/test/results/clientpositive/llap/udf_find_in_set.q.out
similarity index 97%
rename from ql/src/test/results/clientpositive/udf_find_in_set.q.out
rename to ql/src/test/results/clientpositive/llap/udf_find_in_set.q.out
index e12c0bd..f166e89 100644
--- a/ql/src/test/results/clientpositive/udf_find_in_set.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_find_in_set.q.out
@@ -36,11 +36,9 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: src1
-          Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: find_in_set(key, concat(key, ',', value)) (type: int)
             outputColumnNames: _col0
-            Statistics: Num rows: 25 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: FROM src1 SELECT find_in_set(src1.key,concat(src1.key,',',src1.value))
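Note: find_in_set(str, strlist) returns the 1-based position of str within the comma-delimited strlist, or 0 when it is absent (and 0 when str itself contains a comma). The expression in the plan above, find_in_set(key, concat(key, ',', value)), therefore always yields 1 for keys without embedded commas, since key is prepended as the first list element. Illustrative values (an assumption for illustration, not taken from the test data):

    SELECT find_in_set('ab', 'abc,b,ab,c,def');  -- 3
    SELECT find_in_set('xy', 'abc,b,ab,c,def');  -- 0
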
diff --git a/ql/src/test/results/clientpositive/llap/udf_folder_constants.q.out b/ql/src/test/results/clientpositive/llap/udf_folder_constants.q.out
new file mode 100644
index 0000000..b7ee1ed
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/udf_folder_constants.q.out
@@ -0,0 +1,148 @@
+PREHOOK: query: drop table if exists udf_tb1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists udf_tb1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table if exists udf_tb2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists udf_tb2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table udf_tb1 (year int, month int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@udf_tb1
+POSTHOOK: query: create table udf_tb1 (year int, month int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@udf_tb1
+PREHOOK: query: create table udf_tb2(month int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@udf_tb2
+POSTHOOK: query: create table udf_tb2(month int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@udf_tb2
+PREHOOK: query: insert into udf_tb1 values(2001, 11)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@udf_tb1
+POSTHOOK: query: insert into udf_tb1 values(2001, 11)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@udf_tb1
+POSTHOOK: Lineage: udf_tb1.month SCRIPT []
+POSTHOOK: Lineage: udf_tb1.year SCRIPT []
+PREHOOK: query: insert into udf_tb2 values(11)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@udf_tb2
+POSTHOOK: query: insert into udf_tb2 values(11)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@udf_tb2
+POSTHOOK: Lineage: udf_tb2.month SCRIPT []
+PREHOOK: query: explain
+select unix_timestamp(concat(a.year, '-01-01 00:00:00')) from (select * from udf_tb1 where year=2001) a join udf_tb2 b on (a.month=b.month)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@udf_tb1
+PREHOOK: Input: default@udf_tb2
+#### A masked pattern was here ####
+POSTHOOK: query: explain
+select unix_timestamp(concat(a.year, '-01-01 00:00:00')) from (select * from udf_tb1 where year=2001) a join udf_tb2 b on (a.month=b.month)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@udf_tb1
+POSTHOOK: Input: default@udf_tb2
+#### A masked pattern was here ####
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: udf_tb1
+                  filterExpr: ((year = 2001) and month is not null) (type: boolean)
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: ((year = 2001) and month is not null) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: month (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        null sort order: z
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  filterExpr: month is not null (type: boolean)
+                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: month is not null (type: boolean)
+                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: month (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        null sort order: z
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: int)
+                  1 _col0 (type: int)
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: 978336000L (type: bigint)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select unix_timestamp(concat(a.year, '-01-01 00:00:00')) from (select * from udf_tb1 where year=2001) a join udf_tb2 b on (a.month=b.month)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@udf_tb1
+PREHOOK: Input: default@udf_tb2
+#### A masked pattern was here ####
+POSTHOOK: query: select unix_timestamp(concat(a.year, '-01-01 00:00:00')) from (select * from udf_tb1 where year=2001) a join udf_tb2 b on (a.month=b.month)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@udf_tb1
+POSTHOOK: Input: default@udf_tb2
+#### A masked pattern was here ####
+978336000
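Note: because the filter pins year to 2001, the optimizer folds unix_timestamp(concat(a.year, '-01-01 00:00:00')) into the constant 978336000L seen in Reducer 2's Select Operator. The arithmetic checks out under the qtest suite's US/Pacific timezone (an assumption about the test environment, not stated in the diff): 2001-01-01 00:00:00 UTC is 11323 days * 86400 = 978307200 seconds after the epoch, and the UTC-8 offset adds 8 * 3600 = 28800, giving 978307200 + 28800 = 978336000.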
diff --git a/ql/src/test/results/clientpositive/udf_format_number.q.out b/ql/src/test/results/clientpositive/llap/udf_format_number.q.out
similarity index 98%
rename from ql/src/test/results/clientpositive/udf_format_number.q.out
rename to ql/src/test/results/clientpositive/llap/udf_format_number.q.out
index 7ca7657..83a9f90 100644
--- a/ql/src/test/results/clientpositive/udf_format_number.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_format_number.q.out
@@ -50,11 +50,9 @@ STAGE PLANS:
         TableScan
           alias: src
           Row Limit Per Split: 1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: '12,332.1235' (type: string), '12,332.1000' (type: string), '12,332' (type: string), '12332.2' (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 500 Data size: 185500 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: SELECT format_number(12332.123456, 4),
diff --git a/ql/src/test/results/clientpositive/udf_from_utc_timestamp.q.out b/ql/src/test/results/clientpositive/llap/udf_from_utc_timestamp.q.out
similarity index 96%
rename from ql/src/test/results/clientpositive/udf_from_utc_timestamp.q.out
rename to ql/src/test/results/clientpositive/llap/udf_from_utc_timestamp.q.out
index b396773..aa6a5e7 100644
--- a/ql/src/test/results/clientpositive/udf_from_utc_timestamp.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_from_utc_timestamp.q.out
@@ -29,11 +29,9 @@ STAGE PLANS:
         TableScan
           alias: _dummy_table
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: TIMESTAMP'2012-02-11 02:30:00' (type: timestamp)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select
diff --git a/ql/src/test/results/clientpositive/udf_get_json_object.q.out b/ql/src/test/results/clientpositive/llap/udf_get_json_object.q.out
similarity index 98%
rename from ql/src/test/results/clientpositive/udf_get_json_object.q.out
rename to ql/src/test/results/clientpositive/llap/udf_get_json_object.q.out
index fd2d559..d2ae71c 100644
--- a/ql/src/test/results/clientpositive/udf_get_json_object.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_get_json_object.q.out
@@ -62,11 +62,9 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: src_json
-          Statistics: Num rows: 1 Data size: 728 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
 #### A masked pattern was here ####
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: SELECT get_json_object(src_json.json, '$') FROM src_json
diff --git a/ql/src/test/results/clientpositive/udf_greatest.q.out b/ql/src/test/results/clientpositive/llap/udf_greatest.q.out
similarity index 97%
rename from ql/src/test/results/clientpositive/udf_greatest.q.out
rename to ql/src/test/results/clientpositive/llap/udf_greatest.q.out
index be58652..cbd3358 100644
--- a/ql/src/test/results/clientpositive/udf_greatest.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_greatest.q.out
@@ -62,11 +62,9 @@ STAGE PLANS:
         TableScan
           alias: src
           Row Limit Per Split: 1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 'c' (type: string), 'a' (type: string), 'AaA' (type: string), 'AAA' (type: string), '13' (type: string), '2' (type: string), '03' (type: string), '1' (type: string), null (type: double), null (type: double), null (type: double), null (type: double), null (type: double), null (type: double)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13
-            Statistics: Num rows: 500 Data size: 343048 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: SELECT GREATEST('a', 'b', 'c'),
diff --git a/ql/src/test/results/clientpositive/udf_hash.q.out b/ql/src/test/results/clientpositive/llap/udf_hash.q.out
similarity index 93%
rename from ql/src/test/results/clientpositive/udf_hash.q.out
rename to ql/src/test/results/clientpositive/llap/udf_hash.q.out
index 9b6a5ac..a793d84 100644
--- a/ql/src/test/results/clientpositive/udf_hash.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_hash.q.out
@@ -41,11 +41,9 @@ STAGE PLANS:
         TableScan
           alias: src
           Row Limit Per Split: 1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 1 (type: int), 2 (type: int), 3 (type: int), -1097262584 (type: int), 1067450368 (type: int), 1076887552 (type: int), 51508 (type: int), 96354 (type: int), 1 (type: int), 0 (type: int), 1026 (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
-            Statistics: Num rows: 500 Data size: 22000 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: SELECT hash(CAST(1 AS TINYINT)), hash(CAST(2 AS SMALLINT)),
diff --git a/ql/src/test/results/clientpositive/udf_hour.q.out b/ql/src/test/results/clientpositive/llap/udf_hour.q.out
similarity index 89%
rename from ql/src/test/results/clientpositive/udf_hour.q.out
rename to ql/src/test/results/clientpositive/llap/udf_hour.q.out
index 3eca55d..c12117e 100644
--- a/ql/src/test/results/clientpositive/udf_hour.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_hour.q.out
@@ -41,14 +41,11 @@ STAGE PLANS:
         TableScan
           alias: src
           filterExpr: (UDFToDouble(key) = 86.0D) (type: boolean)
-          Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
           Filter Operator
             predicate: (UDFToDouble(key) = 86.0D) (type: boolean)
-            Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: 13 (type: int), 13 (type: int), 0 (type: int)
               outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 250 Data size: 3000 Basic stats: COMPLETE Column stats: COMPLETE
               ListSink
 
 PREHOOK: query: SELECT hour('2009-08-07 13:14:15'), hour('13:14:15'), hour('2009-08-07')
diff --git a/ql/src/test/results/clientpositive/udf_if.q.out b/ql/src/test/results/clientpositive/llap/udf_if.q.out
similarity index 92%
rename from ql/src/test/results/clientpositive/udf_if.q.out
rename to ql/src/test/results/clientpositive/llap/udf_if.q.out
index e6615a4..a9f5a8d 100644
--- a/ql/src/test/results/clientpositive/udf_if.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_if.q.out
@@ -43,11 +43,9 @@ STAGE PLANS:
         TableScan
           alias: src
           Row Limit Per Split: 1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 1 (type: int), '1' (type: string), 1 (type: int), 1 (type: int), null (type: int), 2 (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-            Statistics: Num rows: 500 Data size: 50504 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: SELECT IF(TRUE, 1, 2) AS COL1,
@@ -100,11 +98,9 @@ STAGE PLANS:
         TableScan
           alias: src
           Row Limit Per Split: 1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 128S (type: smallint), 1.1 (type: decimal(11,1)), 'ABC' (type: string), '12.3' (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 500 Data size: 145500 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: SELECT IF(TRUE, CAST(128 AS SMALLINT), CAST(1 AS TINYINT)) AS COL1,
diff --git a/ql/src/test/results/clientpositive/udf_in_file.q.out b/ql/src/test/results/clientpositive/llap/udf_in_file.q.out
similarity index 69%
rename from ql/src/test/results/clientpositive/udf_in_file.q.out
rename to ql/src/test/results/clientpositive/llap/udf_in_file.q.out
index 9a277d0..6af4ea1 100644
--- a/ql/src/test/results/clientpositive/udf_in_file.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_in_file.q.out
@@ -52,37 +52,21 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@value_src
 #### A masked pattern was here ####
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: value_src
-            Statistics: Num rows: 1 Data size: 730 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: in_file(str_val, '../../data/files/test2.dat') (type: boolean), in_file(ch_val, '../../data/files/test2.dat') (type: boolean), in_file(vch_val, '../../data/files/test2.dat') (type: boolean), in_file(str_val_neg, '../../data/files/test2.dat') (type: boolean), in_file(ch_val_neg, '../../data/files/test2.dat') (type: boolean), in_file(vch_val_neg, '../../data/files/test2.dat') (type: boolean), in_file('303', '../../data/files/test2.dat') (type: boolean), in_file(' [...]
-              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-              Statistics: Num rows: 1 Data size: 730 Basic stats: COMPLETE Column stats: NONE
-              Limit
-                Number of rows: 1
-                Statistics: Num rows: 1 Data size: 730 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 730 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-      Execution mode: vectorized
-
   Stage: Stage-0
     Fetch Operator
       limit: 1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: value_src
+          Select Operator
+            expressions: in_file(str_val, '../../data/files/test2.dat') (type: boolean), in_file(ch_val, '../../data/files/test2.dat') (type: boolean), in_file(vch_val, '../../data/files/test2.dat') (type: boolean), in_file(str_val_neg, '../../data/files/test2.dat') (type: boolean), in_file(ch_val_neg, '../../data/files/test2.dat') (type: boolean), in_file(vch_val_neg, '../../data/files/test2.dat') (type: boolean), in_file('303', '../../data/files/test2.dat') (type: boolean), in_file('30 [...]
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+            Limit
+              Number of rows: 1
+              ListSink
 
 PREHOOK: query: SELECT in_file(str_val, "../../data/files/test2.dat"),
        in_file(ch_val, "../../data/files/test2.dat"),
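Note: the plan rewrite above is the simple-fetch conversion. The query is a pure projection of in_file() calls with a LIMIT and no joins or aggregations, so under the LLAP driver it qualifies to run as a fetch-only task: the old Map Reduce Stage-1 disappears, and the TableScan / Select / Limit pipeline moves directly under the Stage-0 Fetch Operator. This behavior is governed by hive.fetch.task.conversion (presumably at its default value 'more' here; an assumption, since the setting is not shown in the diff).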
diff --git a/ql/src/test/results/clientpositive/udf_inline.q.out b/ql/src/test/results/clientpositive/llap/udf_inline.q.out
similarity index 84%
rename from ql/src/test/results/clientpositive/udf_inline.q.out
rename to ql/src/test/results/clientpositive/llap/udf_inline.q.out
index 12422a4..d93bc85 100644
--- a/ql/src/test/results/clientpositive/udf_inline.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_inline.q.out
@@ -33,21 +33,16 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: src
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: array(const struct(1,'dude!'),const struct(2,'Wheres'),const struct(3,'my car?')) (type: array<struct<col1:int,col2:string>>)
             outputColumnNames: _col0
-            Statistics: Num rows: 500 Data size: 32000 Basic stats: COMPLETE Column stats: COMPLETE
             UDTF Operator
-              Statistics: Num rows: 500 Data size: 32000 Basic stats: COMPLETE Column stats: COMPLETE
               function name: inline
               Select Operator
                 expressions: col1 (type: int), col2 (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
                 Limit
                   Number of rows: 2
-                  Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
                   ListSink
 
 PREHOOK: query: SELECT inline( 
diff --git a/ql/src/test/results/clientpositive/udf_instr.q.out b/ql/src/test/results/clientpositive/llap/udf_instr.q.out
similarity index 94%
rename from ql/src/test/results/clientpositive/udf_instr.q.out
rename to ql/src/test/results/clientpositive/llap/udf_instr.q.out
index 67dd28c..9c59287 100644
--- a/ql/src/test/results/clientpositive/udf_instr.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_instr.q.out
@@ -60,11 +60,9 @@ STAGE PLANS:
         TableScan
           alias: src
           Row Limit Per Split: 1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 1 (type: int), 0 (type: int), 2 (type: int), 2 (type: int), 0 (type: int), 0 (type: int), 2 (type: int), 3 (type: int), 4 (type: int), 2 (type: int), 3 (type: int), null (type: int), null (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
-            Statistics: Num rows: 500 Data size: 22008 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: SELECT instr('abcd', 'abc'),
diff --git a/ql/src/test/results/clientpositive/udf_isnull_isnotnull.q.out b/ql/src/test/results/clientpositive/llap/udf_isnull_isnotnull.q.out
similarity index 86%
rename from ql/src/test/results/clientpositive/udf_isnull_isnotnull.q.out
rename to ql/src/test/results/clientpositive/llap/udf_isnull_isnotnull.q.out
index 7e3642e..2654d71 100644
--- a/ql/src/test/results/clientpositive/udf_isnull_isnotnull.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_isnull_isnotnull.q.out
@@ -50,14 +50,11 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: src
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: true (type: boolean), true (type: boolean), true (type: boolean)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 500 Data size: 6000 Basic stats: COMPLETE Column stats: COMPLETE
             Limit
               Number of rows: 1
-              Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
               ListSink
 
 PREHOOK: query: SELECT NULL IS NULL,
@@ -108,17 +105,13 @@ STAGE PLANS:
         TableScan
           alias: src_thrift
           filterExpr: (lint is not null and mstringstring is not null) (type: boolean)
-          Statistics: Num rows: 11 Data size: 39600 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: (lint is not null and mstringstring is not null) (type: boolean)
-            Statistics: Num rows: 11 Data size: 39600 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: lint is not null (type: boolean), lintstring is not null (type: boolean), mstringstring is not null (type: boolean)
               outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 11 Data size: 39600 Basic stats: COMPLETE Column stats: NONE
               Limit
                 Number of rows: 1
-                Statistics: Num rows: 1 Data size: 3600 Basic stats: COMPLETE Column stats: NONE
                 ListSink
 
 PREHOOK: query: FROM src_thrift
diff --git a/ql/src/test/results/clientpositive/llap/udf_isops_simplify.q.out b/ql/src/test/results/clientpositive/llap/udf_isops_simplify.q.out
new file mode 100644
index 0000000..e7ccd7b
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/udf_isops_simplify.q.out
@@ -0,0 +1,422 @@
+PREHOOK: query: create table t (a integer)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t
+POSTHOOK: query: create table t (a integer)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t
+PREHOOK: query: explain select not ((a>0) is not true) from t group by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: explain select not ((a>0) is not true) from t group by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t
+                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: a (type: int)
+                    outputColumnNames: a
+                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: a (type: int)
+                      minReductionHashAggr: 0.99
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        null sort order: z
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: int)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: (_col0 > 0) is true (type: boolean)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select not ((a>0) is not false) from t group by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: explain select not ((a>0) is not false) from t group by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t
+                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: a (type: int)
+                    outputColumnNames: a
+                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: a (type: int)
+                      minReductionHashAggr: 0.99
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        null sort order: z
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: int)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: (_col0 > 0) is false (type: boolean)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select not ((a>0) is not null) from t group by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: explain select not ((a>0) is not null) from t group by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t
+                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: a (type: int)
+                    outputColumnNames: a
+                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: a (type: int)
+                      minReductionHashAggr: 0.99
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        null sort order: z
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: int)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 is null (type: boolean)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select not ((a>0) is true) from t group by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: explain select not ((a>0) is true) from t group by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t
+                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: a (type: int)
+                    outputColumnNames: a
+                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: a (type: int)
+                      minReductionHashAggr: 0.99
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        null sort order: z
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: int)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: (_col0 > 0) is not true (type: boolean)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select not ((a>0) is false) from t group by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: explain select not ((a>0) is false) from t group by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t
+                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: a (type: int)
+                    outputColumnNames: a
+                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: a (type: int)
+                      minReductionHashAggr: 0.99
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        null sort order: z
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: int)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: (_col0 > 0) is not false (type: boolean)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select not ((a>0) is null) from t group by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: explain select not ((a>0) is null) from t group by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t
+                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: a (type: int)
+                    outputColumnNames: a
+                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: a (type: int)
+                      minReductionHashAggr: 0.99
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        null sort order: z
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: int)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 is not null (type: boolean)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
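Note: read together, the six plans above document how the optimizer absorbs an outer NOT into the IS operators:

    not (x is not true)   =>  x is true
    not (x is not false)  =>  x is false
    not (x is not null)   =>  x is null
    not (x is true)       =>  x is not true
    not (x is false)      =>  x is not false
    not (x is null)       =>  x is not null

Here x stands for the comparison (a > 0). In the two IS [NOT] NULL cases the plans additionally reduce (a > 0) is null to a is null (and its negation to a is not null), since a > 0 is null exactly when a is null.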
diff --git a/ql/src/test/results/clientpositive/udf_java_method.q.out b/ql/src/test/results/clientpositive/llap/udf_java_method.q.out
similarity index 94%
rename from ql/src/test/results/clientpositive/udf_java_method.q.out
rename to ql/src/test/results/clientpositive/llap/udf_java_method.q.out
index 71de4a1..1389c86 100644
--- a/ql/src/test/results/clientpositive/udf_java_method.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_java_method.q.out
@@ -48,12 +48,10 @@ STAGE PLANS:
         TableScan
           alias: src
           Row Limit Per Split: 1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
           GatherStats: false
           Select Operator
             expressions: reflect('java.lang.String','valueOf',1) (type: string), reflect('java.lang.String','isEmpty') (type: string), reflect('java.lang.Math','max',2,3) (type: string), reflect('java.lang.Math','min',2,3) (type: string), reflect('java.lang.Math','round',2.5D) (type: string), round(reflect('java.lang.Math','exp',1.0D), 6) (type: double), reflect('java.lang.Math','floor',1.9D) (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-            Statistics: Num rows: 500 Data size: 556000 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: SELECT java_method("java.lang.String", "valueOf", 1),
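Note: java_method() is a synonym for reflect(), which is why the compiled plan above shows reflect(...) calls although the query is written with java_method(...). Both invoke a static Java method by reflection and return the result as a string, e.g. (illustrative):

    SELECT java_method('java.lang.Math', 'max', 2, 3);  -- '3'
    SELECT reflect('java.lang.Math', 'max', 2, 3);      -- same result
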
diff --git a/ql/src/test/results/clientpositive/udf_last_day.q.out b/ql/src/test/results/clientpositive/llap/udf_last_day.q.out
similarity index 96%
rename from ql/src/test/results/clientpositive/udf_last_day.q.out
rename to ql/src/test/results/clientpositive/llap/udf_last_day.q.out
index 716e39b..f957597 100644
--- a/ql/src/test/results/clientpositive/udf_last_day.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_last_day.q.out
@@ -33,11 +33,9 @@ STAGE PLANS:
         TableScan
           alias: _dummy_table
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: '2015-02-28' (type: string)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select
diff --git a/ql/src/test/results/clientpositive/udf_least.q.out b/ql/src/test/results/clientpositive/llap/udf_least.q.out
similarity index 97%
rename from ql/src/test/results/clientpositive/udf_least.q.out
rename to ql/src/test/results/clientpositive/llap/udf_least.q.out
index bc867dc..11a7487 100644
--- a/ql/src/test/results/clientpositive/udf_least.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_least.q.out
@@ -62,11 +62,9 @@ STAGE PLANS:
         TableScan
           alias: src
           Row Limit Per Split: 1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 'a' (type: string), 'B' (type: string), 'AAA' (type: string), 'A' (type: string), '11' (type: string), '11' (type: string), '01' (type: string), '01' (type: string), null (type: double), null (type: double), null (type: double), null (type: double), null (type: double), null (type: double)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13
-            Statistics: Num rows: 500 Data size: 343048 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: SELECT LEAST('a', 'b', 'c'),
diff --git a/ql/src/test/results/clientpositive/udf_length.q.out b/ql/src/test/results/clientpositive/llap/udf_length.q.out
similarity index 55%
rename from ql/src/test/results/clientpositive/udf_length.q.out
rename to ql/src/test/results/clientpositive/llap/udf_length.q.out
index f019db9..7a5bfba 100644
--- a/ql/src/test/results/clientpositive/udf_length.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_length.q.out
@@ -31,70 +31,70 @@ POSTHOOK: Input: default@src1
 POSTHOOK: Output: default@dest1_n134
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
-  Stage-4
-  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
-  Stage-2 depends on stages: Stage-0
-  Stage-3
-  Stage-5
-  Stage-6 depends on stages: Stage-5
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
 
 STAGE PLANS:
   Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src1
-            Statistics: Num rows: 25 Data size: 2225 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: length(value) (type: int)
-              outputColumnNames: _col0
-              Statistics: Num rows: 25 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 25 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.dest1_n134
-              Select Operator
-                expressions: _col0 (type: int)
-                outputColumnNames: len
-                Statistics: Num rows: 25 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
-                Group By Operator
-                  aggregations: compute_stats(len, 'hll')
-                  minReductionHashAggr: 0.99
-                  mode: hash
-                  outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 424 Basic stats: COMPLETE Column stats: COMPLETE
-                  Reduce Output Operator
-                    null sort order: 
-                    sort order: 
-                    Statistics: Num rows: 1 Data size: 424 Basic stats: COMPLETE Column stats: COMPLETE
-                    value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: compute_stats(VALUE._col0)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: COMPLETE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-7
-    Conditional Operator
-
-  Stage: Stage-4
-    Move Operator
-      files:
-          hdfs directory: true
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src1
+                  Statistics: Num rows: 25 Data size: 2225 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: length(value) (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 25 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 25 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.dest1_n134
+                    Select Operator
+                      expressions: _col0 (type: int)
+                      outputColumnNames: len
+                      Statistics: Num rows: 25 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
+                      Group By Operator
+                        aggregations: compute_stats(len, 'hll')
+                        minReductionHashAggr: 0.96
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 424 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          null sort order: 
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 424 Basic stats: COMPLETE Column stats: COMPLETE
+                          value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: compute_stats(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-2
+    Dependency Collection
 
   Stage: Stage-0
     Move Operator
@@ -106,7 +106,7 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.dest1_n134
 
-  Stage: Stage-2
+  Stage: Stage-3
     Stats Work
       Basic Stats Work:
       Column Stats Desc:
@@ -114,36 +114,6 @@ STAGE PLANS:
           Column Types: int
           Table: default.dest1_n134
 
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.dest1_n134
-
-  Stage: Stage-5
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.dest1_n134
-
-  Stage: Stage-6
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
 PREHOOK: query: FROM src1 INSERT OVERWRITE TABLE dest1_n134 SELECT length(src1.value)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
@@ -228,11 +198,9 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: dest1_n134
-          Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: length(name) (type: int)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: SELECT length(dest1_n134.name) FROM dest1_n134
diff --git a/ql/src/test/results/clientpositive/udf_levenshtein.q.out b/ql/src/test/results/clientpositive/llap/udf_levenshtein.q.out
similarity index 93%
rename from ql/src/test/results/clientpositive/udf_levenshtein.q.out
rename to ql/src/test/results/clientpositive/llap/udf_levenshtein.q.out
index 7b8047b..a621748 100644
--- a/ql/src/test/results/clientpositive/udf_levenshtein.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_levenshtein.q.out
@@ -32,11 +32,9 @@ STAGE PLANS:
         TableScan
           alias: _dummy_table
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 1 (type: int)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select
diff --git a/ql/src/test/results/clientpositive/udf_like.q.out b/ql/src/test/results/clientpositive/llap/udf_like.q.out
similarity index 94%
rename from ql/src/test/results/clientpositive/udf_like.q.out
rename to ql/src/test/results/clientpositive/llap/udf_like.q.out
index 8a0bb98..3c86fa7 100644
--- a/ql/src/test/results/clientpositive/udf_like.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_like.q.out
@@ -40,14 +40,11 @@ STAGE PLANS:
         TableScan
           alias: src
           filterExpr: (UDFToDouble(key) = 86.0D) (type: boolean)
-          Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
           Filter Operator
             predicate: (UDFToDouble(key) = 86.0D) (type: boolean)
-            Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: true (type: boolean), false (type: boolean), true (type: boolean), true (type: boolean), false (type: boolean), false (type: boolean), false (type: boolean), false (type: boolean), true (type: boolean), false (type: boolean), false (type: boolean), false (type: boolean), false (type: boolean), true (type: boolean)
               outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13
-              Statistics: Num rows: 250 Data size: 14000 Basic stats: COMPLETE Column stats: COMPLETE
               ListSink
 
 PREHOOK: query: SELECT '_%_' LIKE '%\_\%\_%', '__' LIKE '%\_\%\_%', '%%_%_' LIKE '%\_\%\_%', '%_%_%' LIKE '%\%\_\%',
diff --git a/ql/src/test/results/clientpositive/udf_locate.q.out b/ql/src/test/results/clientpositive/llap/udf_locate.q.out
similarity index 95%
rename from ql/src/test/results/clientpositive/udf_locate.q.out
rename to ql/src/test/results/clientpositive/llap/udf_locate.q.out
index cb3f63b..f361c16 100644
--- a/ql/src/test/results/clientpositive/udf_locate.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_locate.q.out
@@ -69,11 +69,9 @@ STAGE PLANS:
         TableScan
           alias: src
           Row Limit Per Split: 1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 1 (type: int), 0 (type: int), 2 (type: int), 2 (type: int), 4 (type: int), 4 (type: int), 0 (type: int), 0 (type: int), 2 (type: int), 3 (type: int), 4 (type: int), 2 (type: int), 3 (type: int), null (type: int), null (type: int), 0 (type: int), 0 (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16
-            Statistics: Num rows: 500 Data size: 30008 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: SELECT locate('abc', 'abcd'),
diff --git a/ql/src/test/results/clientpositive/udf_lower.q.out b/ql/src/test/results/clientpositive/llap/udf_lower.q.out
similarity index 52%
rename from ql/src/test/results/clientpositive/udf_lower.q.out
rename to ql/src/test/results/clientpositive/llap/udf_lower.q.out
index 3e3a1e2..53e3de6 100644
--- a/ql/src/test/results/clientpositive/udf_lower.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_lower.q.out
@@ -25,38 +25,22 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            filterExpr: (UDFToDouble(key) = 86.0D) (type: boolean)
-            Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: (UDFToDouble(key) = 86.0D) (type: boolean)
-              Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: 'abc 123' (type: string), 'ABC 123' (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 250 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 250 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-      Execution mode: vectorized
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: src
+          filterExpr: (UDFToDouble(key) = 86.0D) (type: boolean)
+          Filter Operator
+            predicate: (UDFToDouble(key) = 86.0D) (type: boolean)
+            Select Operator
+              expressions: 'abc 123' (type: string), 'ABC 123' (type: string)
+              outputColumnNames: _col0, _col1
+              ListSink
 
 PREHOOK: query: SELECT lower('AbC 123'), upper('AbC 123') FROM src WHERE key = 86
 PREHOOK: type: QUERY
diff --git a/ql/src/test/results/clientpositive/udf_lpad.q.out b/ql/src/test/results/clientpositive/llap/udf_lpad.q.out
similarity index 91%
rename from ql/src/test/results/clientpositive/udf_lpad.q.out
rename to ql/src/test/results/clientpositive/llap/udf_lpad.q.out
index f7a316e..5ebfea2 100644
--- a/ql/src/test/results/clientpositive/udf_lpad.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_lpad.q.out
@@ -46,11 +46,9 @@ STAGE PLANS:
         TableScan
           alias: src
           Row Limit Per Split: 1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 'h' (type: string), '...hi' (type: string), '1231hi' (type: string)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 500 Data size: 132000 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: SELECT
diff --git a/ql/src/test/results/clientpositive/udf_map.q.out b/ql/src/test/results/clientpositive/llap/udf_map.q.out
similarity index 92%
rename from ql/src/test/results/clientpositive/udf_map.q.out
rename to ql/src/test/results/clientpositive/llap/udf_map.q.out
index 702feb2..eb47453 100644
--- a/ql/src/test/results/clientpositive/udf_map.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_map.q.out
@@ -31,11 +31,9 @@ STAGE PLANS:
         TableScan
           alias: src
           Row Limit Per Split: 1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: map() (type: map<string,string>), map(1:'a',2:'b',3:'c') (type: map<int,string>), map(1:2,'a':'b') (type: map<string,string>), map(1:'a',2:'b',3:'c')[2] (type: string), map(1:2,'a':'b')['a'] (type: string), map(1:array('a'))[1][0] (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-            Statistics: Num rows: 500 Data size: 857500 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: SELECT map(), map(1, "a", 2, "b", 3, "c"), map(1, 2, "a", "b"), 
diff --git a/ql/src/test/results/clientpositive/udf_mask.q.out b/ql/src/test/results/clientpositive/llap/udf_mask.q.out
similarity index 96%
rename from ql/src/test/results/clientpositive/udf_mask.q.out
rename to ql/src/test/results/clientpositive/llap/udf_mask.q.out
index f3a5f26..31140d3 100644
--- a/ql/src/test/results/clientpositive/udf_mask.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_mask.q.out
@@ -45,11 +45,9 @@ STAGE PLANS:
         TableScan
           alias: _dummy_table
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 'XxxxXxxxxx1000' (type: string)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 98 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select mask('TestString-123', 'X', 'x', '0', ':'),
diff --git a/ql/src/test/results/clientpositive/udf_mask_first_n.q.out b/ql/src/test/results/clientpositive/llap/udf_mask_first_n.q.out
similarity index 95%
rename from ql/src/test/results/clientpositive/udf_mask_first_n.q.out
rename to ql/src/test/results/clientpositive/llap/udf_mask_first_n.q.out
index 107461b..9aa9743 100644
--- a/ql/src/test/results/clientpositive/udf_mask_first_n.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_mask_first_n.q.out
@@ -42,11 +42,9 @@ STAGE PLANS:
         TableScan
           alias: _dummy_table
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 'XxxxString-123' (type: string)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 98 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select mask_first_n('TestString-123', 4, 'X', 'x', '0', ':'),
diff --git a/ql/src/test/results/clientpositive/udf_mask_hash.q.out b/ql/src/test/results/clientpositive/llap/udf_mask_hash.q.out
similarity index 93%
rename from ql/src/test/results/clientpositive/udf_mask_hash.q.out
rename to ql/src/test/results/clientpositive/llap/udf_mask_hash.q.out
index 6d79a56..e489f2e 100644
--- a/ql/src/test/results/clientpositive/udf_mask_hash.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_mask_hash.q.out
@@ -33,11 +33,9 @@ STAGE PLANS:
         TableScan
           alias: _dummy_table
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: '8b44d559dc5d60e4453c9b4edf2a455fbce054bb8504cd3eb9b5f391bd239c90' (type: string)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 148 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select mask_hash('TestString-123'),
diff --git a/ql/src/test/results/clientpositive/udf_mask_last_n.q.out b/ql/src/test/results/clientpositive/llap/udf_mask_last_n.q.out
similarity index 95%
rename from ql/src/test/results/clientpositive/udf_mask_last_n.q.out
rename to ql/src/test/results/clientpositive/llap/udf_mask_last_n.q.out
index 2119084..34dcd5b 100644
--- a/ql/src/test/results/clientpositive/udf_mask_last_n.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_mask_last_n.q.out
@@ -42,11 +42,9 @@ STAGE PLANS:
         TableScan
           alias: _dummy_table
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 'TestString1000' (type: string)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 98 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select mask_last_n('TestString-123', 4, 'X', 'x', '0', ':'),
diff --git a/ql/src/test/results/clientpositive/udf_mask_show_first_n.q.out b/ql/src/test/results/clientpositive/llap/udf_mask_show_first_n.q.out
similarity index 95%
rename from ql/src/test/results/clientpositive/udf_mask_show_first_n.q.out
rename to ql/src/test/results/clientpositive/llap/udf_mask_show_first_n.q.out
index 918c5b8..332dacb 100644
--- a/ql/src/test/results/clientpositive/udf_mask_show_first_n.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_mask_show_first_n.q.out
@@ -42,11 +42,9 @@ STAGE PLANS:
         TableScan
           alias: _dummy_table
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 'TestXxxxxx1000' (type: string)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 98 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select mask_show_first_n('TestString-123', 4, 'X', 'x', '0', ':'),
diff --git a/ql/src/test/results/clientpositive/udf_mask_show_last_n.q.out b/ql/src/test/results/clientpositive/llap/udf_mask_show_last_n.q.out
similarity index 95%
rename from ql/src/test/results/clientpositive/udf_mask_show_last_n.q.out
rename to ql/src/test/results/clientpositive/llap/udf_mask_show_last_n.q.out
index af06ba7..4c812a2 100644
--- a/ql/src/test/results/clientpositive/udf_mask_show_last_n.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_mask_show_last_n.q.out
@@ -42,11 +42,9 @@ STAGE PLANS:
         TableScan
           alias: _dummy_table
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: 'XxxxXxxxxx-123' (type: string)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 98 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: select mask_show_last_n('TestString-123', 4, 'X', 'x', '0', ':'),
diff --git a/ql/src/test/results/clientpositive/udf_md5.q.out b/ql/src/test/results/clientpositive/llap/udf_md5.q.out
similarity index 91%
rename from ql/src/test/results/clientpositive/udf_md5.q.out
rename to ql/src/test/results/clientpositive/llap/udf_md5.q.out
index 16a949e..4d3147b 100644
--- a/ql/src/test/results/clientpositive/udf_md5.q.out
+++ b/ql/src/test/results/clientpositive/llap/udf_md5.q.out
@@ -35,11 +35,9 @@ STAGE PLANS:
         TableScan
           alias: _dummy_table
           Row Limit Per Split: 1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: '902fbdd2b1df0c4f70b4a5d23525e932' (type: string)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
... 76919 lines suppressed ...