You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by jc...@apache.org on 2018/01/13 02:14:16 UTC
[23/23] hive git commit: HIVE-18416: Initial support for TABLE function (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
HIVE-18416: Initial support for TABLE function (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/7e64114d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/7e64114d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/7e64114d
Branch: refs/heads/master
Commit: 7e64114ddca5c07a0c4ac332c1b34b534cc2e9ed
Parents: 0a62507
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Wed Jan 10 14:03:16 2018 -0800
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Fri Jan 12 18:13:35 2018 -0800
----------------------------------------------------------------------
.../test/results/positive/accumulo_index.q.out | 2 +
.../results/positive/hbase_binary_binary.q.out | 4 +
.../positive/hbase_decimal_decimal.q.out | Bin 1667 -> 1758 bytes
.../positive/hbase_tablename_property.q.out | 2 +
.../insert_into_dynamic_partitions.q.out | 122 +--
.../clientpositive/insert_into_table.q.out | 156 ++--
.../insert_overwrite_directory.q.out | 12 +-
.../insert_overwrite_dynamic_partitions.q.out | 122 +--
...verwrite_dynamic_partitions_merge_move.q.out | 10 +-
...verwrite_dynamic_partitions_merge_only.q.out | 10 +-
...overwrite_dynamic_partitions_move_only.q.out | 10 +-
.../clientpositive/insert_overwrite_table.q.out | 156 ++--
.../test/resources/testconfiguration.properties | 4 +-
.../hadoop/hive/ql/exec/FunctionRegistry.java | 9 +
.../translator/PlanModifierForReturnPath.java | 1 +
.../hadoop/hive/ql/parse/FromClauseParser.g | 64 +-
.../apache/hadoop/hive/ql/parse/HiveParser.g | 13 +-
.../hadoop/hive/ql/parse/IdentifiersParser.g | 22 +-
.../org/apache/hadoop/hive/ql/parse/QB.java | 18 -
.../hadoop/hive/ql/parse/SemanticAnalyzer.java | 212 -----
.../hive/ql/lockmgr/TestDbTxnManager2.java | 54 +-
.../apache/hadoop/hive/ql/parse/TestIUD.java | 136 ----
.../hive/ql/parse/TestMergeStatement.java | 6 +-
.../clientpositive/encryption_insert_values.q | 3 -
.../clientpositive/materialized_view_create.q | 2 +-
.../test/queries/clientpositive/tablevalues.q | 94 +++
.../authorization_insertpart_noinspriv.q.out | 2 +
.../distinct_windowing_failure2.q.out | 2 +-
.../insert_into_with_schema.q.out | 2 +-
.../insert_into_with_schema1.q.out | 2 +-
.../results/clientnegative/insert_sorted.q.out | 6 +-
...zed_view_authorization_create_no_grant.q.out | 8 +-
...ew_authorization_create_no_select_perm.q.out | 8 +-
...rialized_view_authorization_drop_other.q.out | 8 +-
...ized_view_authorization_no_select_perm.q.out | 8 +-
...ed_view_authorization_rebuild_no_grant.q.out | 8 +-
...lized_view_authorization_rebuild_other.q.out | 8 +-
.../clientnegative/materialized_view_drop.q.out | 8 +-
.../materialized_view_drop2.q.out | 8 +-
.../materialized_view_replace_with_view.q.out | 8 +-
...names_with_leading_and_trailing_spaces.q.out | 4 +-
.../clientnegative/subquery_corr_in_agg.q.out | 12 +-
.../subquery_in_implicit_gby.q.out | 12 +-
.../subquery_notin_implicit_gby.q.out | 12 +-
.../clientnegative/udf_assert_true2.q.out | 16 +-
.../clientpositive/acid_insert_overwrite.q.out | 68 +-
.../test/results/clientpositive/acid_join.q.out | 12 +-
.../results/clientpositive/acid_mapjoin.q.out | 12 +-
.../results/clientpositive/acid_subquery.q.out | 14 +-
.../clientpositive/acid_vectorization.q.out | 12 +-
.../clientpositive/acid_view_delete.q.out | 16 +-
.../clientpositive/alterColumnStats.q.out | 8 +-
.../clientpositive/alterColumnStatsPart.q.out | 24 +-
.../alter_table_add_partition.q.out | 4 +-
.../alter_table_partition_drop.q.out | 10 +-
.../alter_table_update_status.q.out | 60 +-
..._table_update_status_disable_bitvector.q.out | 60 +-
.../clientpositive/alter_view_col_type.q.out | 40 +-
.../clientpositive/analyze_tbl_date.q.out | 4 +-
.../clientpositive/array_size_estimation.q.out | 4 +-
.../clientpositive/autoColumnStats_10.q.out | 32 +-
.../clientpositive/autoColumnStats_5.q.out | 248 +++---
.../clientpositive/autoColumnStats_5a.q.out | 396 +++++-----
.../results/clientpositive/avrotblsjoin.q.out | 14 +-
.../clientpositive/basicstat_partval.q.out | 8 +-
.../clientpositive/beeline/mapjoin2.q.out | 12 +-
.../materialized_view_create_rewrite.q.out | 10 +-
.../bucket_num_reducers_acid.q.out | 2 +
.../bucket_num_reducers_acid2.q.out | 4 +
.../results/clientpositive/cbo_rp_insert.q.out | 83 --
.../clientpositive/cmdwithcomments.q.out | 8 +-
.../columnStatsUpdateForStatsOptimizer_2.q.out | 6 +-
...names_with_leading_and_trailing_spaces.q.out | 12 +-
.../clientpositive/comma_in_column_name.q.out | 8 +-
.../test/results/clientpositive/concat_op.q.out | 8 +-
.../clientpositive/constantPropWhen.q.out | 6 +-
.../clientpositive/constantfolding.q.out | 20 +-
ql/src/test/results/clientpositive/cte_5.q.out | 7 +-
ql/src/test/results/clientpositive/cte_7.q.out | 8 +-
.../test/results/clientpositive/cte_mat_4.q.out | 10 +-
.../test/results/clientpositive/cte_mat_5.q.out | 7 +-
.../clientpositive/dbtxnmgr_showlocks.q.out | 6 +-
.../clientpositive/decimal_precision.q.out | 4 +-
.../results/clientpositive/deleteAnalyze.q.out | 10 +-
.../clientpositive/druid/druidmini_mv.q.out | 20 +-
.../encrypted/encryption_drop_partition.q.out | 10 +-
.../encryption_insert_partition_dynamic.q.out | 6 +-
.../encryption_insert_partition_static.q.out | 6 +-
.../encrypted/encryption_insert_values.q.out | 33 +-
.../test/results/clientpositive/equal_ns.q.out | 6 +-
.../results/clientpositive/except_all.q.out | 20 +-
.../explaindenpendencydiffengs.q.out | 4 +-
.../extrapolate_part_stats_date.q.out | 16 +-
.../test/results/clientpositive/fm-sketch.q.out | 20 +-
.../clientpositive/folder_predicate.q.out | 4 +-
.../results/clientpositive/groupby_empty.q.out | 4 +-
.../clientpositive/groupby_nullvalues.q.out | 10 +-
.../clientpositive/groupby_rollup_empty.q.out | 8 +-
ql/src/test/results/clientpositive/hll.q.out | 20 +-
.../clientpositive/implicit_decimal.q.out | 4 +-
.../results/clientpositive/innerjoin1.q.out | 20 +-
.../results/clientpositive/insert_into1.q.out | 40 +-
.../results/clientpositive/insert_into2.q.out | 6 +-
.../insert_into_with_schema2.q.out | 18 +-
.../insert_nonacid_from_acid.q.out | 8 +-
.../insert_values_acid_not_bucketed.q.out | 6 +-
.../insert_values_dynamic_partitioned.q.out | 14 +-
.../insert_values_non_partitioned.q.out | 28 +-
.../clientpositive/insert_values_nonascii.q.out | 6 +-
.../insert_values_partitioned.q.out | 26 +-
.../insert_values_tmp_table.q.out | 8 +-
.../clientpositive/insertoverwrite_bucket.q.out | 12 +-
.../clientpositive/insertvalues_espchars.q.out | 4 +-
.../results/clientpositive/interval_alt.q.out | 4 +-
ql/src/test/results/clientpositive/join42.q.out | 16 +-
ql/src/test/results/clientpositive/join43.q.out | 148 ++--
ql/src/test/results/clientpositive/join46.q.out | 16 +-
.../join_cond_pushdown_unqual5.q.out | 18 +-
.../clientpositive/join_emit_interval.q.out | 16 +-
.../llap/acid_bucket_pruning.q.out | 4 +-
.../clientpositive/llap/acid_no_buckets.q.out | 24 +-
.../llap/acid_vectorization_original.q.out | 4 +-
.../llap/authorization_view_8.q.out | 6 +-
.../llap/autoColumnStats_10.q.out | 32 +-
.../llap/bucket_map_join_tez1.q.out | 282 +++----
.../llap/bucket_map_join_tez2.q.out | 12 +-
.../clientpositive/llap/cbo_rp_lineage2.q.out | 675 ----------------
.../columnStatsUpdateForStatsOptimizer_1.q.out | 20 +-
...names_with_leading_and_trailing_spaces.q.out | 12 +-
.../llap/column_table_stats_orc.q.out | 30 +-
.../llap/constprog_semijoin.q.out | 20 +-
.../results/clientpositive/llap/cte_5.q.out | 7 +-
.../results/clientpositive/llap/cte_mat_4.q.out | 10 +-
.../results/clientpositive/llap/cte_mat_5.q.out | 7 +-
.../clientpositive/llap/deleteAnalyze.q.out | 10 +-
.../llap/dynamic_semijoin_reduction_3.q.out | 28 +-
.../llap/dynpart_sort_optimization_acid.q.out | 30 +-
.../clientpositive/llap/except_distinct.q.out | 20 +-
.../llap/groupby_rollup_empty.q.out | 8 +-
.../clientpositive/llap/insert_into1.q.out | 40 +-
.../clientpositive/llap/insert_into2.q.out | 6 +-
.../llap/insert_into_with_schema.q.out | 48 +-
.../insert_values_dynamic_partitioned.q.out | 14 +-
.../llap/insert_values_non_partitioned.q.out | 28 +-
.../llap/insert_values_orig_table.q.out | 26 +-
.../llap/insert_values_partitioned.q.out | 26 +-
.../llap/insert_values_tmp_table.q.out | 8 +-
.../clientpositive/llap/intersect_all.q.out | 12 +-
.../llap/intersect_distinct.q.out | 12 +-
.../clientpositive/llap/intersect_merge.q.out | 12 +-
.../clientpositive/llap/is_distinct_from.q.out | 6 +-
.../results/clientpositive/llap/join46.q.out | 16 +-
.../llap/join_acid_non_acid.q.out | 14 +-
.../llap/join_emit_interval.q.out | 16 +-
.../results/clientpositive/llap/lineage2.q.out | 6 +-
.../results/clientpositive/llap/llap_smb.q.out | 2 +-
.../results/clientpositive/llap/mapjoin2.q.out | 12 +-
.../results/clientpositive/llap/mapjoin3.q.out | 10 +-
.../results/clientpositive/llap/mapjoin46.q.out | 16 +-
.../llap/mapjoin_emit_interval.q.out | 16 +-
.../clientpositive/llap/mapjoin_hint.q.out | 26 +-
.../llap/materialized_view_create.q.out | 24 +-
.../llap/materialized_view_create_rewrite.q.out | 10 +-
.../materialized_view_create_rewrite_2.q.out | 20 +-
.../materialized_view_create_rewrite_3.q.out | 40 +-
...erialized_view_create_rewrite_multi_db.q.out | 10 +-
.../llap/materialized_view_describe.q.out | 8 +-
.../results/clientpositive/llap/mm_all.q.out | 16 +-
.../clientpositive/llap/multi_column_in.q.out | 8 +-
.../llap/multi_column_in_single.q.out | 8 +-
.../llap/multi_count_distinct_null.q.out | 8 +-
.../clientpositive/llap/optimize_join_ptp.q.out | 12 +-
.../clientpositive/llap/orc_ppd_basic.q.out | 3 +
.../clientpositive/llap/order_null.q.out | 36 +-
.../clientpositive/llap/partition_pruning.q.out | 8 +-
.../llap/reduce_deduplicate_distinct.q.out | 8 +-
.../clientpositive/llap/resourceplan.q.out | 8 +-
.../llap/schema_evol_orc_acid_part.q.out | 234 +++---
.../llap/schema_evol_orc_acid_table.q.out | 226 +++---
.../llap/schema_evol_orc_acidvec_part.q.out | 234 +++---
.../llap/schema_evol_orc_acidvec_table.q.out | 226 +++---
.../llap/schema_evol_orc_nonvec_part.q.out | 234 +++---
...chema_evol_orc_nonvec_part_all_complex.q.out | 8 +-
.../llap/schema_evol_orc_nonvec_table.q.out | 226 +++---
.../llap/schema_evol_orc_vec_part.q.out | 234 +++---
.../schema_evol_orc_vec_part_all_complex.q.out | 8 +-
.../llap/schema_evol_orc_vec_table.q.out | 226 +++---
.../clientpositive/llap/schema_evol_stats.q.out | 32 +-
.../llap/schema_evol_text_nonvec_part.q.out | 234 +++---
...hema_evol_text_nonvec_part_all_complex.q.out | 8 +-
.../llap/schema_evol_text_nonvec_table.q.out | 226 +++---
.../llap/schema_evol_text_vec_part.q.out | 234 +++---
.../schema_evol_text_vec_part_all_complex.q.out | 8 +-
.../llap/schema_evol_text_vec_table.q.out | 226 +++---
.../llap/schema_evol_text_vecrow_part.q.out | 234 +++---
...hema_evol_text_vecrow_part_all_complex.q.out | 8 +-
.../llap/schema_evol_text_vecrow_table.q.out | 226 +++---
.../results/clientpositive/llap/semijoin6.q.out | 12 +-
.../results/clientpositive/llap/semijoin7.q.out | 12 +-
.../clientpositive/llap/skiphf_aggr.q.out | 24 +-
.../llap/special_character_in_tabnames_1.q.out | 8 +-
.../clientpositive/llap/subquery_exists.q.out | 26 +-
.../clientpositive/llap/subquery_in.q.out | 24 +-
.../llap/subquery_in_having.q.out | 6 +-
.../clientpositive/llap/subquery_multi.q.out | 32 +-
.../clientpositive/llap/subquery_notin.q.out | 40 +-
.../clientpositive/llap/subquery_scalar.q.out | 160 ++--
.../clientpositive/llap/subquery_select.q.out | 4 +-
.../clientpositive/llap/tez_nway_join.q.out | 8 +-
.../clientpositive/llap/tez_self_join.q.out | 12 +-
.../llap/tez_union_dynamic_partition.q.out | 4 +-
.../llap/tez_union_dynamic_partition_2.q.out | 4 +-
.../llap/update_after_multiple_inserts.q.out | 16 +-
.../clientpositive/llap/vector_acid3.q.out | 4 +-
.../llap/vector_adaptor_usage_mode.q.out | 6 +-
.../llap/vector_aggregate_without_gby.q.out | 8 +-
.../clientpositive/llap/vector_bround.q.out | 6 +-
.../clientpositive/llap/vector_bucket.q.out | 66 +-
.../clientpositive/llap/vector_char_cast.q.out | 4 +-
.../clientpositive/llap/vector_coalesce_2.q.out | 6 +-
.../clientpositive/llap/vector_coalesce_3.q.out | 10 +-
.../llap/vector_complex_join.q.out | 6 +-
.../llap/vector_decimal_round.q.out | 12 +-
.../llap/vector_decimal_round_2.q.out | 20 +-
.../clientpositive/llap/vector_inner_join.q.out | 20 +-
.../llap/vector_null_projection.q.out | 8 +-
.../llap/vector_number_compare_projection.q.out | 16 +-
.../llap/vector_outer_join0.q.out | 12 +-
.../vector_reduce_groupby_duplicate_cols.q.out | 8 +-
.../clientpositive/llap/vector_struct_in.q.out | 26 +-
.../llap/vector_udf_character_length.q.out | 4 +-
.../llap/vector_udf_octet_length.q.out | 4 +-
.../llap/vector_when_case_null.q.out | 6 +-
.../llap/vector_windowing_windowspec4.q.out | 8 +-
.../clientpositive/llap/vectorized_case.q.out | 12 +-
.../vectorized_insert_into_bucketed_table.q.out | 76 +-
.../clientpositive/llap/vectorized_join46.q.out | 16 +-
.../llap/vectorized_timestamp.q.out | 4 +-
.../test/results/clientpositive/llap_acid.q.out | 10 +-
.../results/clientpositive/llap_acid_fast.q.out | 10 +-
.../results/clientpositive/llap_reader.q.out | 2 +
.../results/clientpositive/localtimezone.q.out | 20 +-
.../test/results/clientpositive/macro_1.q.out | 4 +-
.../clientpositive/macro_duplicate.q.out | 16 +-
.../test/results/clientpositive/mapjoin2.q.out | 12 +-
.../test/results/clientpositive/mapjoin3.q.out | 10 +-
.../test/results/clientpositive/mapjoin46.q.out | 16 +-
...materialized_view_authorization_sqlstd.q.out | 8 +-
.../materialized_view_create.q.out | 24 +-
.../materialized_view_create_rewrite.q.out | 10 +-
.../materialized_view_create_rewrite_2.q.out | 20 +-
.../materialized_view_create_rewrite_3.q.out | 40 +-
...erialized_view_create_rewrite_multi_db.q.out | 10 +-
.../materialized_view_describe.q.out | 8 +-
.../clientpositive/metadata_empty_table.q.out | 4 +-
ql/src/test/results/clientpositive/mm_all.q.out | 16 +-
.../clientpositive/multi_insert_with_join.q.out | 16 +-
.../multi_insert_with_join2.q.out | 12 +-
.../clientpositive/named_column_join.q.out | 6 +-
.../clientpositive/nested_column_pruning.q.out | 4 +-
.../clientpositive/num_op_type_conv.q.out | 6 +-
.../results/clientpositive/orc_merge13.q.out | 32 +-
.../clientpositive/orc_ppd_exception.q.out | 6 +-
.../clientpositive/orc_ppd_str_conversion.q.out | 6 +-
.../orc_schema_evolution_float.q.out | 8 +-
ql/src/test/results/clientpositive/order3.q.out | 8 +-
.../clientpositive/order_by_expr_1.q.out | 6 +-
.../clientpositive/order_by_expr_2.q.out | 6 +-
.../results/clientpositive/order_by_pos.q.out | 6 +-
.../results/clientpositive/parquet_join2.q.out | 16 +-
.../clientpositive/parquet_no_row_serde.q.out | 8 +-
.../clientpositive/parquet_ppd_multifiles.q.out | 12 +-
.../clientpositive/parquet_ppd_partition.q.out | 6 +-
.../parquet_predicate_pushdown_2.q.out | 6 +-
.../clientpositive/partition_boolean.q.out | 48 +-
.../results/clientpositive/ptfgroupbyjoin.q.out | 22 +-
.../clientpositive/remove_exprs_stats.q.out | 8 +-
.../test/results/clientpositive/row__id.q.out | 18 +-
.../results/clientpositive/selectindate.q.out | 24 +-
.../clientpositive/setop_no_distinct.q.out | 20 +-
.../clientpositive/skewjoin_onesideskew.q.out | 30 +-
.../clientpositive/smb_join_partition_key.q.out | 12 +-
.../results/clientpositive/smb_mapjoin_46.q.out | 16 +-
.../results/clientpositive/smb_mapjoin_47.q.out | 16 +-
.../spark/bucket_map_join_tez2.q.out | 12 +-
.../spark/constprog_semijoin.q.out | 20 +-
.../spark/explaindenpendencydiffengs.q.out | 4 +-
.../clientpositive/spark/insert_into1.q.out | 40 +-
.../clientpositive/spark/insert_into2.q.out | 6 +-
.../spark/multi_insert_with_join.q.out | 16 +-
.../spark/spark_combine_equivalent_work.q.out | 6 +-
.../spark_dynamic_partition_pruning_3.q.out | 72 +-
.../spark_dynamic_partition_pruning_4.q.out | 52 +-
...ic_partition_pruning_recursive_mapjoin.q.out | 64 +-
.../spark/spark_use_ts_stats_for_mapjoin.q.out | 30 +-
.../clientpositive/spark/subquery_exists.q.out | 26 +-
.../clientpositive/spark/subquery_in.q.out | 24 +-
.../clientpositive/spark/subquery_multi.q.out | 32 +-
.../clientpositive/spark/subquery_notin.q.out | 40 +-
.../clientpositive/spark/subquery_scalar.q.out | 160 ++--
.../clientpositive/spark/subquery_select.q.out | 4 +-
.../spark/vector_inner_join.q.out | 20 +-
.../spark/vector_outer_join0.q.out | 12 +-
.../vectorization_parquet_projection.q.out | 6 +-
.../clientpositive/spark/vectorized_case.q.out | 12 +-
.../results/clientpositive/specialChar.q.out | 40 +-
.../clientpositive/stats_empty_partition2.q.out | 16 +-
.../clientpositive/stats_partial_size.q.out | 4 +-
.../results/clientpositive/stats_ppr_all.q.out | 18 +-
.../clientpositive/subquery_exists.q.out | 26 +-
.../clientpositive/subquery_notin_having.q.out | 10 +-
.../results/clientpositive/tablevalues.q.out | 764 +++++++++++++++++++
.../clientpositive/testSetQueryString.q.out | 4 +-
.../tez/acid_vectorization_original_tez.q.out | 4 +-
.../clientpositive/tez/explainanalyze_5.q.out | 16 +-
.../tez/multi_count_distinct.q.out | 8 +-
.../tez/vector_join_part_col_char.q.out | 8 +-
.../results/clientpositive/timestamptz_2.q.out | 4 +-
.../results/clientpositive/transform3.q.out | 4 +-
.../udaf_binarysetfunctions.q.out | 290 ++++---
.../clientpositive/udf_character_length.q.out | 4 +-
.../clientpositive/udf_folder_constants.q.out | 10 +-
.../test/results/clientpositive/udf_isops.q.out | 24 +-
.../results/clientpositive/udf_nullif.q.out | 10 +-
.../clientpositive/udf_octet_length.q.out | 4 +-
.../clientpositive/udf_stddev_samp.q.out | 4 +-
.../results/clientpositive/udf_var_samp.q.out | 4 +-
.../clientpositive/udf_width_bucket.q.out | 100 ++-
.../clientpositive/udtf_replicate_rows.q.out | 8 +-
.../test/results/clientpositive/union37.q.out | 16 +-
.../results/clientpositive/union_paren.q.out | 12 +-
.../clientpositive/unionall_unbalancedppd.q.out | 20 +-
.../clientpositive/updateBasicStats.q.out | 4 +-
.../update_after_multiple_inserts.q.out | 16 +-
...er_multiple_inserts_special_characters.q.out | 16 +-
.../results/clientpositive/vector_acid3.q.out | 4 +-
.../vector_aggregate_without_gby.q.out | 8 +-
.../results/clientpositive/vector_bround.q.out | 6 +-
.../results/clientpositive/vector_bucket.q.out | 62 +-
.../clientpositive/vector_char_cast.q.out | 4 +-
.../clientpositive/vector_coalesce_2.q.out | 6 +-
.../clientpositive/vector_coalesce_3.q.out | 10 +-
.../clientpositive/vector_complex_join.q.out | 6 +-
.../results/clientpositive/vector_const.q.out | 4 +-
.../vector_custom_udf_configure.q.out | 6 +-
.../clientpositive/vector_decimal_round.q.out | 12 +-
.../clientpositive/vector_decimal_round_2.q.out | 20 +-
.../clientpositive/vector_gather_stats.q.out | 18 +-
.../clientpositive/vector_if_expr_2.q.out | 6 +-
.../vector_join_part_col_char.q.out | 8 +-
.../results/clientpositive/vector_like_2.q.out | 4 +-
.../clientpositive/vector_null_projection.q.out | 8 +-
.../clientpositive/vector_order_null.q.out | 42 +-
.../clientpositive/vector_outer_join0.q.out | 12 +-
.../vector_reduce_groupby_duplicate_cols.q.out | 8 +-
.../clientpositive/vector_string_decimal.q.out | 4 +-
.../clientpositive/vector_struct_in.q.out | 26 +-
.../results/clientpositive/vector_udf2.q.out | 8 +-
.../vector_udf_character_length.q.out | 8 +-
.../vector_udf_octet_length.q.out | 6 +-
.../vector_udf_string_to_boolean.q.out | 48 +-
.../clientpositive/vector_when_case_null.q.out | 6 +-
.../vectorization_parquet_projection.q.out | 6 +-
.../clientpositive/vectorized_case.q.out | 12 +-
.../clientpositive/vectorized_mapjoin2.q.out | 8 +-
.../clientpositive/vectorized_timestamp.q.out | 4 +-
.../clientpositive/windowing_windowspec4.q.out | 6 +-
.../compiler/errors/wrong_distinct2.q.out | 2 +-
.../ObjectInspectorConverters.java | 7 +
369 files changed, 6957 insertions(+), 5358 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/7e64114d/accumulo-handler/src/test/results/positive/accumulo_index.q.out
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/test/results/positive/accumulo_index.q.out b/accumulo-handler/src/test/results/positive/accumulo_index.q.out
index 5cb3d73..a7c66a7 100644
--- a/accumulo-handler/src/test/results/positive/accumulo_index.q.out
+++ b/accumulo-handler/src/test/results/positive/accumulo_index.q.out
@@ -56,11 +56,13 @@ PREHOOK: query: insert into accumulo_index_test values( "row1", true, 55, 107, 5
4.5, 0.8, 1232223, "2001-10-10", "123 main street",
"555-555-5555", "2016-02-22 12:45:07.000000000")
PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@accumulo_index_test
POSTHOOK: query: insert into accumulo_index_test values( "row1", true, 55, 107, 555555, 1223232332,
4.5, 0.8, 1232223, "2001-10-10", "123 main street",
"555-555-5555", "2016-02-22 12:45:07.000000000")
POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@accumulo_index_test
PREHOOK: query: select * from accumulo_index_test where active = 'true'
PREHOOK: type: QUERY
http://git-wip-us.apache.org/repos/asf/hive/blob/7e64114d/hbase-handler/src/test/results/positive/hbase_binary_binary.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_binary_binary.q.out b/hbase-handler/src/test/results/positive/hbase_binary_binary.q.out
index 0e35cc3..e04227f 100644
--- a/hbase-handler/src/test/results/positive/hbase_binary_binary.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_binary_binary.q.out
@@ -20,15 +20,19 @@ POSTHOOK: Output: database:default
POSTHOOK: Output: default@testhbaseb
PREHOOK: query: insert into table testhbaseb values(1, 'hello')
PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@testhbaseb
POSTHOOK: query: insert into table testhbaseb values(1, 'hello')
POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@testhbaseb
PREHOOK: query: insert into table testhbaseb values(2, 'hi')
PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@testhbaseb
POSTHOOK: query: insert into table testhbaseb values(2, 'hi')
POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@testhbaseb
PREHOOK: query: select * from testhbaseb
PREHOOK: type: QUERY
http://git-wip-us.apache.org/repos/asf/hive/blob/7e64114d/hbase-handler/src/test/results/positive/hbase_decimal_decimal.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_decimal_decimal.q.out b/hbase-handler/src/test/results/positive/hbase_decimal_decimal.q.out
index f719e95..6bd4f73 100644
Binary files a/hbase-handler/src/test/results/positive/hbase_decimal_decimal.q.out and b/hbase-handler/src/test/results/positive/hbase_decimal_decimal.q.out differ
http://git-wip-us.apache.org/repos/asf/hive/blob/7e64114d/hbase-handler/src/test/results/positive/hbase_tablename_property.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_tablename_property.q.out b/hbase-handler/src/test/results/positive/hbase_tablename_property.q.out
index c5c4456..234f34a 100644
--- a/hbase-handler/src/test/results/positive/hbase_tablename_property.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_tablename_property.q.out
@@ -18,9 +18,11 @@ POSTHOOK: Output: database:default
POSTHOOK: Output: default@hbase_table_1
PREHOOK: query: INSERT INTO hbase_table_1 VALUES(1, 'value1')
PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@hbase_table_1
POSTHOOK: query: INSERT INTO hbase_table_1 VALUES(1, 'value1')
POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@hbase_table_1
PREHOOK: query: CREATE EXTERNAL TABLE hbase_table_2(key int comment 'It is a column key', value string comment 'It is the column string value')
STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
http://git-wip-us.apache.org/repos/asf/hive/blob/7e64114d/itests/hive-blobstore/src/test/results/clientpositive/insert_into_dynamic_partitions.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/insert_into_dynamic_partitions.q.out b/itests/hive-blobstore/src/test/results/clientpositive/insert_into_dynamic_partitions.q.out
index ebf2daa..caa0029 100644
--- a/itests/hive-blobstore/src/test/results/clientpositive/insert_into_dynamic_partitions.q.out
+++ b/itests/hive-blobstore/src/test/results/clientpositive/insert_into_dynamic_partitions.q.out
@@ -14,34 +14,38 @@ POSTHOOK: Output: database:default
POSTHOOK: Output: default@table1
PREHOOK: query: INSERT INTO TABLE table1 PARTITION (key) VALUES (1, '101'), (2, '202'), (3, '303'), (4, '404'), (5, '505')
PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@table1
POSTHOOK: query: INSERT INTO TABLE table1 PARTITION (key) VALUES (1, '101'), (2, '202'), (3, '303'), (4, '404'), (5, '505')
POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@table1@key=101
POSTHOOK: Output: default@table1@key=202
POSTHOOK: Output: default@table1@key=303
POSTHOOK: Output: default@table1@key=404
POSTHOOK: Output: default@table1@key=505
-POSTHOOK: Lineage: table1 PARTITION(key=101).id EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: table1 PARTITION(key=202).id EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: table1 PARTITION(key=303).id EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: table1 PARTITION(key=404).id EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: table1 PARTITION(key=505).id EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: table1 PARTITION(key=101).id SCRIPT []
+POSTHOOK: Lineage: table1 PARTITION(key=202).id SCRIPT []
+POSTHOOK: Lineage: table1 PARTITION(key=303).id SCRIPT []
+POSTHOOK: Lineage: table1 PARTITION(key=404).id SCRIPT []
+POSTHOOK: Lineage: table1 PARTITION(key=505).id SCRIPT []
PREHOOK: query: INSERT INTO TABLE table1 PARTITION (key) VALUES (1, '101'), (2, '202'), (3, '303'), (4, '404'), (5, '505')
PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@table1
POSTHOOK: query: INSERT INTO TABLE table1 PARTITION (key) VALUES (1, '101'), (2, '202'), (3, '303'), (4, '404'), (5, '505')
POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@table1@key=101
POSTHOOK: Output: default@table1@key=202
POSTHOOK: Output: default@table1@key=303
POSTHOOK: Output: default@table1@key=404
POSTHOOK: Output: default@table1@key=505
-POSTHOOK: Lineage: table1 PARTITION(key=101).id EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: table1 PARTITION(key=202).id EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: table1 PARTITION(key=303).id EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: table1 PARTITION(key=404).id EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: table1 PARTITION(key=505).id EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: table1 PARTITION(key=101).id SCRIPT []
+POSTHOOK: Lineage: table1 PARTITION(key=202).id SCRIPT []
+POSTHOOK: Lineage: table1 PARTITION(key=303).id SCRIPT []
+POSTHOOK: Lineage: table1 PARTITION(key=404).id SCRIPT []
+POSTHOOK: Lineage: table1 PARTITION(key=505).id SCRIPT []
PREHOOK: query: SELECT * FROM table1
PREHOOK: type: QUERY
PREHOOK: Input: default@table1
@@ -84,59 +88,67 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: values__tmp__table__3
- Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+ alias: _dummy_table
+ Row Limit Per Split: 1
+ Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
GatherStats: false
Select Operator
- expressions: UDFToInteger(tmp_values_col1) (type: int), tmp_values_col2 (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col1 (type: string), '_bucket_number' (type: string)
- null sort order: aa
- sort order: ++
- Map-reduce partition columns: _col1 (type: string)
- Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
- tag: -1
- value expressions: _col0 (type: int)
- auto parallelism: false
+ expressions: array(const struct(1,'101'),const struct(2,'202'),const struct(3,'303'),const struct(4,'404'),const struct(5,'505')) (type: array<struct<col1:int,col2:string>>)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
+ UDTF Operator
+ Statistics: Num rows: 1 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
+ function name: inline
+ Select Operator
+ expressions: col1 (type: int), col2 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col1 (type: string), '_bucket_number' (type: string)
+ null sort order: aa
+ sort order: ++
+ Map-reduce partition columns: _col1 (type: string)
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ tag: -1
+ value expressions: _col0 (type: int)
+ auto parallelism: false
Path -> Alias:
#### A masked pattern was here ####
Path -> Partition:
#### A masked pattern was here ####
Partition
- base file name: Values__Tmp__Table__3
- input format: org.apache.hadoop.mapred.TextInputFormat
+ base file name: dummy_path
+ input format: org.apache.hadoop.hive.ql.io.NullRowsInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
bucket_count -1
column.name.delimiter ,
- columns tmp_values_col1,tmp_values_col2
+ columns
columns.comments
- columns.types string:string
+ columns.types
#### A masked pattern was here ####
- name default.values__tmp__table__3
- serialization.ddl struct values__tmp__table__3 { string tmp_values_col1, string tmp_values_col2}
+ name _dummy_database._dummy_table
+ serialization.ddl struct _dummy_table { }
serialization.format 1
- serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe
+ serde: org.apache.hadoop.hive.serde2.NullStructSerDe
- input format: org.apache.hadoop.mapred.TextInputFormat
+ input format: org.apache.hadoop.hive.ql.io.NullRowsInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
bucket_count -1
column.name.delimiter ,
- columns tmp_values_col1,tmp_values_col2
+ columns
columns.comments
- columns.types string:string
+ columns.types
#### A masked pattern was here ####
- name default.values__tmp__table__3
- serialization.ddl struct values__tmp__table__3 { string tmp_values_col1, string tmp_values_col2}
+ name _dummy_database._dummy_table
+ serialization.ddl struct _dummy_table { }
serialization.format 1
- serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.values__tmp__table__3
- name: default.values__tmp__table__3
+ serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe
+ serde: org.apache.hadoop.hive.serde2.NullStructSerDe
+ name: _dummy_database._dummy_table
+ name: _dummy_database._dummy_table
Truncated Path -> Alias:
#### A masked pattern was here ####
Needs Tagging: false
@@ -144,14 +156,14 @@ STAGE PLANS:
Select Operator
expressions: VALUE._col0 (type: int), KEY._col1 (type: string), KEY.'_bucket_number' (type: string)
outputColumnNames: _col0, _col1, '_bucket_number'
- Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 98 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
GlobalTableId: 1
directory: ### BLOBSTORE_STAGING_PATH ###
Dp Sort State: PARTITION_BUCKET_SORTED
NumFilesPerFileSink: 1
- Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 98 Basic stats: COMPLETE Column stats: COMPLETE
Stats Publishing Key Prefix: ### BLOBSTORE_STAGING_PATH ###
table:
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -235,28 +247,34 @@ POSTHOOK: Output: database:default
POSTHOOK: Output: default@table1
PREHOOK: query: INSERT INTO table1 PARTITION (country='USA', state='CA') values ('John Doe', 23), ('Jane Doe', 22)
PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@table1@country=USA/state=CA
POSTHOOK: query: INSERT INTO table1 PARTITION (country='USA', state='CA') values ('John Doe', 23), ('Jane Doe', 22)
POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@table1@country=USA/state=CA
-POSTHOOK: Lineage: table1 PARTITION(country=USA,state=CA).age EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-POSTHOOK: Lineage: table1 PARTITION(country=USA,state=CA).name SIMPLE [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: table1 PARTITION(country=USA,state=CA).age SCRIPT []
+POSTHOOK: Lineage: table1 PARTITION(country=USA,state=CA).name SCRIPT []
PREHOOK: query: INSERT INTO table1 PARTITION (country='USA', state='CA') values ('Mark Cage', 38), ('Mirna Cage', 37)
PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@table1@country=USA/state=CA
POSTHOOK: query: INSERT INTO table1 PARTITION (country='USA', state='CA') values ('Mark Cage', 38), ('Mirna Cage', 37)
POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@table1@country=USA/state=CA
-POSTHOOK: Lineage: table1 PARTITION(country=USA,state=CA).age EXPRESSION [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-POSTHOOK: Lineage: table1 PARTITION(country=USA,state=CA).name SIMPLE [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: table1 PARTITION(country=USA,state=CA).age SCRIPT []
+POSTHOOK: Lineage: table1 PARTITION(country=USA,state=CA).name SCRIPT []
PREHOOK: query: INSERT INTO table1 PARTITION (country='USA', state='TX') values ('Bill Rose', 52), ('Maria Full', 50)
PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@table1@country=USA/state=TX
POSTHOOK: query: INSERT INTO table1 PARTITION (country='USA', state='TX') values ('Bill Rose', 52), ('Maria Full', 50)
POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@table1@country=USA/state=TX
-POSTHOOK: Lineage: table1 PARTITION(country=USA,state=TX).age EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-POSTHOOK: Lineage: table1 PARTITION(country=USA,state=TX).name SIMPLE [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: table1 PARTITION(country=USA,state=TX).age SCRIPT []
+POSTHOOK: Lineage: table1 PARTITION(country=USA,state=TX).name SCRIPT []
#### A masked pattern was here ####
PREHOOK: type: CREATETABLE
PREHOOK: Input: ### test.blobstore.path ###/table2
@@ -286,12 +304,14 @@ POSTHOOK: Lineage: table2 PARTITION(country=USA,state=TX).age SIMPLE [(table1)ta
POSTHOOK: Lineage: table2 PARTITION(country=USA,state=TX).name SIMPLE [(table1)table1.FieldSchema(name:name, type:string, comment:null), ]
PREHOOK: query: INSERT INTO TABLE table2 PARTITION (country='MEX', state) VALUES ('Peter Mo', 87, 'SON')
PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@table2@country=MEX
POSTHOOK: query: INSERT INTO TABLE table2 PARTITION (country='MEX', state) VALUES ('Peter Mo', 87, 'SON')
POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@table2@country=MEX/state=SON
-POSTHOOK: Lineage: table2 PARTITION(country=MEX,state=SON).age EXPRESSION [(values__tmp__table__7)values__tmp__table__7.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-POSTHOOK: Lineage: table2 PARTITION(country=MEX,state=SON).name SIMPLE [(values__tmp__table__7)values__tmp__table__7.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: table2 PARTITION(country=MEX,state=SON).age SCRIPT []
+POSTHOOK: Lineage: table2 PARTITION(country=MEX,state=SON).name SCRIPT []
PREHOOK: query: SHOW PARTITIONS table2
PREHOOK: type: SHOWPARTITIONS
PREHOOK: Input: default@table2
http://git-wip-us.apache.org/repos/asf/hive/blob/7e64114d/itests/hive-blobstore/src/test/results/clientpositive/insert_into_table.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/insert_into_table.q.out b/itests/hive-blobstore/src/test/results/clientpositive/insert_into_table.q.out
index 40d2571..ab8ad77 100644
--- a/itests/hive-blobstore/src/test/results/clientpositive/insert_into_table.q.out
+++ b/itests/hive-blobstore/src/test/results/clientpositive/insert_into_table.q.out
@@ -14,18 +14,22 @@ POSTHOOK: Output: database:default
POSTHOOK: Output: default@table1
PREHOOK: query: INSERT INTO TABLE table1 VALUES (1)
PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@table1
POSTHOOK: query: INSERT INTO TABLE table1 VALUES (1)
POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@table1
-POSTHOOK: Lineage: table1.id EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: table1.id SCRIPT []
PREHOOK: query: INSERT INTO TABLE table1 VALUES (2)
PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@table1
POSTHOOK: query: INSERT INTO TABLE table1 VALUES (2)
POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@table1
-POSTHOOK: Lineage: table1.id EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: table1.id SCRIPT []
PREHOOK: query: SELECT * FROM table1
PREHOOK: type: QUERY
PREHOOK: Input: default@table1
@@ -55,99 +59,107 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: values__tmp__table__3
- Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+ alias: _dummy_table
+ Row Limit Per Split: 1
+ Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
GatherStats: false
Select Operator
- expressions: UDFToInteger(tmp_values_col1) (type: int)
+ expressions: array(const struct(1)) (type: array<struct<col1:int>>)
outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- GlobalTableId: 1
- directory: ### BLOBSTORE_STAGING_PATH ###
- NumFilesPerFileSink: 1
- Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
- Stats Publishing Key Prefix: ### BLOBSTORE_STAGING_PATH ###
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- properties:
- COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"id":"true"}}
- bucket_count -1
- column.name.delimiter ,
- columns id
- columns.comments
- columns.types int
+ Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+ UDTF Operator
+ Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+ function name: inline
+ Select Operator
+ expressions: col1 (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 1
+ directory: ### BLOBSTORE_STAGING_PATH ###
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ Stats Publishing Key Prefix: ### BLOBSTORE_STAGING_PATH ###
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"id":"true"}}
+ bucket_count -1
+ column.name.delimiter ,
+ columns id
+ columns.comments
+ columns.types int
#### A masked pattern was here ####
- location ### test.blobstore.path ###/table1
- name default.table1
- numFiles 2
- numRows 2
- rawDataSize 2
- serialization.ddl struct table1 { i32 id}
- serialization.format 1
- serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 4
+ location ### test.blobstore.path ###/table1
+ name default.table1
+ numFiles 2
+ numRows 2
+ rawDataSize 2
+ serialization.ddl struct table1 { i32 id}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 4
#### A masked pattern was here ####
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.table1
- TotalFiles: 1
- GatherStats: true
- MultiFileSpray: false
- Select Operator
- expressions: _col0 (type: int)
- outputColumnNames: id
- Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
- Group By Operator
- aggregations: compute_stats(id, 'hll')
- mode: hash
- outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 424 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- null sort order:
- sort order:
- Statistics: Num rows: 1 Data size: 424 Basic stats: COMPLETE Column stats: NONE
- tag: -1
- value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
- auto parallelism: false
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.table1
+ TotalFiles: 1
+ GatherStats: true
+ MultiFileSpray: false
+ Select Operator
+ expressions: _col0 (type: int)
+ outputColumnNames: id
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: compute_stats(id, 'hll')
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 424 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ null sort order:
+ sort order:
+ Statistics: Num rows: 1 Data size: 424 Basic stats: COMPLETE Column stats: COMPLETE
+ tag: -1
+ value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
+ auto parallelism: false
Path -> Alias:
#### A masked pattern was here ####
Path -> Partition:
#### A masked pattern was here ####
Partition
- base file name: Values__Tmp__Table__3
- input format: org.apache.hadoop.mapred.TextInputFormat
+ base file name: dummy_path
+ input format: org.apache.hadoop.hive.ql.io.NullRowsInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
bucket_count -1
column.name.delimiter ,
- columns tmp_values_col1
+ columns
columns.comments
- columns.types string
+ columns.types
#### A masked pattern was here ####
- name default.values__tmp__table__3
- serialization.ddl struct values__tmp__table__3 { string tmp_values_col1}
+ name _dummy_database._dummy_table
+ serialization.ddl struct _dummy_table { }
serialization.format 1
- serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe
+ serde: org.apache.hadoop.hive.serde2.NullStructSerDe
- input format: org.apache.hadoop.mapred.TextInputFormat
+ input format: org.apache.hadoop.hive.ql.io.NullRowsInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
bucket_count -1
column.name.delimiter ,
- columns tmp_values_col1
+ columns
columns.comments
- columns.types string
+ columns.types
#### A masked pattern was here ####
- name default.values__tmp__table__3
- serialization.ddl struct values__tmp__table__3 { string tmp_values_col1}
+ name _dummy_database._dummy_table
+ serialization.ddl struct _dummy_table { }
serialization.format 1
- serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.values__tmp__table__3
- name: default.values__tmp__table__3
+ serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe
+ serde: org.apache.hadoop.hive.serde2.NullStructSerDe
+ name: _dummy_database._dummy_table
+ name: _dummy_database._dummy_table
Truncated Path -> Alias:
#### A masked pattern was here ####
Needs Tagging: false
@@ -156,13 +168,13 @@ STAGE PLANS:
aggregations: compute_stats(VALUE._col0)
mode: mergepartial
outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
GlobalTableId: 0
#### A masked pattern was here ####
NumFilesPerFileSink: 1
- Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: COMPLETE
#### A masked pattern was here ####
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
http://git-wip-us.apache.org/repos/asf/hive/blob/7e64114d/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_directory.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_directory.q.out b/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_directory.q.out
index 17db9db..2b28a66 100644
--- a/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_directory.q.out
+++ b/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_directory.q.out
@@ -12,20 +12,24 @@ POSTHOOK: Output: database:default
POSTHOOK: Output: default@table1
PREHOOK: query: INSERT INTO TABLE table1 VALUES (1, 'k1')
PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@table1
POSTHOOK: query: INSERT INTO TABLE table1 VALUES (1, 'k1')
POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@table1
-POSTHOOK: Lineage: table1.id EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: table1.key SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: table1.id SCRIPT []
+POSTHOOK: Lineage: table1.key SCRIPT []
PREHOOK: query: INSERT INTO TABLE table1 VALUES (2, 'k2')
PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@table1
POSTHOOK: query: INSERT INTO TABLE table1 VALUES (2, 'k2')
POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@table1
-POSTHOOK: Lineage: table1.id EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: table1.key SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: table1.id SCRIPT []
+POSTHOOK: Lineage: table1.key SCRIPT []
PREHOOK: query: INSERT OVERWRITE DIRECTORY '### test.blobstore.path ###/table1.dir/' SELECT * FROM table1
PREHOOK: type: QUERY
PREHOOK: Input: default@table1
http://git-wip-us.apache.org/repos/asf/hive/blob/7e64114d/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_dynamic_partitions.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_dynamic_partitions.q.out b/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_dynamic_partitions.q.out
index 5cf69d8..cdb67dd 100644
--- a/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_dynamic_partitions.q.out
+++ b/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_dynamic_partitions.q.out
@@ -14,19 +14,21 @@ POSTHOOK: Output: database:default
POSTHOOK: Output: default@table1
PREHOOK: query: INSERT OVERWRITE TABLE table1 PARTITION (key) VALUES (1, '101'), (2, '202'), (3, '303'), (4, '404'), (5, '505')
PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@table1
POSTHOOK: query: INSERT OVERWRITE TABLE table1 PARTITION (key) VALUES (1, '101'), (2, '202'), (3, '303'), (4, '404'), (5, '505')
POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@table1@key=101
POSTHOOK: Output: default@table1@key=202
POSTHOOK: Output: default@table1@key=303
POSTHOOK: Output: default@table1@key=404
POSTHOOK: Output: default@table1@key=505
-POSTHOOK: Lineage: table1 PARTITION(key=101).id EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: table1 PARTITION(key=202).id EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: table1 PARTITION(key=303).id EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: table1 PARTITION(key=404).id EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: table1 PARTITION(key=505).id EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: table1 PARTITION(key=101).id SCRIPT []
+POSTHOOK: Lineage: table1 PARTITION(key=202).id SCRIPT []
+POSTHOOK: Lineage: table1 PARTITION(key=303).id SCRIPT []
+POSTHOOK: Lineage: table1 PARTITION(key=404).id SCRIPT []
+POSTHOOK: Lineage: table1 PARTITION(key=505).id SCRIPT []
PREHOOK: query: SELECT * FROM table1
PREHOOK: type: QUERY
PREHOOK: Input: default@table1
@@ -52,19 +54,21 @@ POSTHOOK: Input: default@table1@key=505
5 505
PREHOOK: query: INSERT OVERWRITE TABLE table1 PARTITION (key) VALUES (1, '101'), (2, '202'), (3, '303'), (4, '404'), (5, '505')
PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@table1
POSTHOOK: query: INSERT OVERWRITE TABLE table1 PARTITION (key) VALUES (1, '101'), (2, '202'), (3, '303'), (4, '404'), (5, '505')
POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@table1@key=101
POSTHOOK: Output: default@table1@key=202
POSTHOOK: Output: default@table1@key=303
POSTHOOK: Output: default@table1@key=404
POSTHOOK: Output: default@table1@key=505
-POSTHOOK: Lineage: table1 PARTITION(key=101).id EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: table1 PARTITION(key=202).id EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: table1 PARTITION(key=303).id EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: table1 PARTITION(key=404).id EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: table1 PARTITION(key=505).id EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: table1 PARTITION(key=101).id SCRIPT []
+POSTHOOK: Lineage: table1 PARTITION(key=202).id SCRIPT []
+POSTHOOK: Lineage: table1 PARTITION(key=303).id SCRIPT []
+POSTHOOK: Lineage: table1 PARTITION(key=404).id SCRIPT []
+POSTHOOK: Lineage: table1 PARTITION(key=505).id SCRIPT []
PREHOOK: query: SELECT * FROM table1
PREHOOK: type: QUERY
PREHOOK: Input: default@table1
@@ -102,59 +106,67 @@ STAGE PLANS:
Map Reduce
Map Operator Tree:
TableScan
- alias: values__tmp__table__3
- Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+ alias: _dummy_table
+ Row Limit Per Split: 1
+ Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
GatherStats: false
Select Operator
- expressions: UDFToInteger(tmp_values_col1) (type: int), tmp_values_col2 (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col1 (type: string), '_bucket_number' (type: string)
- null sort order: aa
- sort order: ++
- Map-reduce partition columns: _col1 (type: string)
- Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
- tag: -1
- value expressions: _col0 (type: int)
- auto parallelism: false
+ expressions: array(const struct(1,'101'),const struct(2,'202'),const struct(3,'303'),const struct(4,'404'),const struct(5,'505')) (type: array<struct<col1:int,col2:string>>)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
+ UDTF Operator
+ Statistics: Num rows: 1 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
+ function name: inline
+ Select Operator
+ expressions: col1 (type: int), col2 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col1 (type: string), '_bucket_number' (type: string)
+ null sort order: aa
+ sort order: ++
+ Map-reduce partition columns: _col1 (type: string)
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ tag: -1
+ value expressions: _col0 (type: int)
+ auto parallelism: false
Path -> Alias:
#### A masked pattern was here ####
Path -> Partition:
#### A masked pattern was here ####
Partition
- base file name: Values__Tmp__Table__3
- input format: org.apache.hadoop.mapred.TextInputFormat
+ base file name: dummy_path
+ input format: org.apache.hadoop.hive.ql.io.NullRowsInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
bucket_count -1
column.name.delimiter ,
- columns tmp_values_col1,tmp_values_col2
+ columns
columns.comments
- columns.types string:string
+ columns.types
#### A masked pattern was here ####
- name default.values__tmp__table__3
- serialization.ddl struct values__tmp__table__3 { string tmp_values_col1, string tmp_values_col2}
+ name _dummy_database._dummy_table
+ serialization.ddl struct _dummy_table { }
serialization.format 1
- serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe
+ serde: org.apache.hadoop.hive.serde2.NullStructSerDe
- input format: org.apache.hadoop.mapred.TextInputFormat
+ input format: org.apache.hadoop.hive.ql.io.NullRowsInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
properties:
bucket_count -1
column.name.delimiter ,
- columns tmp_values_col1,tmp_values_col2
+ columns
columns.comments
- columns.types string:string
+ columns.types
#### A masked pattern was here ####
- name default.values__tmp__table__3
- serialization.ddl struct values__tmp__table__3 { string tmp_values_col1, string tmp_values_col2}
+ name _dummy_database._dummy_table
+ serialization.ddl struct _dummy_table { }
serialization.format 1
- serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.values__tmp__table__3
- name: default.values__tmp__table__3
+ serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe
+ serde: org.apache.hadoop.hive.serde2.NullStructSerDe
+ name: _dummy_database._dummy_table
+ name: _dummy_database._dummy_table
Truncated Path -> Alias:
#### A masked pattern was here ####
Needs Tagging: false
@@ -162,14 +174,14 @@ STAGE PLANS:
Select Operator
expressions: VALUE._col0 (type: int), KEY._col1 (type: string), KEY.'_bucket_number' (type: string)
outputColumnNames: _col0, _col1, '_bucket_number'
- Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 98 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
GlobalTableId: 1
directory: ### BLOBSTORE_STAGING_PATH ###
Dp Sort State: PARTITION_BUCKET_SORTED
NumFilesPerFileSink: 1
- Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 98 Basic stats: COMPLETE Column stats: COMPLETE
Stats Publishing Key Prefix: ### BLOBSTORE_STAGING_PATH ###
table:
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -253,28 +265,34 @@ POSTHOOK: Output: database:default
POSTHOOK: Output: default@table1
PREHOOK: query: INSERT INTO table1 PARTITION (country='USA', state='CA') values ('John Doe', 23), ('Jane Doe', 22)
PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@table1@country=USA/state=CA
POSTHOOK: query: INSERT INTO table1 PARTITION (country='USA', state='CA') values ('John Doe', 23), ('Jane Doe', 22)
POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@table1@country=USA/state=CA
-POSTHOOK: Lineage: table1 PARTITION(country=USA,state=CA).age EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-POSTHOOK: Lineage: table1 PARTITION(country=USA,state=CA).name SIMPLE [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: table1 PARTITION(country=USA,state=CA).age SCRIPT []
+POSTHOOK: Lineage: table1 PARTITION(country=USA,state=CA).name SCRIPT []
PREHOOK: query: INSERT INTO table1 PARTITION (country='USA', state='CA') values ('Mark Cage', 38), ('Mirna Cage', 37)
PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@table1@country=USA/state=CA
POSTHOOK: query: INSERT INTO table1 PARTITION (country='USA', state='CA') values ('Mark Cage', 38), ('Mirna Cage', 37)
POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@table1@country=USA/state=CA
-POSTHOOK: Lineage: table1 PARTITION(country=USA,state=CA).age EXPRESSION [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-POSTHOOK: Lineage: table1 PARTITION(country=USA,state=CA).name SIMPLE [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: table1 PARTITION(country=USA,state=CA).age SCRIPT []
+POSTHOOK: Lineage: table1 PARTITION(country=USA,state=CA).name SCRIPT []
PREHOOK: query: INSERT INTO table1 PARTITION (country='USA', state='TX') values ('Bill Rose', 52), ('Maria Full', 50)
PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@table1@country=USA/state=TX
POSTHOOK: query: INSERT INTO table1 PARTITION (country='USA', state='TX') values ('Bill Rose', 52), ('Maria Full', 50)
POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@table1@country=USA/state=TX
-POSTHOOK: Lineage: table1 PARTITION(country=USA,state=TX).age EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-POSTHOOK: Lineage: table1 PARTITION(country=USA,state=TX).name SIMPLE [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: table1 PARTITION(country=USA,state=TX).age SCRIPT []
+POSTHOOK: Lineage: table1 PARTITION(country=USA,state=TX).name SCRIPT []
#### A masked pattern was here ####
PREHOOK: type: CREATETABLE
PREHOOK: Input: ### test.blobstore.path ###/table2
@@ -368,12 +386,14 @@ Maria Full 50 USA TX
Bill Rose 52 USA TX
PREHOOK: query: INSERT OVERWRITE TABLE table2 PARTITION (country='MEX', state) VALUES ('Peter Mo', 87, 'SON')
PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@table2@country=MEX
POSTHOOK: query: INSERT OVERWRITE TABLE table2 PARTITION (country='MEX', state) VALUES ('Peter Mo', 87, 'SON')
POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@table2@country=MEX/state=SON
-POSTHOOK: Lineage: table2 PARTITION(country=MEX,state=SON).age EXPRESSION [(values__tmp__table__7)values__tmp__table__7.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-POSTHOOK: Lineage: table2 PARTITION(country=MEX,state=SON).name SIMPLE [(values__tmp__table__7)values__tmp__table__7.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: table2 PARTITION(country=MEX,state=SON).age SCRIPT []
+POSTHOOK: Lineage: table2 PARTITION(country=MEX,state=SON).name SCRIPT []
PREHOOK: query: SHOW PARTITIONS table2
PREHOOK: type: SHOWPARTITIONS
PREHOOK: Input: default@table2
http://git-wip-us.apache.org/repos/asf/hive/blob/7e64114d/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_dynamic_partitions_merge_move.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_dynamic_partitions_merge_move.q.out b/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_dynamic_partitions_merge_move.q.out
index bfebad6..a7c7bb9 100644
--- a/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_dynamic_partitions_merge_move.q.out
+++ b/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_dynamic_partitions_merge_move.q.out
@@ -8,14 +8,16 @@ POSTHOOK: Output: database:default
POSTHOOK: Output: default@tmp_table_merge_move
PREHOOK: query: INSERT INTO tmp_table_merge_move values ('u1','name1','2017-04-10',10000), ('u2','name2','2017-04-10',10000), ('u3','name3','2017-04-10',10000), ('u4','name4','2017-04-10',10001), ('u5','name5','2017-04-10',10002)
PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@tmp_table_merge_move
POSTHOOK: query: INSERT INTO tmp_table_merge_move values ('u1','name1','2017-04-10',10000), ('u2','name2','2017-04-10',10000), ('u3','name3','2017-04-10',10000), ('u4','name4','2017-04-10',10001), ('u5','name5','2017-04-10',10002)
POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@tmp_table_merge_move
-POSTHOOK: Lineage: tmp_table_merge_move.dt SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
-POSTHOOK: Lineage: tmp_table_merge_move.id SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: tmp_table_merge_move.name SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-POSTHOOK: Lineage: tmp_table_merge_move.pid EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
+POSTHOOK: Lineage: tmp_table_merge_move.dt SCRIPT []
+POSTHOOK: Lineage: tmp_table_merge_move.id SCRIPT []
+POSTHOOK: Lineage: tmp_table_merge_move.name SCRIPT []
+POSTHOOK: Lineage: tmp_table_merge_move.pid SCRIPT []
#### A masked pattern was here ####
PREHOOK: type: CREATETABLE
PREHOOK: Input: ### test.blobstore.path ###/s3_table_merge_move
http://git-wip-us.apache.org/repos/asf/hive/blob/7e64114d/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_dynamic_partitions_merge_only.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_dynamic_partitions_merge_only.q.out b/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_dynamic_partitions_merge_only.q.out
index 1bffae3..09b74bb 100644
--- a/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_dynamic_partitions_merge_only.q.out
+++ b/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_dynamic_partitions_merge_only.q.out
@@ -8,14 +8,16 @@ POSTHOOK: Output: database:default
POSTHOOK: Output: default@tmp_table_merge
PREHOOK: query: INSERT INTO tmp_table_merge values ('u1','name1','2017-04-10',10000), ('u2','name2','2017-04-10',10000), ('u3','name3','2017-04-10',10000), ('u4','name4','2017-04-10',10001), ('u5','name5','2017-04-10',10001)
PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@tmp_table_merge
POSTHOOK: query: INSERT INTO tmp_table_merge values ('u1','name1','2017-04-10',10000), ('u2','name2','2017-04-10',10000), ('u3','name3','2017-04-10',10000), ('u4','name4','2017-04-10',10001), ('u5','name5','2017-04-10',10001)
POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@tmp_table_merge
-POSTHOOK: Lineage: tmp_table_merge.dt SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
-POSTHOOK: Lineage: tmp_table_merge.id SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: tmp_table_merge.name SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-POSTHOOK: Lineage: tmp_table_merge.pid EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
+POSTHOOK: Lineage: tmp_table_merge.dt SCRIPT []
+POSTHOOK: Lineage: tmp_table_merge.id SCRIPT []
+POSTHOOK: Lineage: tmp_table_merge.name SCRIPT []
+POSTHOOK: Lineage: tmp_table_merge.pid SCRIPT []
#### A masked pattern was here ####
PREHOOK: type: CREATETABLE
PREHOOK: Input: ### test.blobstore.path ###/s3_table_merge
http://git-wip-us.apache.org/repos/asf/hive/blob/7e64114d/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_dynamic_partitions_move_only.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_dynamic_partitions_move_only.q.out b/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_dynamic_partitions_move_only.q.out
index cc1d018..b60b0a9 100644
--- a/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_dynamic_partitions_move_only.q.out
+++ b/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_dynamic_partitions_move_only.q.out
@@ -8,14 +8,16 @@ POSTHOOK: Output: database:default
POSTHOOK: Output: default@tmp_table_move
PREHOOK: query: INSERT INTO tmp_table_move values ('u1','name1','2017-04-10',10000), ('u2','name2','2017-04-10',10001), ('u3','name3','2017-04-10',10002), ('u4','name4','2017-04-12',10001), ('u5','name5','2017-04-12',10002)
PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@tmp_table_move
POSTHOOK: query: INSERT INTO tmp_table_move values ('u1','name1','2017-04-10',10000), ('u2','name2','2017-04-10',10001), ('u3','name3','2017-04-10',10002), ('u4','name4','2017-04-12',10001), ('u5','name5','2017-04-12',10002)
POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@tmp_table_move
-POSTHOOK: Lineage: tmp_table_move.dt SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
-POSTHOOK: Lineage: tmp_table_move.id SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: tmp_table_move.name SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-POSTHOOK: Lineage: tmp_table_move.pid EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
+POSTHOOK: Lineage: tmp_table_move.dt SCRIPT []
+POSTHOOK: Lineage: tmp_table_move.id SCRIPT []
+POSTHOOK: Lineage: tmp_table_move.name SCRIPT []
+POSTHOOK: Lineage: tmp_table_move.pid SCRIPT []
#### A masked pattern was here ####
PREHOOK: type: CREATETABLE
PREHOOK: Input: ### test.blobstore.path ###/s3_table_move