You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by xu...@apache.org on 2014/10/27 20:57:03 UTC

svn commit: r1634671 [1/46] - in /hive/branches/spark: itests/src/test/resources/ ql/src/test/results/clientpositive/spark/

Author: xuefu
Date: Mon Oct 27 19:56:58 2014
New Revision: 1634671

URL: http://svn.apache.org/r1634671
Log:
HIVE-8422: Turn on all join .q tests [Spark Branch] (Chao via Xuefu)

Added:
    hive/branches/spark/ql/src/test/results/clientpositive/spark/annotate_stats_join.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join0.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join10.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join11.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join12.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join13.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join14.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join15.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join16.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join17.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join18.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join18_multi_distinct.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join19.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join20.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join21.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join22.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join23.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join24.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join25.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join27.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join28.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join29.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join3.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join30.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join31.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join32.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join4.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join5.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join6.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join7.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join8.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join9.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join_filters.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join_nulls.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join_reordering_values.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_10.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_11.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_12.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_13.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_14.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_15.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_3.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_4.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_5.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_6.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_7.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_8.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_9.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/avro_joins.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/avro_joins_native.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket_map_join_1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket_map_join_2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket_map_join_tez1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/bucket_map_join_tez2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin10.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin11.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin12.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin13.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin3.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin4.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin5.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin6.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin7.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin8.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin9.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin_negative.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin_negative2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/bucketmapjoin_negative3.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/cross_join.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/decimal_join.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/filter_join_breaktask.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/filter_join_breaktask2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/index_auto_self_join.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/infer_bucket_sort_convert_join.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join18_multi_distinct.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join28.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join29.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join30.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join31.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join32.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join33.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join34.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join35.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join36.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join37.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join38.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join39.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join41.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join_alt_syntax.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join_array.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join_cond_pushdown_1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join_cond_pushdown_2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join_cond_pushdown_3.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join_cond_pushdown_4.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join_cond_pushdown_unqual1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join_cond_pushdown_unqual2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join_cond_pushdown_unqual3.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join_cond_pushdown_unqual4.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join_empty.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join_filters.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join_filters_overlap.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join_hive_626.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join_literals.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join_map_ppr.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join_merge_multi_expressions.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join_merging.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join_nulls.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join_rc.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join_reorder.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join_reorder2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join_reorder3.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join_reorder4.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join_star.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join_thrift.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join_vc.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/join_view.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/leftsemijoin.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/leftsemijoin_mr.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/mapjoin_addjar.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/mapjoin_decimal.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/mapjoin_distinct.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/mapjoin_filter_on_outerjoin.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/mapjoin_hook.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/mapjoin_mapjoin.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/mapjoin_memcheck.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/mapjoin_subquery.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/mapjoin_subquery2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/mapjoin_test_outer.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/mergejoins.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/mergejoins_mixed.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/multi_join_union.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/parquet_join.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_gby_join.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_join.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_join2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_join3.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_join4.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_join5.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_join_filter.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_outer_join1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_outer_join2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_outer_join3.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/ppd_outer_join5.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/reduce_deduplicate_exclude_join.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/semijoin.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/skewjoin_noskew.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/skewjoin_union_remove_1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/skewjoin_union_remove_2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/skewjoinopt1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/skewjoinopt10.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/skewjoinopt11.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/skewjoinopt12.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/skewjoinopt13.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/skewjoinopt14.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/skewjoinopt15.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/skewjoinopt16.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/skewjoinopt17.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/skewjoinopt18.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/skewjoinopt19.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/skewjoinopt20.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/skewjoinopt3.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/skewjoinopt4.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/skewjoinopt5.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/skewjoinopt6.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/skewjoinopt7.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/skewjoinopt8.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/skewjoinopt9.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/smb_mapjoin9.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/smb_mapjoin_1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/smb_mapjoin_10.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/smb_mapjoin_14.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/smb_mapjoin_17.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/smb_mapjoin_18.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/smb_mapjoin_19.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/smb_mapjoin_2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/smb_mapjoin_20.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/smb_mapjoin_21.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/smb_mapjoin_22.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/smb_mapjoin_25.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/smb_mapjoin_3.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/smb_mapjoin_4.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/smb_mapjoin_5.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/smb_mapjoin_6.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/smb_mapjoin_7.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/smb_mapjoin_8.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/sort_merge_join_desc_1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/sort_merge_join_desc_2.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/sort_merge_join_desc_3.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/sort_merge_join_desc_4.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/sort_merge_join_desc_5.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/sort_merge_join_desc_6.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/sort_merge_join_desc_7.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/sort_merge_join_desc_8.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/temp_table_join1.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/tez_join_tests.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/tez_joins_explain.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/uniquejoin.q.out
    hive/branches/spark/ql/src/test/results/clientpositive/spark/varchar_join1.q.out
Modified:
    hive/branches/spark/itests/src/test/resources/testconfiguration.properties

Modified: hive/branches/spark/itests/src/test/resources/testconfiguration.properties
URL: http://svn.apache.org/viewvc/hive/branches/spark/itests/src/test/resources/testconfiguration.properties?rev=1634671&r1=1634670&r2=1634671&view=diff
==============================================================================
--- hive/branches/spark/itests/src/test/resources/testconfiguration.properties (original)
+++ hive/branches/spark/itests/src/test/resources/testconfiguration.properties Mon Oct 27 19:56:58 2014
@@ -405,272 +405,478 @@ minimr.query.negative.files=cluster_task
   minimr_broken_pipe.q,\
   udf_local_resource.q
 
-spark.query.files=add_part_multiple.q \
-  alter_merge_orc.q \
-  alter_merge_stats_orc.q \
-  auto_smb_mapjoin_14.q \
-  avro_compression_enabled_native.q \
-  avro_decimal_native.q \
-  bucket2.q \
-  bucket3.q \
-  bucket4.q \
-  column_access_stats.q \
-  count.q \
-  create_merge_compressed.q \
-  ctas.q \
-  custom_input_output_format.q \
-  date_udf.q \
-  disable_merge_for_bucketing.q \
-  enforce_order.q \
-  escape_clusterby1.q \
-  escape_distributeby1.q \
-  escape_orderby1.q \
-  escape_sortby1.q \
-  groupby1.q \
-  groupby10.q \
-  groupby11.q \
-  groupby2.q \
-  groupby3.q \
-  groupby3_map.q \
-  groupby3_map_multi_distinct.q \
-  groupby3_map_skew.q \
-  groupby3_noskew.q \
-  groupby3_noskew_multi_distinct.q \
-  groupby4.q \
-  groupby7.q \
-  groupby7_map.q \
-  groupby7_map_multi_single_reducer.q \
-  groupby7_map_skew.q \
-  groupby7_noskew.q \
-  groupby7_noskew_multi_single_reducer.q \
-  groupby8.q \
-  groupby8_map.q \
-  groupby8_map_skew.q \
-  groupby8_noskew.q \
-  groupby9.q \
-  groupby_complex_types.q \
-  groupby_complex_types_multi_single_reducer.q \
-  groupby_cube1.q \
-  groupby_multi_insert_common_distinct.q \
-  groupby_multi_single_reducer.q \
-  groupby_multi_single_reducer2.q \
-  groupby_multi_single_reducer3.q \
-  groupby_position.q \
-  groupby_ppr.q \
-  groupby_rollup1.q \
-  groupby_sort_1_23.q \
-  groupby_sort_skew_1_23.q \
-  having.q \
-  innerjoin.q \
-  input12.q \
-  input13.q \
-  input14.q \
-  input17.q \
-  input18.q \
-  input1_limit.q \
-  input_part2.q \
-  insert1.q \
-  insert_into1.q \
-  insert_into2.q \
-  insert_into3.q \
-  join0.q \
-  join1.q \
-  join2.q \
-  join3.q \
-  join4.q \
-  join5.q \
-  join6.q \
-  join7.q \
-  join8.q \
-  join9.q \
-  join10.q \
-  join11.q \
-  join12.q \
-  join13.q \
-  join14.q \
-  join15.q \
-  join16.q \
-  join17.q \
-  join18.q \
-  join19.q \
-  join20.q \
-  join21.q \
-  join22.q \
-  join23.q \
-  join24.q \
-  join25.q \
-  join26.q \
-  join27.q \
-  join_1to1.q \
-  join_casesensitive.q \
-  join_nullsafe.q \
-  limit_pushdown.q \
-  load_dyn_part1.q \
-  load_dyn_part2.q \
-  load_dyn_part3.q \
-  mapreduce1.q \
-  mapreduce2.q \
-  merge1.q \
-  merge2.q \
-  metadata_only_queries.q \
-  metadata_only_queries_with_filters.q \
-  multi_insert.q \
-  multi_insert_gby.q \
-  multi_insert_gby2.q \
-  multi_insert_gby3.q \
-  multi_insert_lateral_view.q \
-  multi_insert_mixed.q \
-  multi_insert_move_tasks_share_dependencies.q \
-  multigroupby_singlemr.q \
-  optimize_nullscan.q \
-  order.q \
-  order2.q \
-  parallel.q \
+spark.query.files=add_part_multiple.q, \
+  alter_merge_orc.q, \
+  alter_merge_stats_orc.q, \
+  annotate_stats_join.q, \
+  auto_join0.q, \
+  auto_join1.q, \
+  auto_join10.q, \
+  auto_join11.q, \
+  auto_join12.q, \
+  auto_join13.q, \
+  auto_join14.q, \
+  auto_join15.q, \
+  auto_join16.q, \
+  auto_join17.q, \
+  auto_join18.q, \
+  auto_join18_multi_distinct.q, \
+  auto_join19.q, \
+  auto_join2.q, \
+  auto_join20.q, \
+  auto_join21.q, \
+  auto_join22.q, \
+  auto_join23.q, \
+  auto_join24.q, \
+  auto_join25.q, \
+  auto_join27.q, \
+  auto_join28.q, \
+  auto_join29.q, \
+  auto_join3.q, \
+  auto_join30.q, \
+  auto_join31.q, \
+  auto_join32.q, \
+  auto_join4.q, \
+  auto_join5.q, \
+  auto_join6.q, \
+  auto_join7.q, \
+  auto_join8.q, \
+  auto_join9.q, \
+  auto_join_filters.q, \
+  auto_join_nulls.q, \
+  auto_join_reordering_values.q, \
+  auto_smb_mapjoin_14.q, \
+  auto_sortmerge_join_1.q, \
+  auto_sortmerge_join_10.q, \
+  auto_sortmerge_join_11.q, \
+  auto_sortmerge_join_12.q, \
+  auto_sortmerge_join_13.q, \
+  auto_sortmerge_join_14.q, \
+  auto_sortmerge_join_15.q, \
+  auto_sortmerge_join_2.q, \
+  auto_sortmerge_join_3.q, \
+  auto_sortmerge_join_4.q, \
+  auto_sortmerge_join_5.q, \
+  auto_sortmerge_join_6.q, \
+  auto_sortmerge_join_7.q, \
+  auto_sortmerge_join_8.q, \
+  auto_sortmerge_join_9.q, \
+  avro_compression_enabled_native.q, \
+  avro_decimal_native.q, \
+  avro_joins.q, \
+  avro_joins_native.q, \
+  bucket2.q, \
+  bucket3.q, \
+  bucket4.q, \
+  bucket_map_join_1.q, \
+  bucket_map_join_2.q, \
+  bucket_map_join_tez1.q, \
+  bucket_map_join_tez2.q, \
+  bucketmapjoin1.q, \
+  bucketmapjoin10.q, \
+  bucketmapjoin11.q, \
+  bucketmapjoin12.q, \
+  bucketmapjoin13.q, \
+  bucketmapjoin2.q, \
+  bucketmapjoin3.q, \
+  bucketmapjoin4.q, \
+  bucketmapjoin5.q, \
+  bucketmapjoin6.q, \
+  bucketmapjoin7.q, \
+  bucketmapjoin8.q, \
+  bucketmapjoin9.q, \
+  bucketmapjoin_negative.q, \
+  bucketmapjoin_negative2.q, \
+  bucketmapjoin_negative3.q, \
+  column_access_stats.q, \
+  count.q, \
+  create_merge_compressed.q, \
+  cross_join.q, \
+  ctas.q, \
+  custom_input_output_format.q, \
+  date_udf.q, \
+  decimal_join.q, \
+  disable_merge_for_bucketing.q, \
+  enforce_order.q, \
+  escape_clusterby1.q, \
+  escape_distributeby1.q, \
+  escape_orderby1.q, \
+  escape_sortby1.q, \
+  groupby1.q, \
+  groupby10.q, \
+  groupby11.q, \
+  groupby2.q, \
+  groupby3.q, \
+  groupby3_map.q, \
+  groupby3_map_multi_distinct.q, \
+  groupby3_map_skew.q, \
+  groupby3_noskew.q, \
+  groupby3_noskew_multi_distinct.q, \
+  groupby4.q, \
+  groupby7.q, \
+  groupby7_map.q, \
+  groupby7_map_multi_single_reducer.q, \
+  groupby7_map_skew.q, \
+  groupby7_noskew.q, \
+  groupby7_noskew_multi_single_reducer.q, \
+  groupby8.q, \
+  groupby8_map.q, \
+  groupby8_map_skew.q, \
+  groupby8_noskew.q, \
+  groupby9.q, \
+  groupby_complex_types.q, \
+  groupby_complex_types_multi_single_reducer.q, \
+  groupby_cube1.q, \
+  groupby_multi_insert_common_distinct.q, \
+  groupby_multi_single_reducer.q, \
+  groupby_multi_single_reducer2.q, \
+  groupby_multi_single_reducer3.q, \
+  groupby_position.q, \
+  groupby_ppr.q, \
+  groupby_rollup1.q, \
+  groupby_sort_1_23.q, \
+  groupby_sort_skew_1_23.q, \
+  having.q, \
+  index_auto_self_join.q, \
+  infer_bucket_sort_convert_join.q, \
+  innerjoin.q, \
+  input12.q, \
+  input13.q, \
+  input14.q, \
+  input17.q, \
+  input18.q, \
+  input1_limit.q, \
+  input_part2.q, \
+  insert1.q, \
+  insert_into1.q, \
+  insert_into2.q, \
+  insert_into3.q, \
+  join0.q, \
+  join1.q, \
+  join10.q, \
+  join11.q, \
+  join12.q, \
+  join13.q, \
+  join14.q, \
+  join15.q, \
+  join16.q, \
+  join17.q, \
+  join18.q, \
+  join18_multi_distinct.q, \
+  join19.q, \
+  join2.q, \
+  join20.q, \
+  join21.q, \
+  join22.q, \
+  join23.q, \
+  join24.q, \
+  join25.q, \
+  join26.q, \
+  join27.q, \
+  join28.q, \
+  join29.q, \
+  join3.q, \
+  join30.q, \
+  join31.q, \
+  join32.q, \
+  join32_lessSize.q, \
+  join33.q, \
+  join34.q, \
+  join35.q, \
+  join36.q, \
+  join37.q, \
+  join38.q, \
+  join39.q, \
+  join4.q, \
+  join41.q, \
+  join5.q, \
+  join6.q, \
+  join7.q, \
+  join8.q, \
+  join9.q, \
+  join_1to1.q, \
+  join_alt_syntax.q, \
+  join_array.q, \
+  join_casesensitive.q, \
+  join_cond_pushdown_1.q, \
+  join_cond_pushdown_2.q, \
+  join_cond_pushdown_3.q, \
+  join_cond_pushdown_4.q, \
+  join_cond_pushdown_unqual1.q, \
+  join_cond_pushdown_unqual2.q, \
+  join_cond_pushdown_unqual3.q, \
+  join_cond_pushdown_unqual4.q, \
+  join_empty.q, \
+  join_filters.q, \
+  join_filters_overlap.q, \
+  join_hive_626.q, \
+  join_literals.q, \
+  join_map_ppr.q, \
+  join_merge_multi_expressions.q, \
+  join_merging.q, \
+  join_nulls.q, \
+  join_nullsafe.q, \
+  join_rc.q, \
+  join_reorder.q, \
+  join_reorder2.q, \
+  join_reorder3.q, \
+  join_reorder4.q, \
+  join_star.q, \
+  join_thrift.q, \
+  join_vc.q, \
+  join_view.q, \
+  leftsemijoin.q, \
+  leftsemijoin_mr.q, \
+  limit_pushdown.q, \
+  load_dyn_part1.q, \
+  load_dyn_part10.q, \
+  load_dyn_part11.q, \
+  load_dyn_part12.q, \
+  load_dyn_part13.q, \
+  load_dyn_part14.q, \
+  load_dyn_part15.q, \
+  load_dyn_part2.q, \
+  load_dyn_part3.q, \
+  load_dyn_part4.q, \
+  load_dyn_part5.q, \
+  load_dyn_part6.q, \
+  load_dyn_part7.q, \
+  load_dyn_part8.q, \
+  load_dyn_part9.q, \
+  louter_join_ppr.q, \
+  mapjoin_addjar.q, \
+  mapjoin_decimal.q, \
+  mapjoin_distinct.q, \
+  mapjoin_filter_on_outerjoin.q, \
+  mapjoin_hook.q, \
+  mapjoin_mapjoin.q, \
+  mapjoin_memcheck.q, \
+  mapjoin_subquery.q, \
+  mapjoin_subquery2.q, \
+  mapjoin_test_outer.q, \
+  mapreduce1.q, \
+  mapreduce2.q, \
+  merge1.q, \
+  merge2.q, \
+  mergejoins.q, \
+  mergejoins_mixed.q, \
+  metadata_only_queries.q, \
+  metadata_only_queries_with_filters.q, \
+  multi_insert.q, \
+  multi_insert_gby.q, \
+  multi_insert_gby2.q, \
+  multi_insert_gby3.q, \
+  multi_insert_lateral_view.q, \
+  multi_insert_mixed.q, \
+  multi_insert_move_tasks_share_dependencies.q, \
+  multi_join_union.q, \
+  multigroupby_singlemr.q, \
+  optimize_nullscan.q, \
+  order.q, \
+  order2.q, \
+  outer_join_ppr.q, \
+  parallel.q, \
   parallel_join0.q, \
   parallel_join1.q, \
-  pcr.q \
-  ppd_multi_insert.q \
-  ppd_transform.q \
-  ptf_decimal.q \
-  ptf_general_queries.q \
-  ptf_matchpath.q \
-  ptf_rcfile.q \
-  ptf_register_tblfn.q \
-  ptf_seqfile.q \
-  sample1.q \
-  sample2.q \
-  sample3.q \
-  sample4.q \
-  sample5.q \
-  sample6.q \
-  sample7.q \
-  sample8.q \
-  sample9.q \
-  sample10.q \
-  script_env_var1.q \
-  script_env_var2.q \
-  script_pipe.q \
-  scriptfile1.q \
-  smb_mapjoin_13.q \
-  smb_mapjoin_15.q \
-  smb_mapjoin_16.q \
-  sort.q \
-  spark_test.q \
-  subquery_multiinsert.q \
-  table_access_keys_stats.q \
-  temp_table.q \
-  timestamp_1.q \
-  timestamp_2.q \
-  timestamp_3.q \
-  timestamp_comparison.q \
-  timestamp_lazy.q \
-  timestamp_null.q \
-  timestamp_udf.q \
-  transform_ppr1.q \
-  transform_ppr2.q \
-  transform1.q \
-  transform2.q \
-  union.q \
-  union10.q \
-  union11.q \
-  union13.q \
-  union14.q \
-  union15.q \
-  union16.q \
-  union18.q \
-  union19.q \
-  union2.q \
-  union23.q \
-  union25.q \
-  union28.q \
-  union29.q \
-  union3.q \
-  union30.q \
-  union33.q \
-  union4.q \
-  union5.q \
-  union6.q \
-  union7.q \
-  union8.q \
-  union9.q \
-  union_null.q \
-  union_ppr.q \
-  union_remove_1.q \
-  union_remove_10.q \
-  union_remove_11.q \
-  union_remove_15.q \
-  union_remove_16.q \
-  union_remove_17.q \
-  union_remove_18.q \
-  union_remove_19.q \
-  union_remove_2.q \
-  union_remove_20.q \
-  union_remove_21.q \
-  union_remove_24.q \
-  union_remove_25.q \
-  union_remove_3.q \
-  union_remove_4.q \
-  union_remove_5.q \
-  union_remove_6.q \
-  union_remove_7.q \
-  union_remove_8.q \
-  union_remove_9.q \
-  load_dyn_part4.q \
-  load_dyn_part5.q \
-  load_dyn_part6.q \
-  load_dyn_part7.q \
-  load_dyn_part8.q \
-  load_dyn_part9.q \
-  load_dyn_part10.q \
-  load_dyn_part11.q \
-  load_dyn_part12.q \
-  load_dyn_part13.q \
-  load_dyn_part14.q \
-  load_dyn_part15.q \
-  vector_between_in.q \
-  vector_cast_constant.q \
-  vector_char_4.q \
-  vector_count_distinct.q \
-  vector_data_types.q \
-  vector_decimal_aggregate.q \
-  vector_elt.q \
-  vector_left_outer_join.q \
-  vector_orderby_5.q \
-  vector_string_concat.q \
-  vector_varchar_4.q \
-  vectorization_0.q \
-  vectorization_1.q \
-  vectorization_10.q \
-  vectorization_11.q \
-  vectorization_12.q \
-  vectorization_13.q \
-  vectorization_14.q \
-  vectorization_15.q \
-  vectorization_2.q \
-  vectorization_3.q \
-  vectorization_4.q \
-  vectorization_5.q \
-  vectorization_6.q \
-  vectorization_9.q \
-  vectorization_decimal_date.q \
-  vectorization_div0.q \
-  vectorization_nested_udf.q \
-  vectorization_not.q \
-  vectorization_part.q \
-  vectorization_part_project.q \
-  vectorization_pushdown.q \
-  vectorized_bucketmapjoin1.q \
-  vectorized_case.q \
-  vectorized_mapjoin.q \
+  parquet_join.q, \
+  pcr.q, \
+  ppd_gby_join.q, \
+  ppd_join.q, \
+  ppd_join2.q, \
+  ppd_join3.q, \
+  ppd_join4.q, \
+  ppd_join5.q, \
+  ppd_join_filter.q, \
+  ppd_multi_insert.q, \
+  ppd_outer_join1.q, \
+  ppd_outer_join2.q, \
+  ppd_outer_join3.q, \
+  ppd_outer_join4.q, \
+  ppd_outer_join5.q, \
+  ppd_transform.q, \
+  ptf_decimal.q, \
+  ptf_general_queries.q, \
+  ptf_matchpath.q, \
+  ptf_rcfile.q, \
+  ptf_register_tblfn.q, \
+  ptf_seqfile.q, \
+  reduce_deduplicate_exclude_join.q, \
+  router_join_ppr.q, \
+  sample1.q, \
+  sample10.q, \
+  sample2.q, \
+  sample3.q, \
+  sample4.q, \
+  sample5.q, \
+  sample6.q, \
+  sample7.q, \
+  sample8.q, \
+  sample9.q, \
+  script_env_var1.q, \
+  script_env_var2.q, \
+  script_pipe.q, \
+  scriptfile1.q, \
+  semijoin.q, \
+  skewjoin.q, \
+  skewjoin_noskew.q, \
+  skewjoin_union_remove_1.q, \
+  skewjoin_union_remove_2.q, \
+  skewjoinopt1.q, \
+  skewjoinopt10.q, \
+  skewjoinopt11.q, \
+  skewjoinopt12.q, \
+  skewjoinopt13.q, \
+  skewjoinopt14.q, \
+  skewjoinopt15.q, \
+  skewjoinopt16.q, \
+  skewjoinopt17.q, \
+  skewjoinopt18.q, \
+  skewjoinopt19.q, \
+  skewjoinopt20.q, \
+  skewjoinopt3.q, \
+  skewjoinopt4.q, \
+  skewjoinopt5.q, \
+  skewjoinopt6.q, \
+  skewjoinopt7.q, \
+  skewjoinopt8.q, \
+  skewjoinopt9.q, \
+  smb_mapjoin9.q, \
+  smb_mapjoin_1.q, \
+  smb_mapjoin_10.q, \
+  smb_mapjoin_13.q, \
+  smb_mapjoin_14.q, \
+  smb_mapjoin_15.q, \
+  smb_mapjoin_16.q, \
+  smb_mapjoin_17.q, \
+  smb_mapjoin_18.q, \
+  smb_mapjoin_19.q, \
+  smb_mapjoin_2.q, \
+  smb_mapjoin_20.q, \
+  smb_mapjoin_21.q, \
+  smb_mapjoin_22.q, \
+  smb_mapjoin_25.q, \
+  smb_mapjoin_3.q, \
+  smb_mapjoin_4.q, \
+  smb_mapjoin_5.q, \
+  smb_mapjoin_6.q, \
+  smb_mapjoin_7.q, \
+  smb_mapjoin_8.q, \
+  sort.q, \
+  sort_merge_join_desc_1.q, \
+  sort_merge_join_desc_2.q, \
+  sort_merge_join_desc_3.q, \
+  sort_merge_join_desc_4.q, \
+  sort_merge_join_desc_5.q, \
+  sort_merge_join_desc_6.q, \
+  sort_merge_join_desc_7.q, \
+  sort_merge_join_desc_8.q, \
+  spark_test.q, \
+  subquery_exists.q, \
+  subquery_in.q, \
+  subquery_multiinsert.q, \
+  table_access_keys_stats.q, \
+  temp_table.q, \
+  temp_table_join1.q, \
+  tez_join_tests.q, \
+  tez_joins_explain.q, \
+  timestamp_1.q, \
+  timestamp_2.q, \
+  timestamp_3.q, \
+  timestamp_comparison.q, \
+  timestamp_lazy.q, \
+  timestamp_null.q, \
+  timestamp_udf.q, \
+  transform1.q, \
+  transform2.q, \
+  transform_ppr1.q, \
+  transform_ppr2.q, \
+  union.q, \
+  union10.q, \
+  union11.q, \
+  union13.q, \
+  union14.q, \
+  union15.q, \
+  union16.q, \
+  union18.q, \
+  union19.q, \
+  union2.q, \
+  union23.q, \
+  union25.q, \
+  union28.q, \
+  union29.q, \
+  union3.q, \
+  union30.q, \
+  union33.q, \
+  union4.q, \
+  union5.q, \
+  union6.q, \
+  union7.q, \
+  union8.q, \
+  union9.q, \
+  union_null.q, \
+  union_ppr.q, \
+  union_remove_1.q, \
+  union_remove_10.q, \
+  union_remove_11.q, \
+  union_remove_15.q, \
+  union_remove_16.q, \
+  union_remove_17.q, \
+  union_remove_18.q, \
+  union_remove_19.q, \
+  union_remove_2.q, \
+  union_remove_20.q, \
+  union_remove_21.q, \
+  union_remove_24.q, \
+  union_remove_25.q, \
+  union_remove_3.q, \
+  union_remove_4.q, \
+  union_remove_5.q, \
+  union_remove_6.q, \
+  union_remove_7.q, \
+  union_remove_8.q, \
+  union_remove_9.q, \
+  uniquejoin.q, \
+  varchar_join1.q, \
+  vector_between_in.q, \
+  vector_cast_constant.q, \
+  vector_char_4.q, \
+  vector_count_distinct.q, \
+  vector_data_types.q, \
+  vector_decimal_aggregate.q, \
+  vector_elt.q, \
+  vector_left_outer_join.q, \
+  vector_orderby_5.q, \
+  vector_string_concat.q, \
+  vector_varchar_4.q, \
+  vectorization_0.q, \
+  vectorization_1.q, \
+  vectorization_10.q, \
+  vectorization_11.q, \
+  vectorization_12.q, \
+  vectorization_13.q, \
+  vectorization_14.q, \
+  vectorization_15.q, \
+  vectorization_2.q, \
+  vectorization_3.q, \
+  vectorization_4.q, \
+  vectorization_5.q, \
+  vectorization_6.q, \
+  vectorization_9.q, \
+  vectorization_decimal_date.q, \
+  vectorization_div0.q, \
+  vectorization_nested_udf.q, \
+  vectorization_not.q, \
+  vectorization_part.q, \
+  vectorization_part_project.q, \
+  vectorization_pushdown.q, \
+  vectorized_bucketmapjoin1.q, \
+  vectorized_case.q, \
+  vectorized_mapjoin.q, \
   vectorized_math_funcs.q,\
-  vectorized_nested_mapjoin.q \
-  vectorized_ptf.q \
-  vectorized_rcfile_columnar.q \
-  vectorized_shufflejoin.q \
-  vectorized_string_funcs.q \
-  vectorized_timestamp_funcs.q \
-  windowing.q \
-  subquery_exists.q \
-  subquery_in.q \
-  auto_sortmerge_join_1.q \
-  skewjoin.q
+  vectorized_nested_mapjoin.q, \
+  vectorized_ptf.q, \
+  vectorized_rcfile_columnar.q, \
+  vectorized_shufflejoin.q, \
+  vectorized_string_funcs.q, \
+  vectorized_timestamp_funcs.q, \
+  windowing.q, \

Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/annotate_stats_join.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/annotate_stats_join.q.out?rev=1634671&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/annotate_stats_join.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/annotate_stats_join.q.out Mon Oct 27 19:56:58 2014
@@ -0,0 +1,720 @@
+PREHOOK: query: create table if not exists emp (
+  lastname string,
+  deptid int,
+  locid int
+) row format delimited fields terminated by '|' stored as textfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@emp
+POSTHOOK: query: create table if not exists emp (
+  lastname string,
+  deptid int,
+  locid int
+) row format delimited fields terminated by '|' stored as textfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@emp
+PREHOOK: query: create table if not exists dept (
+  deptid int,
+  deptname string
+) row format delimited fields terminated by '|' stored as textfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dept
+POSTHOOK: query: create table if not exists dept (
+  deptid int,
+  deptname string
+) row format delimited fields terminated by '|' stored as textfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dept
+PREHOOK: query: create table if not exists loc (
+  state string,
+  locid int,
+  zip bigint,
+  year int
+) row format delimited fields terminated by '|' stored as textfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@loc
+POSTHOOK: query: create table if not exists loc (
+  state string,
+  locid int,
+  zip bigint,
+  year int
+) row format delimited fields terminated by '|' stored as textfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@loc
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/emp.txt' OVERWRITE INTO TABLE emp
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@emp
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/emp.txt' OVERWRITE INTO TABLE emp
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@emp
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dept.txt' OVERWRITE INTO TABLE dept
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@dept
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dept.txt' OVERWRITE INTO TABLE dept
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@dept
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/loc.txt' OVERWRITE INTO TABLE loc
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@loc
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/loc.txt' OVERWRITE INTO TABLE loc
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@loc
+PREHOOK: query: analyze table emp compute statistics
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emp
+PREHOOK: Output: default@emp
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: analyze table emp compute statistics
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emp
+POSTHOOK: Output: default@emp
+PREHOOK: query: analyze table dept compute statistics
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dept
+PREHOOK: Output: default@dept
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: analyze table dept compute statistics
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dept
+POSTHOOK: Output: default@dept
+PREHOOK: query: analyze table loc compute statistics
+PREHOOK: type: QUERY
+PREHOOK: Input: default@loc
+PREHOOK: Output: default@loc
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: analyze table loc compute statistics
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@loc
+POSTHOOK: Output: default@loc
+PREHOOK: query: analyze table emp compute statistics for columns lastname,deptid,locid
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emp
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table emp compute statistics for columns lastname,deptid,locid
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emp
+#### A masked pattern was here ####
+PREHOOK: query: analyze table dept compute statistics for columns deptname,deptid
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dept
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table dept compute statistics for columns deptname,deptid
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dept
+#### A masked pattern was here ####
+PREHOOK: query: analyze table loc compute statistics for columns state,locid,zip,year
+PREHOOK: type: QUERY
+PREHOOK: Input: default@loc
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table loc compute statistics for columns state,locid,zip,year
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@loc
+#### A masked pattern was here ####
+PREHOOK: query: -- number of rows
+-- emp  - 48
+-- dept - 6
+-- loc  - 8
+
+-- count distincts for relevant columns (since count distinct values are approximate in some cases count distint values will be greater than number of rows)
+-- emp.deptid - 3
+-- emp.lastname - 6
+-- emp.locid - 7
+-- dept.deptid - 7
+-- dept.deptname - 6
+-- loc.locid - 7
+-- loc.state - 6
+
+-- 2 relations, 1 attribute
+-- Expected output rows: (48*6)/max(3,7) = 41
+explain select * from emp e join dept d on (e.deptid = d.deptid)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- number of rows
+-- emp  - 48
+-- dept - 6
+-- loc  - 8
+
+-- count distincts for relevant columns (since count distinct values are approximate in some cases count distint values will be greater than number of rows)
+-- emp.deptid - 3
+-- emp.lastname - 6
+-- emp.locid - 7
+-- dept.deptid - 7
+-- dept.deptname - 6
+-- loc.locid - 7
+-- loc.state - 6
+
+-- 2 relations, 1 attribute
+-- Expected output rows: (48*6)/max(3,7) = 41
+explain select * from emp e join dept d on (e.deptid = d.deptid)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1), Map 3 (GROUP PARTITION-LEVEL SORT, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: d
+                  Statistics: Num rows: 0 Data size: 68 Basic stats: PARTIAL Column stats: COMPLETE
+                  Filter Operator
+                    predicate: deptid is not null (type: boolean)
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: deptid (type: int)
+                      sort order: +
+                      Map-reduce partition columns: deptid (type: int)
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                      value expressions: deptname (type: string)
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: e
+                  Statistics: Num rows: 5 Data size: 600 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: deptid is not null (type: boolean)
+                    Statistics: Num rows: 5 Data size: 495 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: deptid (type: int)
+                      sort order: +
+                      Map-reduce partition columns: deptid (type: int)
+                      Statistics: Num rows: 5 Data size: 495 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: lastname (type: string), locid (type: int)
+        Reducer 2 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                condition expressions:
+                  0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1}
+                  1 {KEY.reducesinkkey0} {VALUE._col0}
+                outputColumnNames: _col0, _col1, _col2, _col6, _col7
+                Statistics: Num rows: 5 Data size: 544 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col6 (type: int), _col7 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                  Statistics: Num rows: 5 Data size: 544 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 5 Data size: 544 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- 2 relations, 2 attributes
+-- Expected output rows: (48*6)/(max(3,7) * max(6,6)) = 6
+explain select * from emp,dept where emp.deptid = dept.deptid and emp.lastname = dept.deptname
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 2 relations, 2 attributes
+-- Expected output rows: (48*6)/(max(3,7) * max(6,6)) = 6
+explain select * from emp,dept where emp.deptid = dept.deptid and emp.lastname = dept.deptname
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1), Map 3 (GROUP PARTITION-LEVEL SORT, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: dept
+                  Statistics: Num rows: 0 Data size: 68 Basic stats: PARTIAL Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (deptid is not null and deptname is not null) (type: boolean)
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: deptid (type: int), deptname (type: string)
+                      sort order: ++
+                      Map-reduce partition columns: deptid (type: int), deptname (type: string)
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: emp
+                  Statistics: Num rows: 5 Data size: 600 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (deptid is not null and lastname is not null) (type: boolean)
+                    Statistics: Num rows: 5 Data size: 495 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: deptid (type: int), lastname (type: string)
+                      sort order: ++
+                      Map-reduce partition columns: deptid (type: int), lastname (type: string)
+                      Statistics: Num rows: 5 Data size: 495 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: locid (type: int)
+        Reducer 2 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                condition expressions:
+                  0 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0}
+                  1 {KEY.reducesinkkey0} {KEY.reducesinkkey1}
+                outputColumnNames: _col0, _col1, _col2, _col6, _col7
+                Statistics: Num rows: 5 Data size: 544 Basic stats: COMPLETE Column stats: NONE
+                Filter Operator
+                  predicate: ((_col1 = _col6) and (_col0 = _col7)) (type: boolean)
+                  Statistics: Num rows: 1 Data size: 108 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col6 (type: int), _col7 (type: string)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                    Statistics: Num rows: 1 Data size: 108 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 1 Data size: 108 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select * from emp e join dept d on (e.deptid = d.deptid and e.lastname = d.deptname)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from emp e join dept d on (e.deptid = d.deptid and e.lastname = d.deptname)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1), Map 3 (GROUP PARTITION-LEVEL SORT, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: d
+                  Statistics: Num rows: 0 Data size: 68 Basic stats: PARTIAL Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (deptid is not null and deptname is not null) (type: boolean)
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: deptid (type: int), deptname (type: string)
+                      sort order: ++
+                      Map-reduce partition columns: deptid (type: int), deptname (type: string)
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: e
+                  Statistics: Num rows: 5 Data size: 600 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (deptid is not null and lastname is not null) (type: boolean)
+                    Statistics: Num rows: 5 Data size: 495 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: deptid (type: int), lastname (type: string)
+                      sort order: ++
+                      Map-reduce partition columns: deptid (type: int), lastname (type: string)
+                      Statistics: Num rows: 5 Data size: 495 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: locid (type: int)
+        Reducer 2 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                condition expressions:
+                  0 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0}
+                  1 {KEY.reducesinkkey0} {KEY.reducesinkkey1}
+                outputColumnNames: _col0, _col1, _col2, _col6, _col7
+                Statistics: Num rows: 5 Data size: 544 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col6 (type: int), _col7 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                  Statistics: Num rows: 5 Data size: 544 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 5 Data size: 544 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- 2 relations, 3 attributes
+-- Expected output rows: (48*6)/(max(3,7) * max(6,6) * max(6,6)) = 1
+explain select * from emp,dept where emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 2 relations, 3 attributes
+-- Expected output rows: (48*6)/(max(3,7) * max(6,6) * max(6,6)) = 1
+explain select * from emp,dept where emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1), Map 3 (GROUP PARTITION-LEVEL SORT, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: dept
+                  Statistics: Num rows: 0 Data size: 68 Basic stats: PARTIAL Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (deptid is not null and deptname is not null) (type: boolean)
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: deptid (type: int), deptname (type: string), deptname (type: string)
+                      sort order: +++
+                      Map-reduce partition columns: deptid (type: int), deptname (type: string), deptname (type: string)
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: emp
+                  Statistics: Num rows: 5 Data size: 600 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (deptid is not null and lastname is not null) (type: boolean)
+                    Statistics: Num rows: 5 Data size: 495 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: deptid (type: int), lastname (type: string), lastname (type: string)
+                      sort order: +++
+                      Map-reduce partition columns: deptid (type: int), lastname (type: string), lastname (type: string)
+                      Statistics: Num rows: 5 Data size: 495 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: locid (type: int)
+        Reducer 2 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                condition expressions:
+                  0 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0}
+                  1 {KEY.reducesinkkey0} {KEY.reducesinkkey1}
+                outputColumnNames: _col0, _col1, _col2, _col6, _col7
+                Statistics: Num rows: 5 Data size: 544 Basic stats: COMPLETE Column stats: NONE
+                Filter Operator
+                  predicate: (((_col1 = _col6) and (_col0 = _col7)) and (_col7 = _col0)) (type: boolean)
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  Select Operator
+                    expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col6 (type: int), _col7 (type: string)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- 3 relations, 1 attribute
+-- Expected output rows: (48*6*48)/top2largest(3,7,3) = 658
+explain select * from emp e join dept d on (e.deptid = d.deptid) join emp e1 on (e.deptid = e1.deptid)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 3 relations, 1 attribute
+-- Expected output rows: (48*6*48)/top2largest(3,7,3) = 658
+explain select * from emp e join dept d on (e.deptid = d.deptid) join emp e1 on (e.deptid = e1.deptid)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1), Map 3 (GROUP PARTITION-LEVEL SORT, 1), Map 4 (GROUP PARTITION-LEVEL SORT, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: d
+                  Statistics: Num rows: 0 Data size: 68 Basic stats: PARTIAL Column stats: COMPLETE
+                  Filter Operator
+                    predicate: deptid is not null (type: boolean)
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: deptid (type: int)
+                      sort order: +
+                      Map-reduce partition columns: deptid (type: int)
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                      value expressions: deptname (type: string)
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: e1
+                  Statistics: Num rows: 5 Data size: 600 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: deptid is not null (type: boolean)
+                    Statistics: Num rows: 5 Data size: 495 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: deptid (type: int)
+                      sort order: +
+                      Map-reduce partition columns: deptid (type: int)
+                      Statistics: Num rows: 5 Data size: 495 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: lastname (type: string), locid (type: int)
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: e
+                  Statistics: Num rows: 5 Data size: 600 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: deptid is not null (type: boolean)
+                    Statistics: Num rows: 5 Data size: 495 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: deptid (type: int)
+                      sort order: +
+                      Map-reduce partition columns: deptid (type: int)
+                      Statistics: Num rows: 5 Data size: 495 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: lastname (type: string), locid (type: int)
+        Reducer 2 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                     Inner Join 0 to 2
+                condition expressions:
+                  0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1}
+                  1 {KEY.reducesinkkey0} {VALUE._col0}
+                  2 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1}
+                outputColumnNames: _col0, _col1, _col2, _col6, _col7, _col11, _col12, _col13
+                Statistics: Num rows: 11 Data size: 1089 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col6 (type: int), _col7 (type: string), _col11 (type: string), _col12 (type: int), _col13 (type: int)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
+                  Statistics: Num rows: 11 Data size: 1089 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 11 Data size: 1089 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- Expected output rows: (48*6*8)/top2largest(3,7,7) = 47
+explain select * from emp e join dept d  on (e.deptid = d.deptid) join loc l on (e.deptid = l.locid)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Expected output rows: (48*6*8)/top2largest(3,7,7) = 47
+explain select * from emp e join dept d  on (e.deptid = d.deptid) join loc l on (e.deptid = l.locid)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1), Map 3 (GROUP PARTITION-LEVEL SORT, 1), Map 4 (GROUP PARTITION-LEVEL SORT, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: d
+                  Statistics: Num rows: 0 Data size: 68 Basic stats: PARTIAL Column stats: COMPLETE
+                  Filter Operator
+                    predicate: deptid is not null (type: boolean)
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: deptid (type: int)
+                      sort order: +
+                      Map-reduce partition columns: deptid (type: int)
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                      value expressions: deptname (type: string)
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: e
+                  Statistics: Num rows: 5 Data size: 600 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: deptid is not null (type: boolean)
+                    Statistics: Num rows: 5 Data size: 495 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: deptid (type: int)
+                      sort order: +
+                      Map-reduce partition columns: deptid (type: int)
+                      Statistics: Num rows: 5 Data size: 495 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: lastname (type: string), locid (type: int)
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: l
+                  Statistics: Num rows: 1 Data size: 117 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: locid is not null (type: boolean)
+                    Statistics: Num rows: 1 Data size: 90 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: locid (type: int)
+                      sort order: +
+                      Map-reduce partition columns: locid (type: int)
+                      Statistics: Num rows: 1 Data size: 90 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: state (type: string), zip (type: bigint), year (type: int)
+        Reducer 2 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                     Inner Join 0 to 2
+                condition expressions:
+                  0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1}
+                  1 {KEY.reducesinkkey0} {VALUE._col0}
+                  2 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2}
+                outputColumnNames: _col0, _col1, _col2, _col6, _col7, _col11, _col12, _col13, _col14
+                Statistics: Num rows: 11 Data size: 1089 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col6 (type: int), _col7 (type: string), _col11 (type: string), _col12 (type: int), _col13 (type: bigint), _col14 (type: int)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+                  Statistics: Num rows: 11 Data size: 1089 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 11 Data size: 1089 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- 3 relations and 2 attributes
+-- Expected output rows: (48*6*8)/top2largest(3,7,7)*top2largest(6,6,6) = 1
+explain select * from emp e join dept d on (e.deptid = d.deptid and e.lastname = d.deptname) join loc l on (e.deptid = l.locid and e.lastname = l.state)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 3 relations and 2 attributes
+-- Expected output rows: (48*6*8)/top2largest(3,7,7)*top2largest(6,6,6) = 1
+explain select * from emp e join dept d on (e.deptid = d.deptid and e.lastname = d.deptname) join loc l on (e.deptid = l.locid and e.lastname = l.state)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1), Map 3 (GROUP PARTITION-LEVEL SORT, 1), Map 4 (GROUP PARTITION-LEVEL SORT, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: d
+                  Statistics: Num rows: 0 Data size: 68 Basic stats: PARTIAL Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (deptid is not null and deptname is not null) (type: boolean)
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: deptid (type: int), deptname (type: string)
+                      sort order: ++
+                      Map-reduce partition columns: deptid (type: int), deptname (type: string)
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: e
+                  Statistics: Num rows: 5 Data size: 600 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (deptid is not null and lastname is not null) (type: boolean)
+                    Statistics: Num rows: 5 Data size: 495 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: deptid (type: int), lastname (type: string)
+                      sort order: ++
+                      Map-reduce partition columns: deptid (type: int), lastname (type: string)
+                      Statistics: Num rows: 5 Data size: 495 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: locid (type: int)
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: l
+                  Statistics: Num rows: 1 Data size: 117 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (locid is not null and state is not null) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 90 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: locid (type: int), state (type: string)
+                      sort order: ++
+                      Map-reduce partition columns: locid (type: int), state (type: string)
+                      Statistics: Num rows: 1 Data size: 90 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: zip (type: bigint), year (type: int)
+        Reducer 2 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                     Inner Join 0 to 2
+                condition expressions:
+                  0 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0}
+                  1 {KEY.reducesinkkey0} {KEY.reducesinkkey1}
+                  2 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1}
+                outputColumnNames: _col0, _col1, _col2, _col6, _col7, _col11, _col12, _col13, _col14
+                Statistics: Num rows: 11 Data size: 1089 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col6 (type: int), _col7 (type: string), _col11 (type: string), _col12 (type: int), _col13 (type: bigint), _col14 (type: int)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+                  Statistics: Num rows: 11 Data size: 1089 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 11 Data size: 1089 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+

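The "Expected output rows" comments in annotate_stats_join.q.out above all apply the same textbook join-cardinality estimate: multiply the input row counts, then divide by, for each join attribute, the product of all but the smallest of the per-relation distinct-value counts. With two relations that reduces to max(...); with three it is the product of the two largest, hence top2largest(...). Below is a minimal sketch of that arithmetic in plain Python (not Hive's own statistics-annotation code), using the row counts and NDVs quoted verbatim in those comments.

    # A minimal sketch (plain Python, not Hive code) of the row-count arithmetic
    # quoted in the comments above. Row counts (48, 6, 8) and per-attribute
    # distinct-value counts are copied verbatim from those comments.
    def keep_largest(ndvs, keep):
        # product of the `keep` largest NDVs for one join attribute
        prod = 1
        for v in sorted(ndvs, reverse=True)[:keep]:
            prod *= v
        return prod

    def estimate_join_rows(row_counts, ndvs_per_attribute):
        # |R1 join ... join Rn| ~= (prod of |Ri|) divided by, per join attribute,
        # the product of all NDVs except the smallest (n-1 largest of n values)
        numer = 1
        for r in row_counts:
            numer *= r
        denom = 1
        for ndvs in ndvs_per_attribute:
            denom *= keep_largest(ndvs, len(row_counts) - 1)
        return numer // denom

    # the four estimates quoted in the comments
    print(estimate_join_rows([48, 6], [[3, 7], [6, 6], [6, 6]]))       # 1
    print(estimate_join_rows([48, 6, 48], [[3, 7, 3]]))                # 658
    print(estimate_join_rows([48, 6, 8], [[3, 7, 7]]))                 # 47
    print(estimate_join_rows([48, 6, 8], [[3, 7, 7], [6, 6, 6]]))      # 1
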
Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join0.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join0.q.out?rev=1634671&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join0.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join0.q.out Mon Oct 27 19:56:58 2014
@@ -0,0 +1,150 @@
+PREHOOK: query: explain 
+select sum(hash(a.k1,a.v1,a.k2, a.v2))
+from (
+SELECT src1.key as k1, src1.value as v1, 
+       src2.key as k2, src2.value as v2 FROM 
+  (SELECT * FROM src WHERE src.key < 10) src1 
+    JOIN 
+  (SELECT * FROM src WHERE src.key < 10) src2
+  SORT BY k1, v1, k2, v2
+) a
+PREHOOK: type: QUERY
+POSTHOOK: query: explain 
+select sum(hash(a.k1,a.v1,a.k2, a.v2))
+from (
+SELECT src1.key as k1, src1.value as v1, 
+       src2.key as k2, src2.value as v2 FROM 
+  (SELECT * FROM src WHERE src.key < 10) src1 
+    JOIN 
+  (SELECT * FROM src WHERE src.key < 10) src2
+  SORT BY k1, v1, k2, v2
+) a
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1), Map 5 (GROUP PARTITION-LEVEL SORT, 1)
+        Reducer 3 <- Reducer 2 (GROUP SORT, 1)
+        Reducer 4 <- Reducer 3 (GROUP, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key < 10) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: string), _col1 (type: string)
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key < 10) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: string), _col1 (type: string)
+        Reducer 2 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                condition expressions:
+                  0 {VALUE._col0} {VALUE._col1}
+                  1 {VALUE._col0} {VALUE._col1}
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
+                    sort order: ++++
+                    Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
+        Reducer 3 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: sum(hash(_col0,_col1,_col2,_col3))
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: bigint)
+        Reducer 4 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: bigint)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select sum(hash(a.k1,a.v1,a.k2, a.v2))
+from (
+SELECT src1.key as k1, src1.value as v1, 
+       src2.key as k2, src2.value as v2 FROM 
+  (SELECT * FROM src WHERE src.key < 10) src1 
+    JOIN 
+  (SELECT * FROM src WHERE src.key < 10) src2
+  SORT BY k1, v1, k2, v2
+) a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(a.k1,a.v1,a.k2, a.v2))
+from (
+SELECT src1.key as k1, src1.value as v1, 
+       src2.key as k2, src2.value as v2 FROM 
+  (SELECT * FROM src WHERE src.key < 10) src1 
+    JOIN 
+  (SELECT * FROM src WHERE src.key < 10) src2
+  SORT BY k1, v1, k2, v2
+) a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+34441656720

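The auto_join0 plan above joins the two key < 10 subsets with an empty shuffle key (an intentional cross product), applies the SORT BY, and then computes sum(hash(k1,v1,k2,v2)) in two steps: the hash-mode Group By Operator in Reducer 3 emits a per-task partial sum, and Reducer 4 merges the partials (mode: mergepartial). The following toy sketch of that partial/merge shape is plain Python; row_hash is only a stand-in and does not reproduce Hive's hash() UDF, so it will not reproduce the 34441656720 checksum.

    # Toy model (plain Python, not Hive or Spark code) of the two-stage aggregation
    # in the auto_join0 plan: each task produces a partial sum of row hashes, then a
    # single reducer merges the partials. row_hash is a placeholder, not Hive's hash().
    def row_hash(row):
        h = 0
        for field in row:
            h = h * 31 + hash(field)   # illustrative combiner; Python's hash(), not Hive's
        return h

    def partial_sum(rows):
        # corresponds to the hash-mode Group By Operator in Reducer 3
        return sum(row_hash(r) for r in rows)

    def merge(partials):
        # corresponds to the mergepartial Group By Operator in Reducer 4
        return sum(partials)

    # illustrative (k1, v1, k2, v2) rows; the real input is src x src with key < 10
    tasks = [
        [("0", "val_0", "0", "val_0"), ("0", "val_0", "2", "val_2")],
        [("2", "val_2", "4", "val_4")],
    ]
    print(merge(partial_sum(rows) for rows in tasks))
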
Added: hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join1.q.out?rev=1634671&view=auto
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join1.q.out (added)
+++ hive/branches/spark/ql/src/test/results/clientpositive/spark/auto_join1.q.out Mon Oct 27 19:56:58 2014
@@ -0,0 +1,117 @@
+PREHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest_j1
+POSTHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest_j1
+PREHOOK: query: explain
+FROM src src1 JOIN src src2 ON (src1.key = src2.key)
+INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+FROM src src1 JOIN src src2 ON (src1.key = src2.key)
+INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 1), Map 3 (GROUP PARTITION-LEVEL SORT, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src2
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: string)
+                      sort order: +
+                      Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: value (type: string)
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: src1
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: string)
+                      sort order: +
+                      Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                condition expressions:
+                  0 {KEY.reducesinkkey0}
+                  1 {VALUE._col0}
+                outputColumnNames: _col0, _col6
+                Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: UDFToInteger(_col0) (type: int), _col6 (type: string)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.dest_j1
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.dest_j1
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key)
+INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@dest_j1
+[Error 30017]: Skipping stats aggregation by error org.apache.hadoop.hive.ql.metadata.HiveException: [Error 30015]: Stats aggregator of type counter cannot be connected to
+POSTHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key)
+INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@dest_j1
+POSTHOOK: Lineage: dest_j1.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.value SIMPLE [(src)src2.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT sum(hash(dest_j1.key,dest_j1.value)) FROM dest_j1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest_j1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(dest_j1.key,dest_j1.value)) FROM dest_j1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest_j1
+#### A masked pattern was here ####
+101861029915
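In the auto_join1 plan above, both map sides shuffle on key: src1 ships only the key (side 0 of the Join Operator, {KEY.reducesinkkey0}) while src2 ships key plus value (side 1, {VALUE._col0}), and the Select Operator then casts the string key with UDFToInteger before writing to dest_j1. Below is a toy model of that reduce-side inner join in plain Python; the sample rows are illustrative, not the real contents of default@src.

    # Toy model (plain Python, not Hive code) of the reduce-side inner join in
    # Stage-1 of auto_join1: group both inputs by key, then pair every src1 key
    # with every matching src2 value, casting the key to int as the plan's
    # Select Operator (UDFToInteger) does.
    from collections import defaultdict

    def reduce_side_join(src1_keys, src2_rows):
        groups = defaultdict(lambda: ([], []))
        for k in src1_keys:              # src1: only the key survives the shuffle
            if k is not None:            # "key is not null" filter
                groups[k][0].append(k)
        for k, v in src2_rows:           # src2: key and value
            if k is not None:
                groups[k][1].append(v)
        joined = []
        for k, (left, right) in groups.items():
            for lk in left:
                for rv in right:
                    joined.append((int(lk), rv))   # (UDFToInteger(_col0), _col6)
        return joined

    # illustrative rows only; the real test reads table default@src
    print(reduce_side_join(["0", "2"], [("0", "val_0"), ("0", "val_0"), ("2", "val_2")]))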