Posted to commits@hive.apache.org by ga...@apache.org on 2014/08/18 19:05:07 UTC

svn commit: r1618664 [1/13] - in /hive/trunk: hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/ ql/src/java/org/apache/hadoop/hive/ql/exec/ ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ ql/src/java/org/apache/hadoop/hive/ql/io/ ql/src/ja...

Author: gates
Date: Mon Aug 18 17:05:02 2014
New Revision: 1618664

URL: http://svn.apache.org/r1618664
Log:
HIVE-7513 Add ROW__ID VirtualColumn (Eugene Koifman via Alan Gates)

Modified:
    hive/trunk/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/RecordIdentifier.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/VirtualColumn.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
    hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
    hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java
    hive/trunk/ql/src/test/results/clientnegative/bucket_mapjoin_mismatch1.q.out
    hive/trunk/ql/src/test/results/clientnegative/sortmerge_mapjoin_mismatch_1.q.out
    hive/trunk/ql/src/test/results/clientnegative/udf_assert_true.q.out
    hive/trunk/ql/src/test/results/clientnegative/udf_assert_true2.q.out
    hive/trunk/ql/src/test/results/clientpositive/allcolref_in_udf.q.out
    hive/trunk/ql/src/test/results/clientpositive/annotate_stats_join.q.out
    hive/trunk/ql/src/test/results/clientpositive/auto_join1.q.out
    hive/trunk/ql/src/test/results/clientpositive/auto_join14.q.out
    hive/trunk/ql/src/test/results/clientpositive/auto_join15.q.out
    hive/trunk/ql/src/test/results/clientpositive/auto_join17.q.out
    hive/trunk/ql/src/test/results/clientpositive/auto_join19.q.out
    hive/trunk/ql/src/test/results/clientpositive/auto_join2.q.out
    hive/trunk/ql/src/test/results/clientpositive/auto_join20.q.out
    hive/trunk/ql/src/test/results/clientpositive/auto_join21.q.out
    hive/trunk/ql/src/test/results/clientpositive/auto_join22.q.out
    hive/trunk/ql/src/test/results/clientpositive/auto_join23.q.out
    hive/trunk/ql/src/test/results/clientpositive/auto_join28.q.out
    hive/trunk/ql/src/test/results/clientpositive/auto_join29.q.out
    hive/trunk/ql/src/test/results/clientpositive/auto_join3.q.out
    hive/trunk/ql/src/test/results/clientpositive/auto_join32.q.out
    hive/trunk/ql/src/test/results/clientpositive/auto_join9.q.out
    hive/trunk/ql/src/test/results/clientpositive/auto_join_reordering_values.q.out
    hive/trunk/ql/src/test/results/clientpositive/auto_smb_mapjoin_14.q.out
    hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_13.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin1.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin2.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin3.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin4.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin5.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin7.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin_negative.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin_negative2.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin_negative3.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketsortoptimize_insert_2.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketsortoptimize_insert_4.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketsortoptimize_insert_5.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketsortoptimize_insert_6.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketsortoptimize_insert_7.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketsortoptimize_insert_8.q.out
    hive/trunk/ql/src/test/results/clientpositive/cluster.q.out
    hive/trunk/ql/src/test/results/clientpositive/column_access_stats.q.out
    hive/trunk/ql/src/test/results/clientpositive/constprog2.q.out
    hive/trunk/ql/src/test/results/clientpositive/correlationoptimizer1.q.out
    hive/trunk/ql/src/test/results/clientpositive/correlationoptimizer4.q.out
    hive/trunk/ql/src/test/results/clientpositive/correlationoptimizer5.q.out
    hive/trunk/ql/src/test/results/clientpositive/correlationoptimizer6.q.out
    hive/trunk/ql/src/test/results/clientpositive/cross_product_check_1.q.out
    hive/trunk/ql/src/test/results/clientpositive/cross_product_check_2.q.out
    hive/trunk/ql/src/test/results/clientpositive/explain_logical.q.out
    hive/trunk/ql/src/test/results/clientpositive/filter_join_breaktask.q.out
    hive/trunk/ql/src/test/results/clientpositive/index_auto_self_join.q.out
    hive/trunk/ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out
    hive/trunk/ql/src/test/results/clientpositive/innerjoin.q.out
    hive/trunk/ql/src/test/results/clientpositive/input23.q.out
    hive/trunk/ql/src/test/results/clientpositive/join1.q.out
    hive/trunk/ql/src/test/results/clientpositive/join14.q.out
    hive/trunk/ql/src/test/results/clientpositive/join15.q.out
    hive/trunk/ql/src/test/results/clientpositive/join17.q.out
    hive/trunk/ql/src/test/results/clientpositive/join2.q.out
    hive/trunk/ql/src/test/results/clientpositive/join20.q.out
    hive/trunk/ql/src/test/results/clientpositive/join21.q.out
    hive/trunk/ql/src/test/results/clientpositive/join22.q.out
    hive/trunk/ql/src/test/results/clientpositive/join23.q.out
    hive/trunk/ql/src/test/results/clientpositive/join25.q.out
    hive/trunk/ql/src/test/results/clientpositive/join26.q.out
    hive/trunk/ql/src/test/results/clientpositive/join27.q.out
    hive/trunk/ql/src/test/results/clientpositive/join3.q.out
    hive/trunk/ql/src/test/results/clientpositive/join32.q.out
    hive/trunk/ql/src/test/results/clientpositive/join32_lessSize.q.out
    hive/trunk/ql/src/test/results/clientpositive/join33.q.out
    hive/trunk/ql/src/test/results/clientpositive/join36.q.out
    hive/trunk/ql/src/test/results/clientpositive/join37.q.out
    hive/trunk/ql/src/test/results/clientpositive/join38.q.out
    hive/trunk/ql/src/test/results/clientpositive/join39.q.out
    hive/trunk/ql/src/test/results/clientpositive/join40.q.out
    hive/trunk/ql/src/test/results/clientpositive/join41.q.out
    hive/trunk/ql/src/test/results/clientpositive/join9.q.out
    hive/trunk/ql/src/test/results/clientpositive/join_alt_syntax.q.out
    hive/trunk/ql/src/test/results/clientpositive/join_cond_pushdown_1.q.out
    hive/trunk/ql/src/test/results/clientpositive/join_cond_pushdown_2.q.out
    hive/trunk/ql/src/test/results/clientpositive/join_cond_pushdown_3.q.out
    hive/trunk/ql/src/test/results/clientpositive/join_cond_pushdown_4.q.out
    hive/trunk/ql/src/test/results/clientpositive/join_cond_pushdown_unqual1.q.out
    hive/trunk/ql/src/test/results/clientpositive/join_cond_pushdown_unqual2.q.out
    hive/trunk/ql/src/test/results/clientpositive/join_cond_pushdown_unqual3.q.out
    hive/trunk/ql/src/test/results/clientpositive/join_cond_pushdown_unqual4.q.out
    hive/trunk/ql/src/test/results/clientpositive/join_filters_overlap.q.out
    hive/trunk/ql/src/test/results/clientpositive/join_hive_626.q.out
    hive/trunk/ql/src/test/results/clientpositive/join_map_ppr.q.out
    hive/trunk/ql/src/test/results/clientpositive/join_merging.q.out
    hive/trunk/ql/src/test/results/clientpositive/join_nullsafe.q.out
    hive/trunk/ql/src/test/results/clientpositive/join_rc.q.out
    hive/trunk/ql/src/test/results/clientpositive/join_reorder.q.out
    hive/trunk/ql/src/test/results/clientpositive/join_reorder2.q.out
    hive/trunk/ql/src/test/results/clientpositive/join_reorder3.q.out
    hive/trunk/ql/src/test/results/clientpositive/join_reorder4.q.out
    hive/trunk/ql/src/test/results/clientpositive/join_star.q.out
    hive/trunk/ql/src/test/results/clientpositive/join_thrift.q.out
    hive/trunk/ql/src/test/results/clientpositive/join_vc.q.out
    hive/trunk/ql/src/test/results/clientpositive/join_view.q.out
    hive/trunk/ql/src/test/results/clientpositive/lateral_view.q.out
    hive/trunk/ql/src/test/results/clientpositive/lateral_view_cp.q.out
    hive/trunk/ql/src/test/results/clientpositive/lateral_view_noalias.q.out
    hive/trunk/ql/src/test/results/clientpositive/lateral_view_outer.q.out
    hive/trunk/ql/src/test/results/clientpositive/lateral_view_ppd.q.out
    hive/trunk/ql/src/test/results/clientpositive/limit_pushdown_negative.q.out
    hive/trunk/ql/src/test/results/clientpositive/lineage1.q.out
    hive/trunk/ql/src/test/results/clientpositive/louter_join_ppr.q.out
    hive/trunk/ql/src/test/results/clientpositive/mapjoin1.q.out
    hive/trunk/ql/src/test/results/clientpositive/mapjoin_filter_on_outerjoin.q.out
    hive/trunk/ql/src/test/results/clientpositive/mapjoin_memcheck.q.out
    hive/trunk/ql/src/test/results/clientpositive/mapjoin_subquery2.q.out
    hive/trunk/ql/src/test/results/clientpositive/mapjoin_test_outer.q.out
    hive/trunk/ql/src/test/results/clientpositive/mergejoins.q.out
    hive/trunk/ql/src/test/results/clientpositive/mergejoins_mixed.q.out
    hive/trunk/ql/src/test/results/clientpositive/multiMapJoin1.q.out
    hive/trunk/ql/src/test/results/clientpositive/multi_insert_lateral_view.q.out
    hive/trunk/ql/src/test/results/clientpositive/multi_join_union.q.out
    hive/trunk/ql/src/test/results/clientpositive/no_hooks.q.out
    hive/trunk/ql/src/test/results/clientpositive/nonmr_fetch.q.out
    hive/trunk/ql/src/test/results/clientpositive/optional_outer.q.out
    hive/trunk/ql/src/test/results/clientpositive/outer_join_ppr.q.out
    hive/trunk/ql/src/test/results/clientpositive/pcr.q.out
    hive/trunk/ql/src/test/results/clientpositive/ppd_join5.q.out
    hive/trunk/ql/src/test/results/clientpositive/ppd_join_filter.q.out
    hive/trunk/ql/src/test/results/clientpositive/ppd_outer_join1.q.out
    hive/trunk/ql/src/test/results/clientpositive/ppd_outer_join2.q.out
    hive/trunk/ql/src/test/results/clientpositive/ppd_outer_join3.q.out
    hive/trunk/ql/src/test/results/clientpositive/ppd_outer_join4.q.out
    hive/trunk/ql/src/test/results/clientpositive/ppd_outer_join5.q.out
    hive/trunk/ql/src/test/results/clientpositive/ppd_repeated_alias.q.out
    hive/trunk/ql/src/test/results/clientpositive/ppd_udf_case.q.out
    hive/trunk/ql/src/test/results/clientpositive/ppd_union_view.q.out
    hive/trunk/ql/src/test/results/clientpositive/ppd_vc.q.out
    hive/trunk/ql/src/test/results/clientpositive/ptf.q.out
    hive/trunk/ql/src/test/results/clientpositive/quotedid_skew.q.out
    hive/trunk/ql/src/test/results/clientpositive/regex_col.q.out
    hive/trunk/ql/src/test/results/clientpositive/router_join_ppr.q.out
    hive/trunk/ql/src/test/results/clientpositive/sample8.q.out
    hive/trunk/ql/src/test/results/clientpositive/semijoin.q.out
    hive/trunk/ql/src/test/results/clientpositive/skewjoin.q.out
    hive/trunk/ql/src/test/results/clientpositive/skewjoin_union_remove_1.q.out
    hive/trunk/ql/src/test/results/clientpositive/skewjoin_union_remove_2.q.out
    hive/trunk/ql/src/test/results/clientpositive/skewjoinopt1.q.out
    hive/trunk/ql/src/test/results/clientpositive/skewjoinopt10.q.out
    hive/trunk/ql/src/test/results/clientpositive/skewjoinopt11.q.out
    hive/trunk/ql/src/test/results/clientpositive/skewjoinopt12.q.out
    hive/trunk/ql/src/test/results/clientpositive/skewjoinopt13.q.out
    hive/trunk/ql/src/test/results/clientpositive/skewjoinopt14.q.out
    hive/trunk/ql/src/test/results/clientpositive/skewjoinopt15.q.out
    hive/trunk/ql/src/test/results/clientpositive/skewjoinopt16.q.out
    hive/trunk/ql/src/test/results/clientpositive/skewjoinopt17.q.out
    hive/trunk/ql/src/test/results/clientpositive/skewjoinopt18.q.out
    hive/trunk/ql/src/test/results/clientpositive/skewjoinopt19.q.out
    hive/trunk/ql/src/test/results/clientpositive/skewjoinopt2.q.out
    hive/trunk/ql/src/test/results/clientpositive/skewjoinopt20.q.out
    hive/trunk/ql/src/test/results/clientpositive/skewjoinopt3.q.out
    hive/trunk/ql/src/test/results/clientpositive/skewjoinopt4.q.out
    hive/trunk/ql/src/test/results/clientpositive/skewjoinopt5.q.out
    hive/trunk/ql/src/test/results/clientpositive/skewjoinopt6.q.out
    hive/trunk/ql/src/test/results/clientpositive/skewjoinopt7.q.out
    hive/trunk/ql/src/test/results/clientpositive/skewjoinopt8.q.out
    hive/trunk/ql/src/test/results/clientpositive/smb_mapjoin9.q.out
    hive/trunk/ql/src/test/results/clientpositive/smb_mapjoin_1.q.out
    hive/trunk/ql/src/test/results/clientpositive/smb_mapjoin_10.q.out
    hive/trunk/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out
    hive/trunk/ql/src/test/results/clientpositive/smb_mapjoin_12.q.out
    hive/trunk/ql/src/test/results/clientpositive/smb_mapjoin_13.q.out
    hive/trunk/ql/src/test/results/clientpositive/smb_mapjoin_15.q.out
    hive/trunk/ql/src/test/results/clientpositive/smb_mapjoin_2.q.out
    hive/trunk/ql/src/test/results/clientpositive/smb_mapjoin_3.q.out
    hive/trunk/ql/src/test/results/clientpositive/smb_mapjoin_4.q.out
    hive/trunk/ql/src/test/results/clientpositive/smb_mapjoin_5.q.out
    hive/trunk/ql/src/test/results/clientpositive/smb_mapjoin_6.q.out
    hive/trunk/ql/src/test/results/clientpositive/smb_mapjoin_7.q.out
    hive/trunk/ql/src/test/results/clientpositive/stats11.q.out
    hive/trunk/ql/src/test/results/clientpositive/subquery_multiinsert.q.out
    hive/trunk/ql/src/test/results/clientpositive/subquery_notexists.q.out
    hive/trunk/ql/src/test/results/clientpositive/subquery_notin.q.out
    hive/trunk/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out
    hive/trunk/ql/src/test/results/clientpositive/subquery_views.q.out
    hive/trunk/ql/src/test/results/clientpositive/temp_table_join1.q.out
    hive/trunk/ql/src/test/results/clientpositive/temp_table_windowing_expressions.q.out
    hive/trunk/ql/src/test/results/clientpositive/tez/auto_join1.q.out
    hive/trunk/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out
    hive/trunk/ql/src/test/results/clientpositive/tez/bucket_map_join_tez2.q.out
    hive/trunk/ql/src/test/results/clientpositive/tez/cross_product_check_1.q.out
    hive/trunk/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out
    hive/trunk/ql/src/test/results/clientpositive/tez/filter_join_breaktask.q.out
    hive/trunk/ql/src/test/results/clientpositive/tez/join1.q.out
    hive/trunk/ql/src/test/results/clientpositive/tez/mapjoin_decimal.q.out
    hive/trunk/ql/src/test/results/clientpositive/tez/mrr.q.out
    hive/trunk/ql/src/test/results/clientpositive/tez/ptf.q.out
    hive/trunk/ql/src/test/results/clientpositive/tez/tez_join_hash.q.out
    hive/trunk/ql/src/test/results/clientpositive/tez/tez_join_tests.q.out
    hive/trunk/ql/src/test/results/clientpositive/tez/tez_joins_explain.q.out
    hive/trunk/ql/src/test/results/clientpositive/tez/tez_union.q.out
    hive/trunk/ql/src/test/results/clientpositive/tez/vectorized_mapjoin.q.out
    hive/trunk/ql/src/test/results/clientpositive/tez/vectorized_nested_mapjoin.q.out
    hive/trunk/ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out
    hive/trunk/ql/src/test/results/clientpositive/tez/vectorized_shufflejoin.q.out
    hive/trunk/ql/src/test/results/clientpositive/udtf_json_tuple.q.out
    hive/trunk/ql/src/test/results/clientpositive/udtf_parse_url_tuple.q.out
    hive/trunk/ql/src/test/results/clientpositive/udtf_stack.q.out
    hive/trunk/ql/src/test/results/clientpositive/union22.q.out
    hive/trunk/ql/src/test/results/clientpositive/union24.q.out
    hive/trunk/ql/src/test/results/clientpositive/union26.q.out
    hive/trunk/ql/src/test/results/clientpositive/union27.q.out
    hive/trunk/ql/src/test/results/clientpositive/union32.q.out
    hive/trunk/ql/src/test/results/clientpositive/union_remove_12.q.out
    hive/trunk/ql/src/test/results/clientpositive/union_remove_13.q.out
    hive/trunk/ql/src/test/results/clientpositive/union_remove_14.q.out
    hive/trunk/ql/src/test/results/clientpositive/union_top_level.q.out
    hive/trunk/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out
    hive/trunk/ql/src/test/results/clientpositive/vectorized_bucketmapjoin1.q.out
    hive/trunk/ql/src/test/results/clientpositive/vectorized_context.q.out
    hive/trunk/ql/src/test/results/clientpositive/vectorized_mapjoin.q.out
    hive/trunk/ql/src/test/results/clientpositive/vectorized_nested_mapjoin.q.out
    hive/trunk/ql/src/test/results/clientpositive/vectorized_ptf.q.out
    hive/trunk/ql/src/test/results/clientpositive/vectorized_shufflejoin.q.out
    hive/trunk/ql/src/test/results/clientpositive/windowing.q.out
    hive/trunk/ql/src/test/results/clientpositive/windowing_expressions.q.out
    hive/trunk/ql/src/test/results/compiler/plan/case_sensitivity.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/cast1.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/groupby1.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/groupby2.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/groupby3.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/groupby4.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/groupby5.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/groupby6.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/input1.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/input2.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/input20.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/input3.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/input4.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/input5.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/input6.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/input7.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/input8.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/input9.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/input_part1.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/input_testsequencefile.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/input_testxpath.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/input_testxpath2.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/join1.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/join2.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/join3.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/join4.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/join5.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/join6.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/join7.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/join8.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/sample1.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/sample2.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/sample3.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/sample4.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/sample5.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/sample6.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/sample7.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/subq.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/udf1.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/udf4.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/udf6.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/udf_case.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/udf_when.q.xml
    hive/trunk/ql/src/test/results/compiler/plan/union.q.xml

Modified: hive/trunk/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java
URL: http://svn.apache.org/viewvc/hive/trunk/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java?rev=1618664&r1=1618663&r2=1618664&view=diff
==============================================================================
--- hive/trunk/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java (original)
+++ hive/trunk/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java Mon Aug 18 17:05:02 2014
@@ -53,7 +53,7 @@ abstract class AbstractRecordWriter impl
   private int currentBucketId = 0;
   private final Path partitionPath;
 
-  final AcidOutputFormat<?> outf;
+  final AcidOutputFormat<?,?> outf;
 
   protected AbstractRecordWriter(HiveEndPoint endPoint, HiveConf conf)
           throws ConnectionError, StreamingException {
@@ -70,7 +70,7 @@ abstract class AbstractRecordWriter impl
                 + endPoint);
       }
       String outFormatName = this.tbl.getSd().getOutputFormat();
-      outf = (AcidOutputFormat<?>) ReflectionUtils.newInstance(Class.forName(outFormatName), conf);
+      outf = (AcidOutputFormat<?,?>) ReflectionUtils.newInstance(Class.forName(outFormatName), conf);
     } catch (MetaException e) {
       throw new ConnectionError(endPoint, e);
     } catch (NoSuchObjectException e) {

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java?rev=1618664&r1=1618663&r2=1618664&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java Mon Aug 18 17:05:02 2014
@@ -105,7 +105,7 @@ public class FetchOperator implements Se
   private transient JobConf job;
   private transient WritableComparable key;
   private transient Writable value;
-  private transient Writable[] vcValues;
+  private transient Object[] vcValues;
   private transient Deserializer serde;
   private transient Deserializer tblSerde;
   private transient Converter partTblObjectInspectorConverter;
@@ -141,12 +141,11 @@ public class FetchOperator implements Se
       List<String> names = new ArrayList<String>(vcCols.size());
       List<ObjectInspector> inspectors = new ArrayList<ObjectInspector>(vcCols.size());
       for (VirtualColumn vc : vcCols) {
-        inspectors.add(PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(
-                vc.getTypeInfo()));
+        inspectors.add(vc.getObjectInspector());
         names.add(vc.getName());
       }
       vcsOI = ObjectInspectorFactory.getStandardStructObjectInspector(names, inspectors);
-      vcValues = new Writable[vcCols.size()];
+      vcValues = new Object[vcCols.size()];
     }
     isPartitioned = work.isPartitioned();
     tblDataDone = false;

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java?rev=1618664&r1=1618663&r2=1618664&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java Mon Aug 18 17:05:02 2014
@@ -35,6 +35,7 @@ import org.apache.hadoop.hive.conf.HiveC
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.ql.io.IOContext;
 import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext;
+import org.apache.hadoop.hive.ql.io.RecordIdentifier;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
 import org.apache.hadoop.hive.ql.plan.MapWork;
@@ -140,7 +141,7 @@ public class MapOperator extends Operato
     String tableName;
     String partName;
     List<VirtualColumn> vcs;
-    Writable[] vcValues;
+    Object[] vcValues;
 
     private boolean isPartitioned() {
       return partObjectInspector != null;
@@ -165,7 +166,7 @@ public class MapOperator extends Operato
    * op.
    *
    * @param hconf
-   * @param mrwork
+   * @param mapWork
    * @throws HiveException
    */
   public void initializeAsRoot(Configuration hconf, MapWork mapWork)
@@ -250,13 +251,13 @@ public class MapOperator extends Operato
 
     // The op may not be a TableScan for mapjoins
     // Consider the query: select /*+MAPJOIN(a)*/ count(*) FROM T1 a JOIN T2 b ON a.key = b.key;
-    // In that case, it will be a Select, but the rowOI need not be ammended
+    // In that case, it will be a Select, but the rowOI need not be amended
     if (ctx.op instanceof TableScanOperator) {
       TableScanOperator tsOp = (TableScanOperator) ctx.op;
       TableScanDesc tsDesc = tsOp.getConf();
       if (tsDesc != null && tsDesc.hasVirtualCols()) {
         opCtx.vcs = tsDesc.getVirtualCols();
-        opCtx.vcValues = new Writable[opCtx.vcs.size()];
+        opCtx.vcValues = new Object[opCtx.vcs.size()];
         opCtx.vcsObjectInspector = VirtualColumn.getVCSObjectInspector(opCtx.vcs);
         if (opCtx.isPartitioned()) {
           opCtx.rowWithPartAndVC = Arrays.copyOfRange(opCtx.rowWithPart, 0, 3);
@@ -550,13 +551,13 @@ public class MapOperator extends Operato
     }
   }
 
-  public static Writable[] populateVirtualColumnValues(ExecMapperContext ctx,
-      List<VirtualColumn> vcs, Writable[] vcValues, Deserializer deserializer) {
+  public static Object[] populateVirtualColumnValues(ExecMapperContext ctx,
+      List<VirtualColumn> vcs, Object[] vcValues, Deserializer deserializer) {
     if (vcs == null) {
       return vcValues;
     }
     if (vcValues == null) {
-      vcValues = new Writable[vcs.size()];
+      vcValues = new Object[vcs.size()];
     }
     for (int i = 0; i < vcs.size(); i++) {
       VirtualColumn vc = vcs.get(i);
@@ -602,6 +603,19 @@ public class MapOperator extends Operato
           old.set(current);
         }
       }
+      else if(vc.equals(VirtualColumn.ROWID)) {
+        if(ctx.getIoCxt().ri == null) {
+          vcValues[i] = null;
+        }
+        else {
+          if(vcValues[i] == null) {
+            vcValues[i] = new Object[RecordIdentifier.Field.values().length];
+          }
+          RecordIdentifier.StructInfo.toArray(ctx.getIoCxt().ri, (Object[])vcValues[i]);
+          ctx.getIoCxt().ri = null;//so we don't accidentally cache the value; shouldn't
+          //happen since IO layer either knows how to produce ROW__ID or not - but to be safe
+        }
+      }
     }
     return vcValues;
   }

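The new ROW__ID branch above materializes the RecordIdentifier as a plain
Object[] struct rather than a Writable, which is why the vcValues arrays were
widened from Writable[] to Object[]. A minimal sketch of the resulting layout
(the literal values are invented; the field order follows the
RecordIdentifier.Field enum introduced later in this patch):

    // Illustration only: the Object[] that populateVirtualColumnValues()
    // fills in for ROW__ID, in RecordIdentifier.Field order.
    public class RowIdStructSketch {
      public static void main(String[] args) {
        Object[] rowId = new Object[3];
        rowId[0] = 100L; // transactionId (long)
        rowId[1] = 0;    // bucketId (int)
        rowId[2] = 7L;   // rowId (long)
        System.out.println(java.util.Arrays.toString(rowId)); // [100, 0, 7]
      }
    }
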
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java?rev=1618664&r1=1618663&r2=1618664&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java Mon Aug 18 17:05:02 2014
@@ -155,7 +155,7 @@ public class ExecMapper extends MapReduc
       }
     }
   }
-
+  @Override
   public void map(Object key, Object value, OutputCollector output,
       Reporter reporter) throws IOException {
     if (oc == null) {

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java?rev=1618664&r1=1618663&r2=1618664&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java Mon Aug 18 17:05:02 2014
@@ -22,7 +22,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.InputSplit;
 import org.apache.hadoop.mapred.RecordReader;
@@ -86,11 +86,20 @@ import java.io.IOException;
  * <p>
  * To support transitions between non-ACID layouts to ACID layouts, the input
  * formats are expected to support both layouts and detect the correct one.
- *
- * @param <V> The row type
+ * <p>
+ *   A note on the KEY of this InputFormat.  
+ *   For row-at-a-time processing, KEY can conveniently pass RowId into the operator
+ *   pipeline.  For vectorized execution the KEY could perhaps represent a range in the batch.
+ *   Since {@link org.apache.hadoop.hive.ql.io.orc.OrcInputFormat} is declared to return
+ *   {@code NullWritable} key, {@link org.apache.hadoop.hive.ql.io.AcidRecordReader} is defined
+ *   to provide access to the RowId.  Other implementations of AcidInputFormat can use either
+ *   mechanism.
+ * </p>
+ * 
+ * @param <VALUE> The row type
  */
-public interface AcidInputFormat<V>
-    extends InputFormat<NullWritable, V>, InputFormatChecker {
+public interface AcidInputFormat<KEY extends WritableComparable, VALUE>
+    extends InputFormat<KEY, VALUE>, InputFormatChecker {
 
   /**
    * Options for controlling the record readers.
@@ -140,7 +149,7 @@ public interface AcidInputFormat<V>
    * @return a record reader
    * @throws IOException
    */
-  public RowReader<V> getReader(InputSplit split,
+  public RowReader<VALUE> getReader(InputSplit split,
                                 Options options) throws IOException;
 
   public static interface RawReader<V>
@@ -162,11 +171,18 @@ public interface AcidInputFormat<V>
    * @return a record reader
    * @throws IOException
    */
-   RawReader<V> getRawReader(Configuration conf,
+   RawReader<VALUE> getRawReader(Configuration conf,
                              boolean collapseEvents,
                              int bucket,
                              ValidTxnList validTxnList,
                              Path baseDirectory,
                              Path[] deltaDirectory
                              ) throws IOException;
+
+  /**
+   * RecordReader returned by AcidInputFormat working in row-at-a-time mode should implement AcidRecordReader.
+   */
+  public interface AcidRecordReader<K,V> extends RecordReader<K,V> {
+    RecordIdentifier getRecordIdentifier();
+  }
 }

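To make the KEY discussion in the javadoc above concrete, here is a minimal
sketch of a reader that takes the second route: it returns NullWritable keys
and exposes ROW__ID through the new AcidRecordReader hook. Everything below
other than the AcidInputFormat.AcidRecordReader and RecordIdentifier types is
invented for illustration (setValues() is the existing setter on
RecordIdentifier):

    import java.io.IOException;

    import org.apache.hadoop.hive.ql.io.AcidInputFormat;
    import org.apache.hadoop.hive.ql.io.RecordIdentifier;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.io.Text;

    public class SketchAcidRecordReader
        implements AcidInputFormat.AcidRecordReader<NullWritable, Text> {
      private static final long NUM_ROWS = 3; // pretend the split holds 3 rows
      private final RecordIdentifier ri = new RecordIdentifier();
      private long row = 0;

      @Override
      public boolean next(NullWritable key, Text value) throws IOException {
        if (row >= NUM_ROWS) {
          return false;
        }
        ri.setValues(1L, 0, row); // txnId=1, bucket=0, rowId = position in split
        value.set("row-" + row++);
        return true;
      }
      @Override
      public RecordIdentifier getRecordIdentifier() { return ri; } // the new hook
      @Override
      public NullWritable createKey() { return NullWritable.get(); }
      @Override
      public Text createValue() { return new Text(); }
      @Override
      public long getPos() { return row; }
      @Override
      public void close() { }
      @Override
      public float getProgress() { return row / (float) NUM_ROWS; }
    }
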
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java?rev=1618664&r1=1618663&r2=1618664&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java Mon Aug 18 17:05:02 2014
@@ -23,7 +23,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.mapred.Reporter;
 
 import java.io.IOException;
@@ -34,7 +34,7 @@ import java.util.Properties;
  * An extension for OutputFormats that want to implement ACID transactions.
  * @param <V> the row type of the file
  */
-public interface AcidOutputFormat<V> extends HiveOutputFormat<NullWritable, V> {
+public interface AcidOutputFormat<K extends WritableComparable, V> extends HiveOutputFormat<K, V> {
 
   /**
    * Options to control how the files are written

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java?rev=1618664&r1=1618663&r2=1618664&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java Mon Aug 18 17:05:02 2014
@@ -20,17 +20,13 @@ package org.apache.hadoop.hive.ql.io;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
-import java.util.Properties;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.common.ObjectPair;
 import org.apache.hadoop.hive.io.HiveIOExceptionHandlerUtil;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.exec.FooterBuffer;
@@ -42,16 +38,13 @@ import org.apache.hadoop.hive.ql.udf.gen
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrLessThan;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPGreaterThan;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPLessThan;
-import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparable;
-import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.mapred.FileSplit;
 import org.apache.hadoop.mapred.InputSplit;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.RecordReader;
-import org.apache.hadoop.util.ReflectionUtils;
 
 /** This class prepares an IOContext, and provides the ability to perform a binary search on the
   * data.  The binary search can be used by setting the value of inputFormatSorted in the
@@ -119,7 +112,18 @@ public abstract class HiveContextAwareRe
     }
     updateIOContext();
     try {
-      return doNext(key, value);
+      boolean retVal = doNext(key, value);
+      if(retVal) {
+        if(key instanceof RecordIdentifier) {
+          //supports AcidInputFormat which uses the KEY to pass ROW__ID info
+          ioCxtRef.ri = (RecordIdentifier)key;
+        }
+        else if(recordReader instanceof AcidInputFormat.AcidRecordReader) {
+          //supports AcidInputFormat which does not use the KEY to pass ROW__ID info
+          ioCxtRef.ri = ((AcidInputFormat.AcidRecordReader) recordReader).getRecordIdentifier();
+        }
+      }
+      return retVal;
     } catch (IOException e) {
       ioCxtRef.setIOExceptions(true);
       throw e;

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java?rev=1618664&r1=1618663&r2=1618664&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java Mon Aug 18 17:05:02 2014
@@ -69,6 +69,10 @@ public class IOContext {
   Comparison comparison = null;
   // The class name of the generic UDF being used by the filter
   String genericUDFClassName = null;
+  /**
+   * supports {@link org.apache.hadoop.hive.ql.metadata.VirtualColumn#ROWID}
+   */
+  public RecordIdentifier ri;
 
   public static enum Comparison {
     GREATER,

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/RecordIdentifier.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/RecordIdentifier.java?rev=1618664&r1=1618663&r2=1618664&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/RecordIdentifier.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/RecordIdentifier.java Mon Aug 18 17:05:02 2014
@@ -19,16 +19,81 @@
 package org.apache.hadoop.hive.ql.io;
 
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.io.WritableComparable;
 
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
 
 /**
- * Gives the Record identifer information for the current record.
+ * Gives the Record identifier information for the current record.
  */
 public class RecordIdentifier implements WritableComparable<RecordIdentifier> {
+  /**
+   * This is in support of {@link org.apache.hadoop.hive.ql.metadata.VirtualColumn#ROWID}
+   * Contains metadata about each field in RecordIdentifier that needs to be part of ROWID
+   * which is represented as a struct {@link org.apache.hadoop.hive.ql.io.RecordIdentifier.StructInfo}.
+   * Each field of RecordIdentifier which should be part of ROWID should be in this enum... which 
+   * really means that it should be part of VirtualColumn (so make a subclass for rowid).
+   */
+  public static enum Field {
+    //note the enum names match field names in the struct
+    transactionId(TypeInfoFactory.longTypeInfo,
+      PrimitiveObjectInspectorFactory.javaLongObjectInspector),
+    bucketId(TypeInfoFactory.intTypeInfo, PrimitiveObjectInspectorFactory.javaIntObjectInspector),
+    rowId(TypeInfoFactory.longTypeInfo, PrimitiveObjectInspectorFactory.javaLongObjectInspector);
+    public final TypeInfo fieldType;
+    public final ObjectInspector fieldOI;
+    Field(TypeInfo fieldType, ObjectInspector fieldOI) {
+      this.fieldType = fieldType;
+      this.fieldOI = fieldOI;
+    }
+  }
+  /**
+   * RecordIdentifier is passed along the operator tree as a struct.  This class contains a few
+   * utilities for that.
+   */
+  public static final class StructInfo {
+    private static final List<String> fieldNames = new ArrayList<String>(Field.values().length);
+    private static final List<TypeInfo> fieldTypes = new ArrayList<TypeInfo>(fieldNames.size());
+    private static final List<ObjectInspector> fieldOis = 
+      new ArrayList<ObjectInspector>(fieldNames.size());
+    static {
+      for(Field f : Field.values()) {
+        fieldNames.add(f.name());
+        fieldTypes.add(f.fieldType);
+        fieldOis.add(f.fieldOI);
+      }
+    }
+    public static final TypeInfo typeInfo = 
+      TypeInfoFactory.getStructTypeInfo(fieldNames, fieldTypes);
+    public static final ObjectInspector oi = 
+      ObjectInspectorFactory.getStandardStructObjectInspector(fieldNames, fieldOis);
+
+    /**
+     * Copies relevant fields from {@code ri} to {@code struct}
+     * @param ri
+     * @param struct must be of size Field.values().size()
+     */
+    public static void toArray(RecordIdentifier ri, Object[] struct) {
+      assert struct != null && struct.length == Field.values().length;
+      if(ri == null) {
+        Arrays.fill(struct, null);
+        return;
+      }
+      struct[Field.transactionId.ordinal()] = ri.getTransactionId();
+      struct[Field.bucketId.ordinal()] = ri.getBucketId();
+      struct[Field.rowId.ordinal()] = ri.getRowId();
+    }
+  }
+  
   private long transactionId;
   private int bucketId;
   private long rowId;

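A short sketch of the new StructInfo utility in use (the values are invented;
RecordIdentifier(long, int, long) is the existing constructor):

    import java.util.Arrays;

    import org.apache.hadoop.hive.ql.io.RecordIdentifier;

    public class StructInfoSketch {
      public static void main(String[] args) {
        RecordIdentifier ri = new RecordIdentifier(100L, 2, 42L);
        // The destination must have exactly Field.values().length slots.
        Object[] struct = new Object[RecordIdentifier.Field.values().length];
        RecordIdentifier.StructInfo.toArray(ri, struct);
        System.out.println(Arrays.toString(struct)); // [100, 2, 42]
        RecordIdentifier.StructInfo.toArray(null, struct); // null ri blanks it
        System.out.println(Arrays.toString(struct)); // [null, null, null]
      }
    }
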
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java?rev=1618664&r1=1618663&r2=1618664&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java Mon Aug 18 17:05:02 2014
@@ -98,7 +98,7 @@ import com.google.common.util.concurrent
  */
 public class OrcInputFormat  implements InputFormat<NullWritable, OrcStruct>,
   InputFormatChecker, VectorizedInputFormatInterface,
-    AcidInputFormat<OrcStruct> {
+    AcidInputFormat<NullWritable, OrcStruct> {
 
   private static final Log LOG = LogFactory.getLog(OrcInputFormat.class);
   static final HadoopShims SHIMS = ShimLoader.getHadoopShims();
@@ -989,7 +989,7 @@ public class OrcInputFormat  implements 
     boolean vectorMode = Utilities.isVectorMode(conf);
 
     // if HiveCombineInputFormat gives us FileSplits instead of OrcSplits,
-    // we know it is not ACID.
+    // we know it is not ACID. (see a check in CombineHiveInputFormat.getSplits() that assures this)
     if (inputSplit.getClass() == FileSplit.class) {
       if (vectorMode) {
         return createVectorizedReader(inputSplit, conf, reporter);
@@ -998,62 +998,75 @@ public class OrcInputFormat  implements 
           ((FileSplit) inputSplit).getPath(),
           OrcFile.readerOptions(conf)), conf, (FileSplit) inputSplit);
     }
-
+    
     OrcSplit split = (OrcSplit) inputSplit;
     reporter.setStatus(inputSplit.toString());
 
-    // if we are strictly old-school, just use the old code
+    Options options = new Options(conf).reporter(reporter);
+    final RowReader<OrcStruct> inner = getReader(inputSplit, options);
+    
+    
+    /*Even though there are no delta files, we still need to produce row ids so that an
+    * UPDATE or DELETE statement would work on a table which didn't have any previous updates*/
     if (split.isOriginal() && split.getDeltas().isEmpty()) {
       if (vectorMode) {
         return createVectorizedReader(inputSplit, conf, reporter);
       } else {
-        return new OrcRecordReader(OrcFile.createReader(split.getPath(),
-            OrcFile.readerOptions(conf)), conf, split);
+        return new NullKeyRecordReader(inner, conf);
       }
     }
 
-    Options options = new Options(conf).reporter(reporter);
-    final RowReader<OrcStruct> inner = getReader(inputSplit, options);
     if (vectorMode) {
       return (org.apache.hadoop.mapred.RecordReader)
           new VectorizedOrcAcidRowReader(inner, conf, (FileSplit) inputSplit);
     }
-    final RecordIdentifier id = inner.createKey();
-
-    // Return a RecordReader that is compatible with the Hive 0.12 reader
-    // with NullWritable for the key instead of RecordIdentifier.
-    return new org.apache.hadoop.mapred.RecordReader<NullWritable, OrcStruct>(){
-      @Override
-      public boolean next(NullWritable nullWritable,
-                          OrcStruct orcStruct) throws IOException {
-        return inner.next(id, orcStruct);
-      }
+    return new NullKeyRecordReader(inner, conf);
+  }
+  /**
+   * Return a RecordReader that is compatible with the Hive 0.12 reader
+   * with NullWritable for the key instead of RecordIdentifier.
+   */
+  public static final class NullKeyRecordReader implements AcidRecordReader<NullWritable, OrcStruct> {
+    private final RecordIdentifier id;
+    private final RowReader<OrcStruct> inner;
+
+    public RecordIdentifier getRecordIdentifier() {
+      return id;
+    }
+    private NullKeyRecordReader(RowReader<OrcStruct> inner, Configuration conf) {
+      this.inner = inner;
+      id = inner.createKey();
+    }
+    @Override
+    public boolean next(NullWritable nullWritable,
+                        OrcStruct orcStruct) throws IOException {
+      return inner.next(id, orcStruct);
+    }
 
-      @Override
-      public NullWritable createKey() {
-        return NullWritable.get();
-      }
+    @Override
+    public NullWritable createKey() {
+      return NullWritable.get();
+    }
 
-      @Override
-      public OrcStruct createValue() {
-        return inner.createValue();
-      }
+    @Override
+    public OrcStruct createValue() {
+      return inner.createValue();
+    }
 
-      @Override
-      public long getPos() throws IOException {
-        return inner.getPos();
-      }
+    @Override
+    public long getPos() throws IOException {
+      return inner.getPos();
+    }
 
-      @Override
-      public void close() throws IOException {
-        inner.close();
-      }
+    @Override
+    public void close() throws IOException {
+      inner.close();
+    }
 
-      @Override
-      public float getProgress() throws IOException {
-        return inner.getProgress();
-      }
-    };
+    @Override
+    public float getProgress() throws IOException {
+      return inner.getProgress();
+    }
   }
 
 

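Because NullKeyRecordReader implements the new AcidRecordReader interface, a
caller holding only a plain mapred RecordReader can still recover the ROW__ID
with an instanceof check, which mirrors the check added to
HiveContextAwareRecordReader.next() earlier in this patch. A sketch of that
consumption pattern (the drain() helper and the generic V are illustrative):

    import java.io.IOException;

    import org.apache.hadoop.hive.ql.io.AcidInputFormat;
    import org.apache.hadoop.hive.ql.io.RecordIdentifier;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.mapred.RecordReader;

    public class AcidReaderClientSketch {
      // Reads every row and, when the reader supports it, prints the ROW__ID.
      static <V> void drain(RecordReader<NullWritable, V> reader) throws IOException {
        NullWritable key = reader.createKey();
        V value = reader.createValue();
        while (reader.next(key, value)) {
          if (reader instanceof AcidInputFormat.AcidRecordReader) {
            RecordIdentifier ri =
                ((AcidInputFormat.AcidRecordReader<?, ?>) reader).getRecordIdentifier();
            System.out.println(ri.getTransactionId() + "/"
                + ri.getBucketId() + "/" + ri.getRowId());
          }
        }
      }
    }
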
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java?rev=1618664&r1=1618663&r2=1618664&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java Mon Aug 18 17:05:02 2014
@@ -50,7 +50,7 @@ import java.util.Properties;
  * A Hive OutputFormat for ORC files.
  */
 public class OrcOutputFormat extends FileOutputFormat<NullWritable, OrcSerdeRow>
-                        implements AcidOutputFormat<OrcSerdeRow> {
+                        implements AcidOutputFormat<NullWritable, OrcSerdeRow> {
 
   private static class OrcRecordWriter
       implements RecordWriter<NullWritable, OrcSerdeRow>,

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/VirtualColumn.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/VirtualColumn.java?rev=1618664&r1=1618663&r2=1618664&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/VirtualColumn.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/VirtualColumn.java Mon Aug 18 17:05:02 2014
@@ -25,11 +25,13 @@ import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.io.RecordIdentifier;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 
 public class VirtualColumn implements Serializable {
@@ -41,6 +43,10 @@ public class VirtualColumn implements Se
   public static VirtualColumn ROWOFFSET = new VirtualColumn("ROW__OFFSET__INSIDE__BLOCK", (PrimitiveTypeInfo)TypeInfoFactory.longTypeInfo);
 
   public static VirtualColumn RAWDATASIZE = new VirtualColumn("RAW__DATA__SIZE", (PrimitiveTypeInfo)TypeInfoFactory.longTypeInfo);
+  /**
+   * {@link org.apache.hadoop.hive.ql.io.RecordIdentifier} 
+   */
+  public static VirtualColumn ROWID = new VirtualColumn("ROW__ID", RecordIdentifier.StructInfo.typeInfo, true, RecordIdentifier.StructInfo.oi);
 
   /**
    * GROUPINGID is used with GROUP BY GROUPINGS SETS, ROLLUP and CUBE.
@@ -53,23 +59,26 @@ public class VirtualColumn implements Se
       new VirtualColumn("GROUPING__ID", (PrimitiveTypeInfo) TypeInfoFactory.intTypeInfo);
 
   public static VirtualColumn[] VIRTUAL_COLUMNS =
-      new VirtualColumn[] {FILENAME, BLOCKOFFSET, ROWOFFSET, RAWDATASIZE, GROUPINGID};
+      new VirtualColumn[] {FILENAME, BLOCKOFFSET, ROWOFFSET, RAWDATASIZE, GROUPINGID, ROWID};
 
   private String name;
-  private PrimitiveTypeInfo typeInfo;
+  private TypeInfo typeInfo;
   private boolean isHidden = true;
+  private ObjectInspector oi;
 
   public VirtualColumn() {
   }
 
   public VirtualColumn(String name, PrimitiveTypeInfo typeInfo) {
-    this(name, typeInfo, true);
+    this(name, typeInfo, true, 
+      PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(typeInfo));
   }
 
-  VirtualColumn(String name, PrimitiveTypeInfo typeInfo, boolean isHidden) {
+  VirtualColumn(String name, TypeInfo typeInfo, boolean isHidden, ObjectInspector oi) {
     this.name = name;
     this.typeInfo = typeInfo;
     this.isHidden = isHidden;
+    this.oi = oi;
   }
 
   public static List<VirtualColumn> getStatsRegistry(Configuration conf) {
@@ -87,11 +96,12 @@ public class VirtualColumn implements Se
     if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEROWOFFSET)) {
       l.add(ROWOFFSET);
     }
+    l.add(ROWID);
 
     return l;
   }
 
-  public PrimitiveTypeInfo getTypeInfo() {
+  public TypeInfo getTypeInfo() {
     return typeInfo;
   }
 
@@ -118,6 +128,9 @@ public class VirtualColumn implements Se
   public void setIsHidden(boolean isHidden) {
     this.isHidden = isHidden;
   }
+  public ObjectInspector getObjectInspector() {
+    return oi;
+  }
 
   @Override
   public boolean equals(Object o) {
@@ -144,8 +157,7 @@ public class VirtualColumn implements Se
     List<ObjectInspector> inspectors = new ArrayList<ObjectInspector>(vcs.size());
     for (VirtualColumn vc : vcs) {
       names.add(vc.getName());
-      inspectors.add(PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(
-          vc.getTypeInfo()));
+      inspectors.add(vc.oi);
     }
     return ObjectInspectorFactory.getStandardStructObjectInspector(names, inspectors);
   }

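With this change ROW__ID joins the registry of hidden virtual columns, and it
is the first whose type is a struct rather than a primitive. A small sketch
that lists the registry (illustration only; the comment shows the line the
enum field names above imply for ROW__ID):

    import org.apache.hadoop.hive.ql.metadata.VirtualColumn;

    public class VirtualColumnSketch {
      public static void main(String[] args) {
        for (VirtualColumn vc : VirtualColumn.VIRTUAL_COLUMNS) {
          System.out.println(vc.getName() + " : " + vc.getTypeInfo().getTypeName());
        }
        // last line printed:
        // ROW__ID : struct<transactionId:bigint,bucketId:int,rowId:bigint>
      }
    }
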
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java?rev=1618664&r1=1618663&r2=1618664&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java Mon Aug 18 17:05:02 2014
@@ -39,6 +39,7 @@ import org.apache.hadoop.hive.serde.serd
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.InputSplit;
 import org.apache.hadoop.mapred.JobClient;
@@ -485,20 +486,21 @@ public class CompactorMR {
   }
 
   static class CompactorMap<V extends Writable>
-      implements Mapper<NullWritable, CompactorInputSplit,  NullWritable,  NullWritable> {
+      implements Mapper<WritableComparable, CompactorInputSplit,  NullWritable,  NullWritable> {
 
     JobConf jobConf;
     RecordWriter writer;
 
     @Override
-    public void map(NullWritable key, CompactorInputSplit split,
+    public void map(WritableComparable key, CompactorInputSplit split,
                     OutputCollector<NullWritable, NullWritable> nullWritableVOutputCollector,
                     Reporter reporter) throws IOException {
       // This will only get called once, since CompactRecordReader only returns one record,
       // the input split.
       // Based on the split we're passed we go instantiate the real reader and then iterate on it
       // until it finishes.
-      AcidInputFormat aif =
+      @SuppressWarnings("unchecked")//since there is no way to parametrize instance of Class
+      AcidInputFormat<WritableComparable, V> aif =
           instantiate(AcidInputFormat.class, jobConf.get(INPUT_FORMAT_CLASS_NAME));
       ValidTxnList txnList =
           new ValidTxnListImpl(jobConf.get(ValidTxnList.VALID_TXNS_KEY));
@@ -541,7 +543,8 @@ public class CompactorMR {
             .bucket(bucket);
 
         // Instantiate the underlying output format
-        AcidOutputFormat<V> aof =
+        @SuppressWarnings("unchecked")//since there is no way to parametrize instance of Class
+        AcidOutputFormat<WritableComparable, V> aof =
             instantiate(AcidOutputFormat.class, jobConf.get(OUTPUT_FORMAT_CLASS_NAME));
 
         writer = aof.getRawRecordWriter(new Path(jobConf.get(TMP_LOCATION)), options);

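The @SuppressWarnings annotations above are needed because Class.forName()
can only yield a raw Class, so the generic parameters have to be reintroduced
with an unchecked cast. A sketch of that pattern in the spirit of the
instantiate() helper the mapper calls (the helper below is invented for
illustration, not the one in CompactorMR):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.util.ReflectionUtils;

    public class InstantiateSketch {
      @SuppressWarnings("unchecked") // Class.forName() loses the type parameters
      static <T> T instantiate(Class<T> expected, String className, Configuration conf)
          throws ClassNotFoundException {
        Class<?> raw = Class.forName(className);
        if (!expected.isAssignableFrom(raw)) {
          throw new IllegalArgumentException(className + " is not a " + expected.getName());
        }
        // ReflectionUtils also wires in the Configuration via Configurable.setConf().
        return (T) ReflectionUtils.newInstance(raw, conf);
      }
    }
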
Modified: hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java?rev=1618664&r1=1618663&r2=1618664&view=diff
==============================================================================
--- hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java (original)
+++ hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java Mon Aug 18 17:05:02 2014
@@ -973,7 +973,7 @@ public class TestInputOutputFormat {
     List<? extends StructField> fields =inspector.getAllStructFieldRefs();
     IntObjectInspector intInspector =
         (IntObjectInspector) fields.get(0).getFieldObjectInspector();
-    assertEquals(0.0, reader.getProgress(), 0.00001);
+    assertEquals(0.33, reader.getProgress(), 0.01);
     while (reader.next(key, value)) {
       assertEquals(++rowNum, intInspector.get(inspector.
           getStructFieldData(serde.deserialize(value), fields.get(0))));

Modified: hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java?rev=1618664&r1=1618663&r2=1618664&view=diff
==============================================================================
--- hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java (original)
+++ hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java Mon Aug 18 17:05:02 2014
@@ -36,9 +36,9 @@ import org.apache.hadoop.hive.ql.io.Reco
 import org.apache.hadoop.hive.ql.io.RecordUpdater;
 import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.mapred.*;
 import org.apache.hadoop.util.Progressable;
 import org.apache.thrift.TException;
@@ -276,7 +276,7 @@ public abstract class CompactorTest {
     }
   }
 
-  static class MockInputFormat implements AcidInputFormat<Text> {
+  static class MockInputFormat implements AcidInputFormat<WritableComparable,Text> {
 
     @Override
     public AcidInputFormat.RowReader<Text> getReader(InputSplit split,
@@ -315,7 +315,7 @@ public abstract class CompactorTest {
     }
 
     @Override
-    public RecordReader<NullWritable, Text> getRecordReader(InputSplit inputSplit, JobConf entries,
+    public RecordReader<WritableComparable, Text> getRecordReader(InputSplit inputSplit, JobConf entries,
                                                             Reporter reporter) throws IOException {
       return null;
     }
@@ -398,7 +398,7 @@ public abstract class CompactorTest {
   // This class isn't used and I suspect does totally the wrong thing.  It's only here so that I
   // can provide some output format to the tables and partitions I create.  I actually write to
   // those tables directory.
-  static class MockOutputFormat implements AcidOutputFormat<Text> {
+  static class MockOutputFormat implements AcidOutputFormat<WritableComparable, Text> {
 
     @Override
     public RecordUpdater getRecordUpdater(Path path, Options options) throws
@@ -420,7 +420,7 @@ public abstract class CompactorTest {
     }
 
     @Override
-    public RecordWriter<NullWritable, Text> getRecordWriter(FileSystem fileSystem, JobConf entries,
+    public RecordWriter<WritableComparable, Text> getRecordWriter(FileSystem fileSystem, JobConf entries,
                                                             String s,
                                                             Progressable progressable) throws
         IOException {

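For readers following the generic-parameter change above: moving the key type from NullWritable to WritableComparable lets a record reader hand back a sortable row identifier as its key. A minimal, self-contained sketch of a reader with the new key type (not part of this commit; LongWritable and the canned rows are stand-ins for Hive's actual RecordIdentifier and real data):

    import java.io.IOException;

    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.io.WritableComparable;
    import org.apache.hadoop.mapred.RecordReader;

    class SketchRowIdReader implements RecordReader<WritableComparable, Text> {
      private final String[] rows = {"a", "b", "c"};
      private int pos = 0;

      public boolean next(WritableComparable key, Text value) throws IOException {
        if (pos >= rows.length) {
          return false;
        }
        ((LongWritable) key).set(pos);  // a real reader would fill in its row identifier here
        value.set(rows[pos++]);
        return true;
      }

      public WritableComparable createKey() {
        return new LongWritable();      // stand-in for the reader's row-identifier type
      }

      public Text createValue() {
        return new Text();
      }

      public long getPos() throws IOException {
        return pos;
      }

      public void close() throws IOException {
      }

      public float getProgress() throws IOException {
        return rows.length == 0 ? 1.0f : (float) pos / rows.length;
      }
    }

A real AcidInputFormat implementation would additionally wire this key type through getRecordReader, as the hunks above do.
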
Modified: hive/trunk/ql/src/test/results/clientnegative/bucket_mapjoin_mismatch1.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientnegative/bucket_mapjoin_mismatch1.q.out?rev=1618664&r1=1618663&r2=1618664&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientnegative/bucket_mapjoin_mismatch1.q.out (original)
+++ hive/trunk/ql/src/test/results/clientnegative/bucket_mapjoin_mismatch1.q.out Mon Aug 18 17:05:02 2014
@@ -133,10 +133,10 @@ STAGE PLANS:
           condition expressions:
             0 {KEY.reducesinkkey0} {VALUE._col0}
             1 {VALUE._col0}
-          outputColumnNames: _col0, _col1, _col6
+          outputColumnNames: _col0, _col1, _col7
           Statistics: Num rows: 22 Data size: 2310 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string)
+            expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string)
             outputColumnNames: _col0, _col1, _col2
             Statistics: Num rows: 22 Data size: 2310 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
@@ -209,10 +209,10 @@ STAGE PLANS:
                 keys:
                   0 key (type: int)
                   1 key (type: int)
-                outputColumnNames: _col0, _col1, _col6
+                outputColumnNames: _col0, _col1, _col7
                 Statistics: Num rows: 22 Data size: 2310 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string)
+                  expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string)
                   outputColumnNames: _col0, _col1, _col2
                   Statistics: Num rows: 22 Data size: 2310 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator

Modified: hive/trunk/ql/src/test/results/clientnegative/sortmerge_mapjoin_mismatch_1.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientnegative/sortmerge_mapjoin_mismatch_1.q.out?rev=1618664&r1=1618663&r2=1618664&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientnegative/sortmerge_mapjoin_mismatch_1.q.out (original)
+++ hive/trunk/ql/src/test/results/clientnegative/sortmerge_mapjoin_mismatch_1.q.out Mon Aug 18 17:05:02 2014
@@ -104,10 +104,10 @@ STAGE PLANS:
                 keys:
                   0 key (type: int)
                   1 key (type: int)
-                outputColumnNames: _col0, _col1, _col4, _col5
+                outputColumnNames: _col0, _col1, _col5, _col6
                 Statistics: Num rows: 275 Data size: 2646 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string)
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)
                   outputColumnNames: _col0, _col1, _col2, _col3
                   Statistics: Num rows: 275 Data size: 2646 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator

Modified: hive/trunk/ql/src/test/results/clientnegative/udf_assert_true.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientnegative/udf_assert_true.q.out?rev=1618664&r1=1618663&r2=1618664&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientnegative/udf_assert_true.q.out (original)
+++ hive/trunk/ql/src/test/results/clientnegative/udf_assert_true.q.out Mon Aug 18 17:05:02 2014
@@ -23,10 +23,10 @@ STAGE PLANS:
               Select Operator
                 Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
                 Lateral View Join Operator
-                  outputColumnNames: _col4
+                  outputColumnNames: _col5
                   Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE
                   Select Operator
-                    expressions: assert_true((_col4 > 0)) (type: void)
+                    expressions: assert_true((_col5 > 0)) (type: void)
                     outputColumnNames: _col0
                     Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE
                     Limit
@@ -47,10 +47,10 @@ STAGE PLANS:
                   Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
                   function name: explode
                   Lateral View Join Operator
-                    outputColumnNames: _col4
+                    outputColumnNames: _col5
                     Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE
                     Select Operator
-                      expressions: assert_true((_col4 > 0)) (type: void)
+                      expressions: assert_true((_col5 > 0)) (type: void)
                       outputColumnNames: _col0
                       Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE
                       Limit
@@ -100,10 +100,10 @@ STAGE PLANS:
               Select Operator
                 Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
                 Lateral View Join Operator
-                  outputColumnNames: _col4
+                  outputColumnNames: _col5
                   Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE
                   Select Operator
-                    expressions: assert_true((_col4 < 2)) (type: void)
+                    expressions: assert_true((_col5 < 2)) (type: void)
                     outputColumnNames: _col0
                     Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE
                     Limit
@@ -124,10 +124,10 @@ STAGE PLANS:
                   Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
                   function name: explode
                   Lateral View Join Operator
-                    outputColumnNames: _col4
+                    outputColumnNames: _col5
                     Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE
                     Select Operator
-                      expressions: assert_true((_col4 < 2)) (type: void)
+                      expressions: assert_true((_col5 < 2)) (type: void)
                       outputColumnNames: _col0
                       Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE
                       Limit

Modified: hive/trunk/ql/src/test/results/clientnegative/udf_assert_true2.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientnegative/udf_assert_true2.q.out?rev=1618664&r1=1618663&r2=1618664&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientnegative/udf_assert_true2.q.out (original)
+++ hive/trunk/ql/src/test/results/clientnegative/udf_assert_true2.q.out Mon Aug 18 17:05:02 2014
@@ -18,10 +18,10 @@ STAGE PLANS:
               Select Operator
                 Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
                 Lateral View Join Operator
-                  outputColumnNames: _col4
+                  outputColumnNames: _col5
                   Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE
                   Select Operator
-                    expressions: (1 + assert_true((_col4 < 2))) (type: double)
+                    expressions: (1 + assert_true((_col5 < 2))) (type: double)
                     outputColumnNames: _col0
                     Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE
                     Limit
@@ -42,10 +42,10 @@ STAGE PLANS:
                   Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
                   function name: explode
                   Lateral View Join Operator
-                    outputColumnNames: _col4
+                    outputColumnNames: _col5
                     Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE
                     Select Operator
-                      expressions: (1 + assert_true((_col4 < 2))) (type: double)
+                      expressions: (1 + assert_true((_col5 < 2))) (type: double)
                       outputColumnNames: _col0
                       Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE
                       Limit

Modified: hive/trunk/ql/src/test/results/clientpositive/allcolref_in_udf.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/allcolref_in_udf.q.out?rev=1618664&r1=1618663&r2=1618664&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/allcolref_in_udf.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/allcolref_in_udf.q.out Mon Aug 18 17:05:02 2014
@@ -112,10 +112,10 @@ STAGE PLANS:
           condition expressions:
             0 {VALUE._col0} {VALUE._col1}
             1 {VALUE._col0} {VALUE._col1}
-          outputColumnNames: _col0, _col1, _col4, _col5
+          outputColumnNames: _col0, _col1, _col5, _col6
           Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: 2 (type: int), concat(_col0, _col1, _col4, _col5) (type: string), concat(_col0, _col1) (type: string), concat(_col4, _col5) (type: string), concat(_col0, _col1, _col4) (type: string), concat(_col0, _col4, _col5) (type: string)
+            expressions: 2 (type: int), concat(_col0, _col1, _col5, _col6) (type: string), concat(_col0, _col1) (type: string), concat(_col5, _col6) (type: string), concat(_col0, _col1, _col5) (type: string), concat(_col0, _col5, _col6) (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
             Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE
             UDTF Operator

Modified: hive/trunk/ql/src/test/results/clientpositive/annotate_stats_join.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/annotate_stats_join.q.out?rev=1618664&r1=1618663&r2=1618664&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/annotate_stats_join.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/annotate_stats_join.q.out Mon Aug 18 17:05:02 2014
@@ -193,10 +193,10 @@ STAGE PLANS:
           condition expressions:
             0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1}
             1 {KEY.reducesinkkey0} {VALUE._col0}
-          outputColumnNames: _col0, _col1, _col2, _col5, _col6
+          outputColumnNames: _col0, _col1, _col2, _col6, _col7
           Statistics: Num rows: 41 Data size: 7954 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
-            expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col5 (type: int), _col6 (type: string)
+            expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col6 (type: int), _col7 (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4
             Statistics: Num rows: 41 Data size: 7954 Basic stats: COMPLETE Column stats: COMPLETE
             File Output Operator
@@ -259,13 +259,13 @@ STAGE PLANS:
           condition expressions:
             0 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0}
             1 {KEY.reducesinkkey0} {KEY.reducesinkkey1}
-          outputColumnNames: _col0, _col1, _col2, _col5, _col6
+          outputColumnNames: _col0, _col1, _col2, _col6, _col7
           Statistics: Num rows: 6 Data size: 1164 Basic stats: COMPLETE Column stats: COMPLETE
           Filter Operator
-            predicate: ((_col1 = _col5) and (_col0 = _col6)) (type: boolean)
+            predicate: ((_col1 = _col6) and (_col0 = _col7)) (type: boolean)
             Statistics: Num rows: 1 Data size: 194 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col5 (type: int), _col6 (type: string)
+              expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col6 (type: int), _col7 (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3, _col4
               Statistics: Num rows: 1 Data size: 194 Basic stats: COMPLETE Column stats: COMPLETE
               File Output Operator
@@ -324,10 +324,10 @@ STAGE PLANS:
           condition expressions:
             0 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0}
             1 {KEY.reducesinkkey0} {KEY.reducesinkkey1}
-          outputColumnNames: _col0, _col1, _col2, _col5, _col6
+          outputColumnNames: _col0, _col1, _col2, _col6, _col7
           Statistics: Num rows: 6 Data size: 1164 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
-            expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col5 (type: int), _col6 (type: string)
+            expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col6 (type: int), _col7 (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4
             Statistics: Num rows: 6 Data size: 1164 Basic stats: COMPLETE Column stats: COMPLETE
             File Output Operator
@@ -390,13 +390,13 @@ STAGE PLANS:
           condition expressions:
             0 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0}
             1 {KEY.reducesinkkey0} {KEY.reducesinkkey1}
-          outputColumnNames: _col0, _col1, _col2, _col5, _col6
+          outputColumnNames: _col0, _col1, _col2, _col6, _col7
           Statistics: Num rows: 1 Data size: 194 Basic stats: COMPLETE Column stats: COMPLETE
           Filter Operator
-            predicate: (((_col1 = _col5) and (_col0 = _col6)) and (_col6 = _col0)) (type: boolean)
+            predicate: (((_col1 = _col6) and (_col0 = _col7)) and (_col7 = _col0)) (type: boolean)
             Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
             Select Operator
-              expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col5 (type: int), _col6 (type: string)
+              expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col6 (type: int), _col7 (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3, _col4
               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
               File Output Operator
@@ -474,10 +474,10 @@ STAGE PLANS:
             0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1}
             1 {KEY.reducesinkkey0} {VALUE._col0}
             2 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1}
-          outputColumnNames: _col0, _col1, _col2, _col5, _col6, _col9, _col10, _col11
+          outputColumnNames: _col0, _col1, _col2, _col6, _col7, _col11, _col12, _col13
           Statistics: Num rows: 658 Data size: 192794 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
-            expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col5 (type: int), _col6 (type: string), _col9 (type: string), _col10 (type: int), _col11 (type: int)
+            expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col6 (type: int), _col7 (type: string), _col11 (type: string), _col12 (type: int), _col13 (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
             Statistics: Num rows: 658 Data size: 192794 Basic stats: COMPLETE Column stats: COMPLETE
             File Output Operator
@@ -553,10 +553,10 @@ STAGE PLANS:
             0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1}
             1 {KEY.reducesinkkey0} {VALUE._col0}
             2 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2}
-          outputColumnNames: _col0, _col1, _col2, _col5, _col6, _col9, _col10, _col11, _col12
+          outputColumnNames: _col0, _col1, _col2, _col6, _col7, _col11, _col12, _col13, _col14
           Statistics: Num rows: 47 Data size: 13912 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
-            expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col5 (type: int), _col6 (type: string), _col9 (type: string), _col10 (type: int), _col11 (type: bigint), _col12 (type: int)
+            expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col6 (type: int), _col7 (type: string), _col11 (type: string), _col12 (type: int), _col13 (type: bigint), _col14 (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
             Statistics: Num rows: 47 Data size: 13912 Basic stats: COMPLETE Column stats: COMPLETE
             File Output Operator
@@ -633,10 +633,10 @@ STAGE PLANS:
             0 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0}
             1 {KEY.reducesinkkey0} {KEY.reducesinkkey1}
             2 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1}
-          outputColumnNames: _col0, _col1, _col2, _col5, _col6, _col9, _col10, _col11, _col12
+          outputColumnNames: _col0, _col1, _col2, _col6, _col7, _col11, _col12, _col13, _col14
           Statistics: Num rows: 1 Data size: 296 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
-            expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col5 (type: int), _col6 (type: string), _col9 (type: string), _col10 (type: int), _col11 (type: bigint), _col12 (type: int)
+            expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col6 (type: int), _col7 (type: string), _col11 (type: string), _col12 (type: int), _col13 (type: bigint), _col14 (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
             Statistics: Num rows: 1 Data size: 296 Basic stats: COMPLETE Column stats: COMPLETE
             File Output Operator

Modified: hive/trunk/ql/src/test/results/clientpositive/auto_join1.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/auto_join1.q.out?rev=1618664&r1=1618663&r2=1618664&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/auto_join1.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/auto_join1.q.out Mon Aug 18 17:05:02 2014
@@ -61,10 +61,10 @@ STAGE PLANS:
                 keys:
                   0 key (type: string)
                   1 key (type: string)
-                outputColumnNames: _col0, _col5
+                outputColumnNames: _col0, _col6
                 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: UDFToInteger(_col0) (type: int), _col5 (type: string)
+                  expressions: UDFToInteger(_col0) (type: int), _col6 (type: string)
                   outputColumnNames: _col0, _col1
                   Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator

Modified: hive/trunk/ql/src/test/results/clientpositive/auto_join14.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/auto_join14.q.out?rev=1618664&r1=1618663&r2=1618664&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/auto_join14.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/auto_join14.q.out Mon Aug 18 17:05:02 2014
@@ -65,10 +65,10 @@ STAGE PLANS:
                 keys:
                   0 key (type: string)
                   1 key (type: string)
-                outputColumnNames: _col0, _col5
+                outputColumnNames: _col0, _col6
                 Statistics: Num rows: 11 Data size: 1102 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: UDFToInteger(_col0) (type: int), _col5 (type: string)
+                  expressions: UDFToInteger(_col0) (type: int), _col6 (type: string)
                   outputColumnNames: _col0, _col1
                   Statistics: Num rows: 11 Data size: 1102 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator

Modified: hive/trunk/ql/src/test/results/clientpositive/auto_join15.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/auto_join15.q.out?rev=1618664&r1=1618663&r2=1618664&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/auto_join15.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/auto_join15.q.out Mon Aug 18 17:05:02 2014
@@ -61,10 +61,10 @@ STAGE PLANS:
                 keys:
                   0 key (type: string)
                   1 key (type: string)
-                outputColumnNames: _col0, _col1, _col4, _col5
+                outputColumnNames: _col0, _col1, _col5, _col6
                 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string)
+                  expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string)
                   outputColumnNames: _col0, _col1, _col2, _col3
                   Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator

Modified: hive/trunk/ql/src/test/results/clientpositive/auto_join17.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/auto_join17.q.out?rev=1618664&r1=1618663&r2=1618664&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/auto_join17.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/auto_join17.q.out Mon Aug 18 17:05:02 2014
@@ -61,10 +61,10 @@ STAGE PLANS:
                 keys:
                   0 key (type: string)
                   1 key (type: string)
-                outputColumnNames: _col0, _col1, _col4, _col5
+                outputColumnNames: _col0, _col1, _col5, _col6
                 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(_col4) (type: int), _col5 (type: string)
+                  expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(_col5) (type: int), _col6 (type: string)
                   outputColumnNames: _col0, _col1, _col2, _col3
                   Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator

Modified: hive/trunk/ql/src/test/results/clientpositive/auto_join19.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/auto_join19.q.out?rev=1618664&r1=1618663&r2=1618664&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/auto_join19.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/auto_join19.q.out Mon Aug 18 17:05:02 2014
@@ -63,10 +63,10 @@ STAGE PLANS:
                 keys:
                   0 key (type: string)
                   1 key (type: string)
-                outputColumnNames: _col0, _col7
+                outputColumnNames: _col0, _col8
                 Statistics: Num rows: 127 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: UDFToInteger(_col0) (type: int), _col7 (type: string)
+                  expressions: UDFToInteger(_col0) (type: int), _col8 (type: string)
                   outputColumnNames: _col0, _col1
                   Statistics: Num rows: 127 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator

Modified: hive/trunk/ql/src/test/results/clientpositive/auto_join2.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/auto_join2.q.out?rev=1618664&r1=1618663&r2=1618664&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/auto_join2.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/auto_join2.q.out Mon Aug 18 17:05:02 2014
@@ -57,7 +57,7 @@ STAGE PLANS:
                   0 {_col0}
                   1 {value}
                 keys:
-                  0 (_col0 + _col4) (type: double)
+                  0 (_col0 + _col5) (type: double)
                   1 UDFToDouble(key) (type: double)
 
   Stage: Stage-6
@@ -78,10 +78,10 @@ STAGE PLANS:
                 keys:
                   0 key (type: string)
                   1 key (type: string)
-                outputColumnNames: _col0, _col4
+                outputColumnNames: _col0, _col5
                 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
-                  predicate: (_col0 + _col4) is not null (type: boolean)
+                  predicate: (_col0 + _col5) is not null (type: boolean)
                   Statistics: Num rows: 16 Data size: 1649 Basic stats: COMPLETE Column stats: NONE
                   Map Join Operator
                     condition map:
@@ -90,12 +90,12 @@ STAGE PLANS:
                       0 {_col0}
                       1 {value}
                     keys:
-                      0 (_col0 + _col4) (type: double)
+                      0 (_col0 + _col5) (type: double)
                       1 UDFToDouble(key) (type: double)
-                    outputColumnNames: _col0, _col9
+                    outputColumnNames: _col0, _col11
                     Statistics: Num rows: 17 Data size: 1813 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: UDFToInteger(_col0) (type: int), _col9 (type: string)
+                      expressions: UDFToInteger(_col0) (type: int), _col11 (type: string)
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 17 Data size: 1813 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator

Modified: hive/trunk/ql/src/test/results/clientpositive/auto_join20.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/auto_join20.q.out?rev=1618664&r1=1618663&r2=1618664&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/auto_join20.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/auto_join20.q.out Mon Aug 18 17:05:02 2014
@@ -94,10 +94,10 @@ STAGE PLANS:
                 0 key (type: string)
                 1 key (type: string)
                 2 key (type: string)
-              outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9
+              outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
               Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string)
+                expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
@@ -270,10 +270,10 @@ STAGE PLANS:
                 0 key (type: string)
                 1 key (type: string)
                 2 key (type: string)
-              outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9
+              outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
               Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string)
+                expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator

Modified: hive/trunk/ql/src/test/results/clientpositive/auto_join21.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/auto_join21.q.out?rev=1618664&r1=1618663&r2=1618664&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/auto_join21.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/auto_join21.q.out Mon Aug 18 17:05:02 2014
@@ -80,10 +80,10 @@ STAGE PLANS:
                 0 key (type: string)
                 1 key (type: string)
                 2 key (type: string)
-              outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9
+              outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
               Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string)
+                expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator

Modified: hive/trunk/ql/src/test/results/clientpositive/auto_join22.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/auto_join22.q.out?rev=1618664&r1=1618663&r2=1618664&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/auto_join22.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/auto_join22.q.out Mon Aug 18 17:05:02 2014
@@ -82,10 +82,10 @@ STAGE PLANS:
                     keys:
                       0 key (type: string)
                       1 _col2 (type: string)
-                    outputColumnNames: _col7
+                    outputColumnNames: _col8
                     Statistics: Num rows: 34 Data size: 3515 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: _col7 (type: string)
+                      expressions: _col8 (type: string)
                       outputColumnNames: _col3
                       Statistics: Num rows: 34 Data size: 3515 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator