Posted to commits@hive.apache.org by gu...@apache.org on 2014/09/16 00:46:49 UTC

svn commit: r1625176 [1/9] - in /hive/branches/cbo: ./ common/src/java/org/apache/hadoop/hive/common/ common/src/java/org/apache/hadoop/hive/conf/ contrib/src/test/results/clientpositive/ data/conf/tez/ data/files/ itests/hive-unit/src/test/java/org/ap...

Author: gunther
Date: Mon Sep 15 22:46:44 2014
New Revision: 1625176

URL: http://svn.apache.org/r1625176
Log:
Merge latest trunk into cbo branch. (Gunther Hagleitner)

Added:
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java
      - copied unchanged from r1625173, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
      - copied unchanged from r1625173, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/RCFileMergeOperator.java
      - copied unchanged from r1625173, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/RCFileMergeOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DataInputByteBuffer.java
      - copied unchanged from r1625173, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DataInputByteBuffer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileRecordProcessor.java
      - copied unchanged from r1625173, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileRecordProcessor.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileTezProcessor.java
      - copied unchanged from r1625173, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileTezProcessor.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileInputFormat.java
      - copied unchanged from r1625173, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileInputFormat.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileMapper.java
      - copied unchanged from r1625173, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileMapper.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileOutputFormat.java
      - copied unchanged from r1625173, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileOutputFormat.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileTask.java
      - copied unchanged from r1625173, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileTask.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileWork.java
      - copied unchanged from r1625173, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileWork.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java
      - copied unchanged from r1625173, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/FileMergeDesc.java
      - copied unchanged from r1625173, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/FileMergeDesc.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/OrcFileMergeDesc.java
      - copied unchanged from r1625173, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/OrcFileMergeDesc.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/RCFileMergeDesc.java
      - copied unchanged from r1625173, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/RCFileMergeDesc.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java
      - copied unchanged from r1625173, hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java
    hive/branches/cbo/ql/src/test/queries/clientnegative/acid_overwrite.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientnegative/acid_overwrite.q
    hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_uri_export.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientnegative/authorization_uri_export.q
    hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_uri_import.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientnegative/authorization_uri_import.q
    hive/branches/cbo/ql/src/test/queries/clientnegative/delete_not_acid.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientnegative/delete_not_acid.q
    hive/branches/cbo/ql/src/test/queries/clientnegative/update_not_acid.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientnegative/update_not_acid.q
    hive/branches/cbo/ql/src/test/queries/clientnegative/update_partition_col.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientnegative/update_partition_col.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/authorization_default_create_table_owner_privs.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientpositive/authorization_default_create_table_owner_privs.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/delete_all_partitioned.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientpositive/delete_all_partitioned.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/delete_orig_table.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientpositive/delete_orig_table.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/delete_tmp_table.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientpositive/delete_tmp_table.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/delete_where_no_match.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientpositive/delete_where_no_match.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/delete_where_non_partitioned.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientpositive/delete_where_non_partitioned.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/delete_where_partitioned.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientpositive/delete_where_partitioned.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/delete_whole_partition.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientpositive/delete_whole_partition.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/insert_orig_table.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientpositive/insert_orig_table.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/insert_update_delete.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientpositive/insert_update_delete.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/insert_values_orig_table.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientpositive/insert_values_orig_table.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/insert_values_partitioned.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientpositive/insert_values_partitioned.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/insert_values_tmp_table.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientpositive/insert_values_tmp_table.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/orc_merge5.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientpositive/orc_merge5.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/orc_merge6.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientpositive/orc_merge6.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/orc_merge7.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientpositive/orc_merge7.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/update_all_non_partitioned.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientpositive/update_all_non_partitioned.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/update_all_partitioned.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientpositive/update_all_partitioned.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/update_all_types.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientpositive/update_all_types.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/update_orig_table.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientpositive/update_orig_table.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/update_tmp_table.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientpositive/update_tmp_table.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/update_two_cols.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientpositive/update_two_cols.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/update_where_no_match.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientpositive/update_where_no_match.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/update_where_non_partitioned.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientpositive/update_where_non_partitioned.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/update_where_partitioned.q
      - copied unchanged from r1625173, hive/trunk/ql/src/test/queries/clientpositive/update_where_partitioned.q
    hive/branches/cbo/ql/src/test/results/clientnegative/acid_overwrite.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientnegative/acid_overwrite.q.out
    hive/branches/cbo/ql/src/test/results/clientnegative/authorization_uri_export.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientnegative/authorization_uri_export.q.out
    hive/branches/cbo/ql/src/test/results/clientnegative/authorization_uri_import.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientnegative/authorization_uri_import.q.out
    hive/branches/cbo/ql/src/test/results/clientnegative/delete_not_acid.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientnegative/delete_not_acid.q.out
    hive/branches/cbo/ql/src/test/results/clientnegative/update_not_acid.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientnegative/update_not_acid.q.out
    hive/branches/cbo/ql/src/test/results/clientnegative/update_partition_col.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientnegative/update_partition_col.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/authorization_default_create_table_owner_privs.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/authorization_default_create_table_owner_privs.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/delete_all_non_partitioned.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/delete_all_non_partitioned.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/delete_all_partitioned.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/delete_all_partitioned.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/delete_orig_table.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/delete_orig_table.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/delete_tmp_table.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/delete_tmp_table.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/delete_where_no_match.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/delete_where_no_match.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/delete_where_non_partitioned.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/delete_where_non_partitioned.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/delete_where_partitioned.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/delete_where_partitioned.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/delete_whole_partition.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/delete_whole_partition.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/insert_orig_table.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/insert_orig_table.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/insert_update_delete.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/insert_update_delete.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/insert_values_dynamic_partitioned.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/insert_values_dynamic_partitioned.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/insert_values_non_partitioned.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/insert_values_non_partitioned.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/insert_values_orig_table.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/insert_values_orig_table.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/insert_values_partitioned.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/insert_values_partitioned.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/insert_values_tmp_table.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/insert_values_tmp_table.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/orc_merge5.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/orc_merge5.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/orc_merge6.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/orc_merge6.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/orc_merge7.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/orc_merge7.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/correlationoptimizer1.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/tez/correlationoptimizer1.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/delete_all_non_partitioned.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/tez/delete_all_non_partitioned.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/delete_all_partitioned.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/tez/delete_all_partitioned.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/delete_orig_table.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/tez/delete_orig_table.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/delete_tmp_table.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/tez/delete_tmp_table.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/delete_where_no_match.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/tez/delete_where_no_match.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/delete_where_non_partitioned.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/tez/delete_where_non_partitioned.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/delete_where_partitioned.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/tez/delete_where_partitioned.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/delete_whole_partition.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/tez/delete_whole_partition.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/insert_orig_table.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/tez/insert_orig_table.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/insert_update_delete.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/tez/insert_update_delete.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/insert_values_dynamic_partitioned.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/tez/insert_values_dynamic_partitioned.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/insert_values_non_partitioned.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/tez/insert_values_non_partitioned.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/insert_values_orig_table.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/tez/insert_values_orig_table.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/insert_values_partitioned.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/tez/insert_values_partitioned.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/insert_values_tmp_table.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/tez/insert_values_tmp_table.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/orc_merge5.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/tez/orc_merge5.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/orc_merge6.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/tez/orc_merge6.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/orc_merge7.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/tez/orc_merge7.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/orc_merge_incompat1.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/tez/orc_merge_incompat1.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/orc_merge_incompat2.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/tez/orc_merge_incompat2.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/update_after_multiple_inserts.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/tez/update_after_multiple_inserts.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/update_all_non_partitioned.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/tez/update_all_non_partitioned.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/update_all_partitioned.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/tez/update_all_partitioned.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/update_all_types.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/tez/update_all_types.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/update_orig_table.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/tez/update_orig_table.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/update_tmp_table.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/tez/update_tmp_table.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/update_two_cols.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/tez/update_two_cols.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/update_where_no_match.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/tez/update_where_no_match.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/update_where_non_partitioned.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/tez/update_where_non_partitioned.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/update_where_partitioned.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/tez/update_where_partitioned.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/update_after_multiple_inserts.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/update_after_multiple_inserts.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/update_all_non_partitioned.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/update_all_non_partitioned.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/update_all_partitioned.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/update_all_partitioned.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/update_all_types.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/update_all_types.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/update_orig_table.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/update_orig_table.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/update_tmp_table.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/update_tmp_table.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/update_two_cols.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/update_two_cols.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/update_where_no_match.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/update_where_no_match.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/update_where_non_partitioned.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/update_where_non_partitioned.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/update_where_partitioned.q.out
      - copied unchanged from r1625173, hive/trunk/ql/src/test/results/clientpositive/update_where_partitioned.q.out
    hive/branches/cbo/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/PropValueUnion.java
      - copied unchanged from r1625173, hive/trunk/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/PropValueUnion.java
    hive/branches/cbo/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/SetIntString.java
      - copied unchanged from r1625173, hive/trunk/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/SetIntString.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ThriftObjectInspectorUtils.java
      - copied unchanged from r1625173, hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ThriftObjectInspectorUtils.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ThriftUnionObjectInspector.java
      - copied unchanged from r1625173, hive/trunk/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ThriftUnionObjectInspector.java
Removed:
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeInputFormat.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeMapper.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeOutputFormat.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeTask.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeWork.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileMergeMapper.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileMergeMapper.java
Modified:
    hive/branches/cbo/   (props changed)
    hive/branches/cbo/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
    hive/branches/cbo/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    hive/branches/cbo/contrib/src/test/results/clientpositive/udf_example_arraymapstruct.q.out
    hive/branches/cbo/data/conf/tez/hive-site.xml
    hive/branches/cbo/data/files/complex.seq
    hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java
    hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
    hive/branches/cbo/itests/src/test/resources/testconfiguration.properties
    hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
    hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
    hive/branches/cbo/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java
    hive/branches/cbo/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/Context.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomPartitionEdge.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/RecordProcessor.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/hooks/ReadEntity.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileStripeMergeInputFormat.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/Writer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileBlockMergeInputFormat.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/StorageFormat.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToInteger.java
    hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIf.java
    hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
    hive/branches/cbo/ql/src/test/queries/clientpositive/exim_00_nonpart_empty.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/input_lazyserde.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/list_bucket_dml_8.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/orc_merge1.q
    hive/branches/cbo/ql/src/test/queries/clientpositive/stats_only_null.q
    hive/branches/cbo/ql/src/test/results/beelinepositive/convert_enum_to_string.q.out
    hive/branches/cbo/ql/src/test/results/clientnegative/describe_xpath1.q.out
    hive/branches/cbo/ql/src/test/results/clientnegative/describe_xpath2.q.out
    hive/branches/cbo/ql/src/test/results/clientnegative/invalid_cast_from_binary_1.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/case_sensitivity.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/columnarserde_create_shortcut.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/convert_enum_to_string.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/exim_00_nonpart_empty.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/exim_01_nonpart.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/exim_02_00_part_empty.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/exim_02_part.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/exim_03_nonpart_over_compat.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/exim_04_all_part.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/exim_04_evolved_parts.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/exim_05_some_part.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/exim_06_one_part.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/exim_07_all_part_over_nonoverlap.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/exim_08_nonpart_rename.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/exim_09_part_spec_nonoverlap.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/exim_10_external_managed.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/exim_11_managed_external.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/exim_12_external_location.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/exim_13_managed_location.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/exim_14_managed_location_over_existing.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/exim_15_external_part.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/exim_16_part_external.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/exim_17_part_managed.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/exim_18_part_external.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/exim_19_00_part_external_location.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/exim_19_part_external_location.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/exim_20_part_managed_location.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/exim_22_import_exist_authsuccess.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/exim_23_import_part_authsuccess.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/exim_24_import_nonexist_authsuccess.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/exim_hidden_files.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/import_exported_table.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/input17.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/input5.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/input_columnarserde.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/input_dynamicserde.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/input_lazyserde.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/input_testxpath.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/input_testxpath2.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/input_testxpath3.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/input_testxpath4.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/inputddl8.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/join_thrift.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/list_bucket_dml_10.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/list_bucket_dml_4.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/list_bucket_dml_6.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/list_bucket_dml_9.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/merge_dynamic_partition4.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/merge_dynamic_partition5.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/orc_createas1.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/orc_merge1.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/orc_merge3.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/rcfile_createas1.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/rcfile_merge1.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/rcfile_merge2.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/rcfile_merge3.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/stats_only_null.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/tez/orc_merge1.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/udf_case_thrift.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/udf_coalesce.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/udf_isnull_isnotnull.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/udf_size.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/union21.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/union_remove_10.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/union_remove_11.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/union_remove_12.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/union_remove_13.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/union_remove_14.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/union_remove_16.q.out
    hive/branches/cbo/ql/src/test/results/clientpositive/union_remove_9.q.out
    hive/branches/cbo/ql/src/test/results/compiler/plan/case_sensitivity.q.xml
    hive/branches/cbo/ql/src/test/results/compiler/plan/input5.q.xml
    hive/branches/cbo/ql/src/test/results/compiler/plan/input_testxpath.q.xml
    hive/branches/cbo/ql/src/test/results/compiler/plan/input_testxpath2.q.xml
    hive/branches/cbo/serde/if/test/complex.thrift
    hive/branches/cbo/serde/src/gen/thrift/gen-cpp/complex_types.cpp
    hive/branches/cbo/serde/src/gen/thrift/gen-cpp/complex_types.h
    hive/branches/cbo/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/Complex.java
    hive/branches/cbo/serde/src/gen/thrift/gen-py/complex/ttypes.py
    hive/branches/cbo/serde/src/gen/thrift/gen-rb/complex_types.rb
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorFactory.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ReflectionStructObjectInspector.java
    hive/branches/cbo/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/StandardListObjectInspector.java
    hive/branches/cbo/serde/src/test/org/apache/hadoop/hive/serde2/objectinspector/TestObjectInspectorUtils.java
    hive/branches/cbo/serde/src/test/org/apache/hadoop/hive/serde2/objectinspector/TestThriftObjectInspectors.java
    hive/branches/cbo/serde/src/test/org/apache/hadoop/hive/serde2/thrift_test/CreateSequenceFile.java

Propchange: hive/branches/cbo/
------------------------------------------------------------------------------
  Merged /hive/trunk:r1624387-1625173

Modified: hive/branches/cbo/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/common/src/java/org/apache/hadoop/hive/common/FileUtils.java?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/common/src/java/org/apache/hadoop/hive/common/FileUtils.java (original)
+++ hive/branches/cbo/common/src/java/org/apache/hadoop/hive/common/FileUtils.java Mon Sep 15 22:46:44 2014
@@ -446,12 +446,26 @@ public final class FileUtils {
   public static boolean isLocalFile(HiveConf conf, String fileName) {
     try {
      // do best effort to determine if this is a local file
-      FileSystem fsForFile = FileSystem.get(new URI(fileName), conf);
-      return LocalFileSystem.class.isInstance(fsForFile);
+      return isLocalFile(conf, new URI(fileName));
     } catch (URISyntaxException e) {
       LOG.warn("Unable to create URI from " + fileName, e);
+    }
+    return false;
+  }
+
+  /**
+   * A best effort attempt to determine if the file is a local file
+   * @param conf
+   * @param fileUri
+   * @return true if it was successfully able to determine that it is a local file
+   */
+  public static boolean isLocalFile(HiveConf conf, URI fileUri) {
+    try {
+      // do best effort to determine if this is a local file
+      FileSystem fsForFile = FileSystem.get(fileUri, conf);
+      return LocalFileSystem.class.isInstance(fsForFile);
     } catch (IOException e) {
-      LOG.warn("Unable to get FileSystem for " + fileName, e);
+      LOG.warn("Unable to get FileSystem for " + fileUri, e);
     }
     return false;
   }

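The FileUtils change above splits the local-file check into two overloads: the existing String version now parses the URI and delegates, while callers that already hold a URI can call the new overload directly and skip the parse. A minimal usage sketch under that reading (the paths and conf setup are illustrative only, not from this commit):

    import java.net.URI;
    import org.apache.hadoop.hive.common.FileUtils;
    import org.apache.hadoop.hive.conf.HiveConf;

    public class LocalFileCheckExample {
      public static void main(String[] args) throws Exception {
        HiveConf conf = new HiveConf();
        // String form: parses the URI internally and returns false on bad syntax.
        boolean fromString = FileUtils.isLocalFile(conf, "file:///tmp/data.txt");
        // URI form (added in this commit): avoids re-parsing an already-built URI.
        boolean fromUri = FileUtils.isLocalFile(conf, new URI("hdfs://namenode:8020/warehouse/t"));
        System.out.println(fromString + " " + fromUri);
      }
    }
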
Modified: hive/branches/cbo/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (original)
+++ hive/branches/cbo/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java Mon Sep 15 22:46:44 2014
@@ -301,6 +301,9 @@ public class HiveConf extends Configurat
 
     HIVE_IN_TEST("hive.in.test", false, "internal usage only, true in test mode", true),
 
+    HIVE_IN_TEZ_TEST("hive.in.tez.test", false, "internal use only, true when in testing tez",
+        true),
+
     LOCALMODEAUTO("hive.exec.mode.local.auto", false,
         "Let Hive determine whether to run in local mode automatically"),
     LOCALMODEMAXBYTES("hive.exec.mode.local.auto.inputbytes.max", 134217728L,
@@ -806,18 +809,11 @@ public class HiveConf extends Configurat
         "map-reduce job to merge the output files into bigger files. This is only done for map-only jobs \n" +
         "if hive.merge.mapfiles is true, and for map-reduce jobs if hive.merge.mapredfiles is true."),
     HIVEMERGERCFILEBLOCKLEVEL("hive.merge.rcfile.block.level", true, ""),
-    HIVEMERGEINPUTFORMATBLOCKLEVEL("hive.merge.input.format.block.level",
-        "org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat", ""),
     HIVEMERGEORCFILESTRIPELEVEL("hive.merge.orcfile.stripe.level", true,
         "When hive.merge.mapfiles or hive.merge.mapredfiles is enabled while writing a\n" +
         " table with ORC file format, enabling this config will do stripe level fast merge\n" +
         " for small ORC files. Note that enabling this config will not honor padding tolerance\n" +
         " config (hive.exec.orc.block.padding.tolerance)."),
-    HIVEMERGEINPUTFORMATSTRIPELEVEL("hive.merge.input.format.stripe.level",
-        "org.apache.hadoop.hive.ql.io.orc.OrcFileStripeMergeInputFormat",
-        "Input file format to use for ORC stripe level merging (for internal use only)"),
-    HIVEMERGECURRENTJOBHASDYNAMICPARTITIONS(
-        "hive.merge.current.job.has.dynamic.partitions", false, ""),
 
     HIVEUSEEXPLICITRCFILEHEADER("hive.exec.rcfile.use.explicit.header", true,
         "If this is set the header for RCFiles will simply be RCF.  If this is not\n" +
@@ -1679,17 +1675,6 @@ public class HiveConf extends Configurat
         "               it will now take 512 reducers, similarly if the max number of reducers is 511,\n" +
         "               and a job was going to use this many, it will now use 256 reducers."),
 
-    /* The following section contains all configurations used for list bucketing feature.*/
-    /* This is not for clients. but only for block merge task. */
-    /* This is used by BlockMergeTask to send out flag to RCFileMergeMapper */
-    /* about alter table...concatenate and list bucketing case. */
-    HIVEMERGECURRENTJOBCONCATENATELISTBUCKETING(
-        "hive.merge.current.job.concatenate.list.bucketing", true, ""),
-    /* This is not for clients. but only for block merge task. */
-    /* This is used by BlockMergeTask to send out flag to RCFileMergeMapper */
-    /* about depth of list bucketing. */
-    HIVEMERGECURRENTJOBCONCATENATELISTBUCKETINGDEPTH(
-            "hive.merge.current.job.concatenate.list.bucketing.depth", 0, ""),
     HIVEOPTLISTBUCKETING("hive.optimize.listbucketing", false,
         "Enable list bucketing optimizer. Default value is false so that we disable it by default."),
 

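The HiveConf hunk adds HIVE_IN_TEZ_TEST following the same ConfVars pattern as HIVE_IN_TEST just above it: key name, default value, description, and a trailing boolean marking it internal-only. It also drops several merge-related internal configs made obsolete by the new merge operators. A short sketch of how such a flag is read, mirroring the HiveConf.getBoolVar call visible in the TxnHandler hunk further down:

    import org.apache.hadoop.hive.conf.HiveConf;

    public class TezTestFlagExample {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // Returns the default (false) unless hive.in.tez.test was set, e.g. by a test harness.
        boolean inTezTest = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_IN_TEZ_TEST);
        System.out.println("hive.in.tez.test = " + inTezTest);
      }
    }
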
Modified: hive/branches/cbo/contrib/src/test/results/clientpositive/udf_example_arraymapstruct.q.out
URL: http://svn.apache.org/viewvc/hive/branches/cbo/contrib/src/test/results/clientpositive/udf_example_arraymapstruct.q.out?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/contrib/src/test/results/clientpositive/udf_example_arraymapstruct.q.out (original)
+++ hive/branches/cbo/contrib/src/test/results/clientpositive/udf_example_arraymapstruct.q.out Mon Sep 15 22:46:44 2014
@@ -34,14 +34,14 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: src_thrift
-            Statistics: Num rows: 11 Data size: 1606 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 11 Data size: 3070 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: example_arraysum(lint) (type: double), example_mapconcat(mstringstring) (type: string), example_structprint(lintstring[0]) (type: string)
               outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 11 Data size: 1606 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 11 Data size: 3070 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 11 Data size: 1606 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 11 Data size: 3070 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

Modified: hive/branches/cbo/data/conf/tez/hive-site.xml
URL: http://svn.apache.org/viewvc/hive/branches/cbo/data/conf/tez/hive-site.xml?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
Binary files - no diff available.

Modified: hive/branches/cbo/data/files/complex.seq
URL: http://svn.apache.org/viewvc/hive/branches/cbo/data/files/complex.seq?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
Binary files - no diff available.

Modified: hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java (original)
+++ hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java Mon Sep 15 22:46:44 2014
@@ -103,7 +103,7 @@ public class TestHiveHistory extends Tes
         db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, src, true, true);
         db.createTable(src, cols, null, TextInputFormat.class,
             IgnoreKeyTextOutputFormat.class);
-        db.loadTable(hadoopDataFile[i], src, false, false, false, false);
+        db.loadTable(hadoopDataFile[i], src, false, false, false, false, false);
         i++;
       }
 

Modified: hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java (original)
+++ hive/branches/cbo/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java Mon Sep 15 22:46:44 2014
@@ -147,10 +147,10 @@ public class TestCompactor {
     execSelectAndDumpData("select * from " + tblNameStg, driver, "Dumping data for " +
       tblNameStg + " after load:");
     executeStatementOnDriver("FROM " + tblNameStg +
-      " INSERT OVERWRITE TABLE " + tblName + " PARTITION(bkt=0) " +
+      " INSERT INTO TABLE " + tblName + " PARTITION(bkt=0) " +
       "SELECT a, b where a < 2", driver);
     executeStatementOnDriver("FROM " + tblNameStg +
-      " INSERT OVERWRITE TABLE " + tblName + " PARTITION(bkt=1) " +
+      " INSERT INTO TABLE " + tblName + " PARTITION(bkt=1) " +
       "SELECT a, b where a >= 2", driver);
     execSelectAndDumpData("select * from " + tblName, driver, "Dumping data for " +
       tblName + " after load:");

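The TestCompactor change swaps INSERT OVERWRITE for INSERT INTO: OVERWRITE replaces whatever the target partition already holds, while INTO appends rows alongside the existing data, which lets the compactor test accumulate the deltas it needs. A small illustration of the two statements ("target" and "staging" are made-up table names, not from this commit):

    public class InsertSemanticsExample {
      // INSERT OVERWRITE replaces the current contents of the target partition.
      static final String OVERWRITE =
          "INSERT OVERWRITE TABLE target PARTITION(bkt=0) SELECT a, b FROM staging WHERE a < 2";
      // INSERT INTO appends new rows, preserving what is already in the partition.
      static final String APPEND =
          "INSERT INTO TABLE target PARTITION(bkt=0) SELECT a, b FROM staging WHERE a < 2";
    }
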
Modified: hive/branches/cbo/itests/src/test/resources/testconfiguration.properties
URL: http://svn.apache.org/viewvc/hive/branches/cbo/itests/src/test/resources/testconfiguration.properties?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/itests/src/test/resources/testconfiguration.properties (original)
+++ hive/branches/cbo/itests/src/test/resources/testconfiguration.properties Mon Sep 15 22:46:44 2014
@@ -56,6 +56,7 @@ minitez.query.files.shared=alter_merge_2
   bucket3.q,\
   bucket4.q,\
   cbo_correctness.q,\
+  correlationoptimizer1.q,\
   count.q,\
   create_merge_compressed.q,\
   cross_join.q,\
@@ -63,6 +64,14 @@ minitez.query.files.shared=alter_merge_2
   cross_product_check_2.q,\
   ctas.q,\
   custom_input_output_format.q,\
+  delete_all_non_partitioned.q,\
+  delete_all_partitioned.q,\
+  delete_orig_table.q,\
+  delete_tmp_table.q,\
+  delete_where_no_match.q,\
+  delete_where_non_partitioned.q,\
+  delete_where_partitioned.q,\
+  delete_whole_partition.q,\
   disable_merge_for_bucketing.q,\
   dynpart_sort_opt_vectorization.q,\
   dynpart_sort_optimization.q,\
@@ -76,6 +85,13 @@ minitez.query.files.shared=alter_merge_2
   insert1.q,\
   insert_into1.q,\
   insert_into2.q,\
+  insert_orig_table.q,\
+  insert_values_dynamic_partitioned.q,\
+  insert_values_non_partitioned.q,\
  insert_values_orig_table.q,\
+  insert_values_partitioned.q,\
+  insert_values_tmp_table.q,\
+  insert_update_delete.q,\
   join0.q,\
   join1.q,\
   leftsemijoin.q,\
@@ -96,6 +112,11 @@ minitez.query.files.shared=alter_merge_2
   orc_merge2.q,\
   orc_merge3.q,\
   orc_merge4.q,\
+  orc_merge5.q,\
+  orc_merge6.q,\
+  orc_merge7.q,\
+  orc_merge_incompat1.q,\
+  orc_merge_incompat2.q,\
   parallel.q,\
   ptf.q,\
   sample1.q,\
@@ -122,6 +143,16 @@ minitez.query.files.shared=alter_merge_2
   union7.q,\
   union8.q,\
   union9.q,\
+  update_after_multiple_inserts.q,\
+  update_all_non_partitioned.q,\
+  update_all_partitioned.q,\
+  update_all_types.q,\
+  update_orig_table.q,\
+  update_tmp_table.q,\
+  update_where_no_match.q,\
+  update_where_non_partitioned.q,\
+  update_where_partitioned.q,\
+  update_two_cols.q,\
   vector_cast_constant.q,\
   vector_data_types.q,\
   vector_decimal_aggregate.q,\

Modified: hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java (original)
+++ hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java Mon Sep 15 22:46:44 2014
@@ -21,8 +21,6 @@ package org.apache.hadoop.hive.metastore
 import static org.apache.commons.lang.StringUtils.join;
 import static org.apache.commons.lang.StringUtils.repeat;
 
-import java.math.BigDecimal;
-import java.nio.ByteBuffer;
 import java.sql.Connection;
 import java.sql.SQLException;
 import java.text.ParseException;
@@ -42,13 +40,11 @@ import javax.jdo.datastore.JDOConnection
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.commons.math3.stat.StatUtils;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.Decimal;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Order;

Modified: hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java (original)
+++ hive/branches/cbo/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java Mon Sep 15 22:46:44 2014
@@ -1081,7 +1081,8 @@ public class TxnHandler {
   private static Map<LockType, Map<LockType, Map<LockState, LockAction>>> jumpTable;
 
   private void checkQFileTestHack() {
-    boolean hackOn = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_IN_TEST);
+    boolean hackOn = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_IN_TEST) ||
+        HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_IN_TEZ_TEST);
     if (hackOn) {
       LOG.info("Hacking in canned values for transaction manager");
       // Set up the transaction/locking db in the derby metastore

Modified: hive/branches/cbo/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java (original)
+++ hive/branches/cbo/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java Mon Sep 15 22:46:44 2014
@@ -11,7 +11,6 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.partition.spec.CompositePartitionSpecProxy;
 import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe;
-import org.apache.hadoop.util.ExitUtil;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -52,7 +51,7 @@ public class TestHiveMetaStorePartitionS
     public void checkExit(int status) {
 
       super.checkExit(status);
-      throw new ExitUtil.ExitException(status, "System.exit() was called. Raising exception. ");
+      throw new RuntimeException("System.exit() was called. Raising exception. ");
     }
   }
 

Modified: hive/branches/cbo/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java (original)
+++ hive/branches/cbo/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java Mon Sep 15 22:46:44 2014
@@ -7,10 +7,6 @@
 package org.apache.hadoop.hive.ql.plan.api;
 
 
-import java.util.Map;
-import java.util.HashMap;
-import org.apache.thrift.TEnum;
-
 public enum OperatorType implements org.apache.thrift.TEnum {
   JOIN(0),
   MAPJOIN(1),
@@ -33,7 +29,9 @@ public enum OperatorType implements org.
   PTF(18),
   MUX(19),
   DEMUX(20),
-  EVENT(21);
+  EVENT(21),
+  ORCFILEMERGE(22),
+  RCFILEMERGE(23);
 
   private final int value;
 
@@ -98,6 +96,10 @@ public enum OperatorType implements org.
         return DEMUX;
       case 21:
         return EVENT;
+      case 22:
+        return ORCFILEMERGE;
+      case 23:
+        return RCFILEMERGE;
       default:
         return null;
     }

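The OperatorType hunk adds two wire values for the new merge operators. A quick check of how the constants behave, assuming the usual Thrift-generated findByValue lookup whose switch body appears in the hunk:

    import org.apache.hadoop.hive.ql.plan.api.OperatorType;

    public class OperatorTypeCheck {
      public static void main(String[] args) {
        // The new constants round-trip through the generated lookup ...
        System.out.println(OperatorType.findByValue(22)); // ORCFILEMERGE
        System.out.println(OperatorType.findByValue(23)); // RCFILEMERGE
        // ... and unknown wire values map to null rather than throwing.
        System.out.println(OperatorType.findByValue(99)); // null
      }
    }
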
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/Context.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/Context.java?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/Context.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/Context.java Mon Sep 15 22:46:44 2014
@@ -43,6 +43,7 @@ import org.apache.hadoop.hive.common.Fil
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.TaskRunner;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.lockmgr.HiveLock;
 import org.apache.hadoop.hive.ql.lockmgr.HiveLockManager;
 import org.apache.hadoop.hive.ql.lockmgr.HiveLockObj;
@@ -98,6 +99,11 @@ public class Context {
   // Transaction manager for this query
   protected HiveTxnManager hiveTxnManager;
 
+  // Used to track what type of acid operation (insert, update, or delete) we are doing.  Useful
+  // since we want to change where bucket columns are accessed in some operators, and to
+  // adjust some optimizations, when doing updates and deletes.
+  private AcidUtils.Operation acidOperation = AcidUtils.Operation.NOT_ACID;
+
   private boolean needLockMgr;
 
   // Keep track of the mapping from load table desc to the output and the lock
@@ -612,4 +618,12 @@ public class Context {
   public void setTryCount(int tryCount) {
     this.tryCount = tryCount;
   }
+
+  public void setAcidOperation(AcidUtils.Operation op) {
+    acidOperation = op;
+  }
+
+  public AcidUtils.Operation getAcidOperation() {
+    return acidOperation;
+  }
 }

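Context now records which ACID operation a query performs, defaulting to NOT_ACID. A hypothetical helper showing how later compilation stages can branch on it (only the getter from the hunk is assumed):

    import org.apache.hadoop.hive.ql.Context;
    import org.apache.hadoop.hive.ql.io.AcidUtils;

    public class AcidOperationCheck {
      // True when the query under compilation is an ACID update or delete --
      // the cases where bucket-column handling and some optimizations change.
      public static boolean isUpdateOrDelete(Context ctx) {
        AcidUtils.Operation op = ctx.getAcidOperation();
        return op == AcidUtils.Operation.UPDATE
            || op == AcidUtils.Operation.DELETE;
      }
    }
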
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/Driver.java?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/Driver.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/Driver.java Mon Sep 15 22:46:44 2014
@@ -96,6 +96,7 @@ import org.apache.hadoop.hive.ql.parse.S
 import org.apache.hadoop.hive.ql.parse.SemanticAnalyzerFactory;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.parse.VariableSubstitution;
+import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
 import org.apache.hadoop.hive.ql.plan.HiveOperation;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
@@ -137,6 +138,9 @@ public class Driver implements CommandPr
   private String SQLState;
   private Throwable downstreamError;
 
+  // A set of FileSinkOperators writing in an ACID compliant manner
+  private Set<FileSinkDesc> acidSinks;
+
   // A limit on the number of threads that can be launched
   private int maxthreads;
   private int tryCount = Integer.MAX_VALUE;
@@ -408,6 +412,9 @@ public class Driver implements CommandPr
       } else {
         sem.analyze(tree, ctx);
       }
+      // Record any ACID compliant FileSinkOperators we saw so we can add our transaction ID to
+      // them later.
+      acidSinks = sem.getAcidFileSinks();
 
       LOG.info("Semantic Analysis Completed");
 
@@ -723,6 +730,11 @@ public class Driver implements CommandPr
         //do not authorize temporary uris
         continue;
       }
+      if (privObject instanceof ReadEntity && ((ReadEntity)privObject).isUpdateOrDelete()) {
+        // Skip this one, as we don't want to check select privileges for the table we're reading
+        // for an update or delete.
+        continue;
+      }
 
       //support for authorization on partitions needs to be added
       String dbname = null;
@@ -859,7 +871,9 @@ public class Driver implements CommandPr
   private int recordValidTxns() {
     try {
       ValidTxnList txns = SessionState.get().getTxnMgr().getValidTxns();
-      conf.set(ValidTxnList.VALID_TXNS_KEY, txns.toString());
+      String txnStr = txns.toString();
+      conf.set(ValidTxnList.VALID_TXNS_KEY, txnStr);
+      LOG.debug("Encoding valid txns info " + txnStr);
       return 0;
     } catch (LockException e) {
      errorMessage = "FAILED: Error in determining valid transactions: " + e.getMessage();
@@ -877,13 +891,44 @@ public class Driver implements CommandPr
    * pretty simple. If all the locks cannot be obtained, error out. Deadlock is avoided by making
    * sure that the locks are lexicographically sorted.
    **/
-  private int acquireReadWriteLocks() {
+  private int acquireLocksAndOpenTxn() {
     PerfLogger perfLogger = PerfLogger.getPerfLogger();
     perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.ACQUIRE_READ_WRITE_LOCKS);
 
+    SessionState ss = SessionState.get();
+    HiveTxnManager txnMgr = ss.getTxnMgr();
 
     try {
-      SessionState.get().getTxnMgr().acquireLocks(plan, ctx, userName);
+      // Don't use the userName member, as it may or may not have been set.  Get the value from
+      // conf, which calls into getUGI to figure out who the process is running as.
+      String userFromUGI;
+      try {
+        userFromUGI = conf.getUser();
+      } catch (IOException e) {
+        errorMessage = "FAILED: Error in determining user while acquiring locks: " + e.getMessage();
+        SQLState = ErrorMsg.findSQLState(e.getMessage());
+        downstreamError = e;
+        console.printError(errorMessage,
+            "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
+        return 10;
+      }
+      if (acidSinks != null && acidSinks.size() > 0) {
+        // We are writing to tables in an ACID compliant way, so we need to open a transaction
+        long txnId = ss.getCurrentTxn();
+        if (txnId == SessionState.NO_CURRENT_TXN) {
+          txnId = txnMgr.openTxn(userFromUGI);
+          ss.setCurrentTxn(txnId);
+        }
+        // Set the transaction id in all of the acid file sinks
+        if (acidSinks != null) {
+          for (FileSinkDesc desc : acidSinks) {
+            desc.setTransactionId(txnId);
+          }
+        }
+      }
+
+      txnMgr.acquireLocks(plan, ctx, userFromUGI);
+
       return 0;
     } catch (LockException e) {
       errorMessage = "FAILED: Error in acquiring locks: " + e.getMessage();
@@ -901,13 +946,33 @@ public class Driver implements CommandPr
    * @param hiveLocks
    *          list of hive locks to be released Release all the locks specified. If some of the
    *          locks have already been released, ignore them
+   * @param commit if there is an open transaction, commit it when true and roll it
+   *               back when false.  If there is no open transaction this parameter is ignored.
+   *
    **/
-  private void releaseLocks(List<HiveLock> hiveLocks) throws LockException {
+  private void releaseLocksAndCommitOrRollback(List<HiveLock> hiveLocks, boolean commit)
+      throws LockException {
     PerfLogger perfLogger = PerfLogger.getPerfLogger();
     perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.RELEASE_LOCKS);
 
-    if (hiveLocks != null) {
-      SessionState.get().getTxnMgr().getLockManager().releaseLocks(hiveLocks);
+    SessionState ss = SessionState.get();
+    HiveTxnManager txnMgr = ss.getTxnMgr();
+    // If we've opened a transaction we need to commit or rollback rather than explicitly
+    // releasing the locks.
+    if (ss.getCurrentTxn() != SessionState.NO_CURRENT_TXN && ss.isAutoCommit()) {
+      try {
+        if (commit) {
+          txnMgr.commitTxn();
+        } else {
+          txnMgr.rollbackTxn();
+        }
+      } finally {
+        ss.setCurrentTxn(SessionState.NO_CURRENT_TXN);
+      }
+    } else {
+      if (hiveLocks != null) {
+        txnMgr.getLockManager().releaseLocks(hiveLocks);
+      }
     }
     ctx.setHiveLocks(null);
 
@@ -994,7 +1059,7 @@ public class Driver implements CommandPr
     }
     if (ret != 0) {
       try {
-        releaseLocks(ctx.getHiveLocks());
+        releaseLocksAndCommitOrRollback(ctx.getHiveLocks(), false);
       } catch (LockException e) {
         LOG.warn("Exception in releasing locks. "
             + org.apache.hadoop.util.StringUtils.stringifyException(e));
@@ -1097,10 +1162,10 @@ public class Driver implements CommandPr
     }
 
     if (requireLock) {
-      ret = acquireReadWriteLocks();
+      ret = acquireLocksAndOpenTxn();
       if (ret != 0) {
         try {
-          releaseLocks(ctx.getHiveLocks());
+          releaseLocksAndCommitOrRollback(ctx.getHiveLocks(), false);
         } catch (LockException e) {
           // Not much to do here
         }
@@ -1112,7 +1177,7 @@ public class Driver implements CommandPr
     if (ret != 0) {
       //if needRequireLock is false, the release here will do nothing because there is no lock
       try {
-        releaseLocks(ctx.getHiveLocks());
+        releaseLocksAndCommitOrRollback(ctx.getHiveLocks(), false);
       } catch (LockException e) {
         // Nothing to do here
       }
@@ -1121,7 +1186,7 @@ public class Driver implements CommandPr
 
     //if needRequireLock is false, the release here will do nothing because there is no lock
     try {
-      releaseLocks(ctx.getHiveLocks());
+      releaseLocksAndCommitOrRollback(ctx.getHiveLocks(), true);
     } catch (LockException e) {
       errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e);
       SQLState = ErrorMsg.findSQLState(e.getMessage());
@@ -1666,7 +1731,7 @@ public class Driver implements CommandPr
     destroyed = true;
     if (ctx != null) {
       try {
-        releaseLocks(ctx.getHiveLocks());
+        releaseLocksAndCommitOrRollback(ctx.getHiveLocks(), false);
       } catch (LockException e) {
         LOG.warn("Exception when releasing locking in destroy: " +
             e.getMessage());

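The Driver changes above tie lock release to transaction outcome: when ACID sinks caused a transaction to be opened, query end commits or rolls back instead of merely dropping locks. A minimal sketch of that lifecycle, using only the SessionState and HiveTxnManager calls visible in the hunk (the wrapper class is hypothetical, and the autocommit check is omitted for brevity):

    import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
    import org.apache.hadoop.hive.ql.lockmgr.LockException;
    import org.apache.hadoop.hive.ql.session.SessionState;

    public class TxnLifecycleSketch {
      // End-of-query handling: commit on success, roll back on failure,
      // and always clear the session's current-transaction marker.
      public static void endQuery(boolean succeeded) throws LockException {
        SessionState ss = SessionState.get();
        HiveTxnManager txnMgr = ss.getTxnMgr();
        if (ss.getCurrentTxn() != SessionState.NO_CURRENT_TXN) {
          try {
            if (succeeded) {
              txnMgr.commitTxn();
            } else {
              txnMgr.rollbackTxn();
            }
          } finally {
            ss.setCurrentTxn(SessionState.NO_CURRENT_TXN);
          }
        }
      }
    }
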
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java Mon Sep 15 22:46:44 2014
@@ -404,6 +404,19 @@ public enum ErrorMsg {
       "time."),
  DISTINCT_NOT_SUPPORTED(10285, "Distinct keyword is not supported in current context"),
 
+  UPDATEDELETE_PARSE_ERROR(10290, "Encountered parse error while parsing rewritten update or " +
+      "delete query"),
+  UPDATEDELETE_IO_ERROR(10291, "Encountered I/O error while parsing rewritten update or " +
+      "delete query"),
+  UPDATE_CANNOT_UPDATE_PART_VALUE(10292, "Updating values of partition columns is not supported"),
+  INSERT_CANNOT_CREATE_TEMP_FILE(10293, "Unable to create temp file for insert values "),
+  ACID_OP_ON_NONACID_TXNMGR(10294, "Attempt to do update or delete using transaction manager that" +
+      " does not support these operations."),
+  NO_INSERT_OVERWRITE_WITH_ACID(10295, "INSERT OVERWRITE not allowed on table with OutputFormat " +
+      "that implements AcidOutputFormat while transaction manager that supports ACID is in use"),
+  VALUES_TABLE_CONSTRUCTOR_NOT_SUPPORTED(10296,
+      "Values clause with table constructor not yet supported"),
+
   //========================== 20000 range starts here ========================//
   SCRIPT_INIT_ERROR(20000, "Unable to initialize custom script."),
   SCRIPT_IO_ERROR(20001, "An error occurred while reading or writing to your custom script. "
@@ -460,7 +473,10 @@ public enum ErrorMsg {
       "to fail because of this, set hive.stats.atomic=false", true),
   STATS_SKIPPING_BY_ERROR(30017, "Skipping stats aggregation by error {0}", true),
   ORC_CORRUPTED_READ(30018, "Corruption in ORC data encountered. To skip reading corrupted "
-      + "data, set " + HiveConf.ConfVars.HIVE_ORC_SKIP_CORRUPT_DATA + " to true");
+      + "data, set " + HiveConf.ConfVars.HIVE_ORC_SKIP_CORRUPT_DATA + " to true"),
+
+
+
   ;
 
   private int errorCode;

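The new 10290-10296 codes cover the rewritten update/delete path. A hypothetical example of surfacing one of them from an analyzer, assuming the usual ErrorMsg.getMsg() accessor:

    import org.apache.hadoop.hive.ql.ErrorMsg;
    import org.apache.hadoop.hive.ql.parse.SemanticException;

    public class PartitionUpdateGuard {
      // Hypothetical guard: reject UPDATEs that try to set a partition
      // column, using the new 10292 error message.
      public static void checkNotPartitionColumn(String col, boolean isPartCol)
          throws SemanticException {
        if (isPartCol) {
          throw new SemanticException(
              ErrorMsg.UPDATE_CANNOT_UPDATE_PART_VALUE.getMsg() + " (" + col + ")");
        }
      }
    }
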
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java Mon Sep 15 22:46:44 2014
@@ -28,6 +28,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData;
 import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
@@ -305,8 +306,10 @@ public class ColumnStatsTask extends Tas
         List<String> partVals = new ArrayList<String>();
         // Iterate over partition columns to figure out partition name
         for (int i = fields.size() - partColSchema.size(); i < fields.size(); i++) {
-          partVals.add(((PrimitiveObjectInspector)fields.get(i).getFieldObjectInspector()).
-            getPrimitiveJavaObject(list.get(i)).toString());
+          Object partVal = ((PrimitiveObjectInspector)fields.get(i).getFieldObjectInspector()).
+              getPrimitiveJavaObject(list.get(i));
+          partVals.add(partVal == null ? // could be null for default partition
+            this.conf.getVar(ConfVars.DEFAULTPARTITIONNAME) : partVal.toString());
         }
         partName = Warehouse.makePartName(partColSchema, partVals);
       }

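The ColumnStatsTask fix guards against a null partition value, which occurs for the default partition; previously the unconditional toString() would throw a NullPointerException. The fallback pattern in isolation (hypothetical wrapper, names taken from the hunk):

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

    public class PartValFallback {
      // A null partition value means the default partition, so substitute the
      // configured default partition name instead of calling toString() on null.
      public static String partValOrDefault(Object partVal, HiveConf conf) {
        return partVal == null
            ? conf.getVar(ConfVars.DEFAULTPARTITIONNAME)
            : partVal.toString();
      }
    }
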
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java Mon Sep 15 22:46:44 2014
@@ -18,32 +18,6 @@
 
 package org.apache.hadoop.hive.ql.exec;
 
-import static org.apache.commons.lang.StringUtils.join;
-import static org.apache.hadoop.util.StringUtils.stringifyException;
-
-import java.io.BufferedWriter;
-import java.io.DataOutputStream;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.OutputStreamWriter;
-import java.io.Serializable;
-import java.io.Writer;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.SortedSet;
-import java.util.TreeMap;
-import java.util.TreeSet;
-
 import org.apache.commons.lang.StringEscapeUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
@@ -88,8 +62,9 @@ import org.apache.hadoop.hive.ql.QueryPl
 import org.apache.hadoop.hive.ql.exec.ArchiveUtils.PartSpecInfo;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
-import org.apache.hadoop.hive.ql.io.merge.MergeTask;
-import org.apache.hadoop.hive.ql.io.merge.MergeWork;
+import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
+import org.apache.hadoop.hive.ql.io.merge.MergeFileTask;
+import org.apache.hadoop.hive.ql.io.merge.MergeFileWork;
 import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe;
 import org.apache.hadoop.hive.ql.io.rcfile.truncate.ColumnTruncateTask;
 import org.apache.hadoop.hive.ql.io.rcfile.truncate.ColumnTruncateWork;
@@ -133,14 +108,19 @@ import org.apache.hadoop.hive.ql.plan.De
 import org.apache.hadoop.hive.ql.plan.DropDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.DropIndexDesc;
 import org.apache.hadoop.hive.ql.plan.DropTableDesc;
+import org.apache.hadoop.hive.ql.plan.FileMergeDesc;
 import org.apache.hadoop.hive.ql.plan.GrantDesc;
 import org.apache.hadoop.hive.ql.plan.GrantRevokeRoleDDL;
+import org.apache.hadoop.hive.ql.plan.ListBucketingCtx;
 import org.apache.hadoop.hive.ql.plan.LockDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.LockTableDesc;
 import org.apache.hadoop.hive.ql.plan.MsckDesc;
+import org.apache.hadoop.hive.ql.plan.OperatorDesc;
+import org.apache.hadoop.hive.ql.plan.OrcFileMergeDesc;
 import org.apache.hadoop.hive.ql.plan.PrincipalDesc;
 import org.apache.hadoop.hive.ql.plan.PrivilegeDesc;
 import org.apache.hadoop.hive.ql.plan.PrivilegeObjectDesc;
+import org.apache.hadoop.hive.ql.plan.RCFileMergeDesc;
 import org.apache.hadoop.hive.ql.plan.RenamePartitionDesc;
 import org.apache.hadoop.hive.ql.plan.RevokeDesc;
 import org.apache.hadoop.hive.ql.plan.RoleDDLDesc;
@@ -194,6 +174,33 @@ import org.apache.hadoop.util.ToolRunner
 import org.apache.hive.common.util.AnnotationUtils;
 import org.stringtemplate.v4.ST;
 
+import java.io.BufferedWriter;
+import java.io.DataOutputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.io.Serializable;
+import java.io.Writer;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+import static org.apache.commons.lang.StringUtils.join;
+import static org.apache.hadoop.util.StringUtils.stringifyException;
+
 /**
  * DDLTask implementation.
  *
@@ -546,15 +553,39 @@ public class DDLTask extends Task<DDLWor
    */
   private int mergeFiles(Hive db, AlterTablePartMergeFilesDesc mergeFilesDesc)
       throws HiveException {
+    ListBucketingCtx lbCtx = mergeFilesDesc.getLbCtx();
+    boolean lbatc = lbCtx == null ? false : lbCtx.isSkewedStoredAsDir();
+    int lbd = lbCtx == null ? 0 : lbCtx.calculateListBucketingLevel();
+
     // merge work only needs input and output.
-    MergeWork mergeWork = new MergeWork(mergeFilesDesc.getInputDir(),
-        mergeFilesDesc.getOutputDir(), mergeFilesDesc.getInputFormatClass());
+    MergeFileWork mergeWork = new MergeFileWork(mergeFilesDesc.getInputDir(),
+        mergeFilesDesc.getOutputDir(), mergeFilesDesc.getInputFormatClass().getName());
     mergeWork.setListBucketingCtx(mergeFilesDesc.getLbCtx());
     mergeWork.resolveConcatenateMerge(db.getConf());
     mergeWork.setMapperCannotSpanPartns(true);
-    mergeWork.setSourceTableInputFormat(mergeFilesDesc.getInputFormatClass());
+    mergeWork.setSourceTableInputFormat(mergeFilesDesc.getInputFormatClass().getName());
+    final FileMergeDesc fmd;
+    if (mergeFilesDesc.getInputFormatClass().equals(RCFileInputFormat.class)) {
+      fmd = new RCFileMergeDesc();
+    } else {
+      // safe to assume ORC otherwise, as the semantic analyzer only allows RC/ORC here
+      fmd = new OrcFileMergeDesc();
+    }
+
+    fmd.setDpCtx(null);
+    fmd.setHasDynamicPartitions(false);
+    fmd.setListBucketingAlterTableConcatenate(lbatc);
+    fmd.setListBucketingDepth(lbd);
+    fmd.setOutputPath(mergeFilesDesc.getOutputDir());
+
+    Operator<? extends OperatorDesc> mergeOp = OperatorFactory.get(fmd);
+
+    LinkedHashMap<String, Operator<? extends OperatorDesc>> aliasToWork =
+        new LinkedHashMap<String, Operator<? extends OperatorDesc>>();
+    aliasToWork.put(mergeFilesDesc.getInputDir().toString(), mergeOp);
+    mergeWork.setAliasToWork(aliasToWork);
     DriverContext driverCxt = new DriverContext();
-    MergeTask taskExec = new MergeTask();
+    MergeFileTask taskExec = new MergeFileTask();
     taskExec.initialize(db.getConf(), null, driverCxt);
     taskExec.setWork(mergeWork);
     taskExec.setQueryPlan(this.getQueryPlan());

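In the DDLTask hunk, ALTER TABLE ... CONCATENATE now builds a MergeFileWork wired to a real merge operator, with the descriptor type picked off the input format. The dispatch in isolation (hypothetical helper; as the inline comment notes, anything that is not RCFile has already been validated as ORC):

    import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
    import org.apache.hadoop.hive.ql.plan.FileMergeDesc;
    import org.apache.hadoop.hive.ql.plan.OrcFileMergeDesc;
    import org.apache.hadoop.hive.ql.plan.RCFileMergeDesc;

    public class MergeDescDispatch {
      // RCFile gets an RCFileMergeDesc; everything else is ORC by the time
      // this runs, since the semantic analyzer only admits RC/ORC.
      public static FileMergeDesc descFor(Class<?> inputFormatClass) {
        return inputFormatClass.equals(RCFileInputFormat.class)
            ? new RCFileMergeDesc()
            : new OrcFileMergeDesc();
      }
    }
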
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java Mon Sep 15 22:46:44 2014
@@ -27,6 +27,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.HiveStatsUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.ql.Context;
@@ -35,8 +36,9 @@ import org.apache.hadoop.hive.ql.exec.mr
 import org.apache.hadoop.hive.ql.exec.mr.MapredLocalTask;
 import org.apache.hadoop.hive.ql.hooks.LineageInfo.DataContainer;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
-import org.apache.hadoop.hive.ql.io.merge.MergeTask;
+import org.apache.hadoop.hive.ql.io.merge.MergeFileTask;
 import org.apache.hadoop.hive.ql.lockmgr.HiveLock;
 import org.apache.hadoop.hive.ql.lockmgr.HiveLockManager;
 import org.apache.hadoop.hive.ql.lockmgr.HiveLockObj;
@@ -47,7 +49,13 @@ import org.apache.hadoop.hive.ql.metadat
 import org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.BucketCol;
 import org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.SortCol;
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
-import org.apache.hadoop.hive.ql.plan.*;
+import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx;
+import org.apache.hadoop.hive.ql.plan.LoadFileDesc;
+import org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc;
+import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
+import org.apache.hadoop.hive.ql.plan.MapWork;
+import org.apache.hadoop.hive.ql.plan.MapredWork;
+import org.apache.hadoop.hive.ql.plan.MoveWork;
 import org.apache.hadoop.hive.ql.plan.api.StageType;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.util.StringUtils;
@@ -55,7 +63,12 @@ import org.apache.hadoop.util.StringUtil
 import java.io.IOException;
 import java.io.Serializable;
 import java.security.AccessControlException;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
 
 /**
  * MoveTask implementation.
@@ -274,7 +287,8 @@ public class MoveTask extends Task<MoveW
           dc = new DataContainer(table.getTTable());
           db.loadTable(tbd.getSourcePath(), tbd.getTable()
               .getTableName(), tbd.getReplace(), tbd.getHoldDDLTime(), work.isSrcLocal(),
-              isSkewedStoredAsDirs(tbd));
+              isSkewedStoredAsDirs(tbd),
+              work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID);
           if (work.getOutputs() != null) {
             work.getOutputs().add(new WriteEntity(table,
                 (tbd.getReplace() ? WriteEntity.WriteType.INSERT_OVERWRITE :
@@ -294,7 +308,7 @@ public class MoveTask extends Task<MoveW
           while (task.getParentTasks() != null && task.getParentTasks().size() == 1) {
             task = (Task)task.getParentTasks().get(0);
             // If it was a merge task or a local map reduce task, nothing can be inferred
-            if (task instanceof MergeTask || task instanceof MapredLocalTask) {
+            if (task instanceof MergeFileTask || task instanceof MapredLocalTask) {
               break;
             }
 
@@ -354,7 +368,8 @@ public class MoveTask extends Task<MoveW
                 tbd.getReplace(),
                 dpCtx.getNumDPCols(),
                 tbd.getHoldDDLTime(),
-                isSkewedStoredAsDirs(tbd));
+                isSkewedStoredAsDirs(tbd),
+                work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID);
 
             if (dp.size() == 0 && conf.getBoolVar(HiveConf.ConfVars.HIVE_ERROR_ON_EMPTY_PARTITION)) {
               throw new HiveException("This query creates no partitions." +
@@ -389,7 +404,10 @@ public class MoveTask extends Task<MoveW
               // update columnar lineage for each partition
               dc = new DataContainer(table.getTTable(), partn.getTPartition());
 
-              if (SessionState.get() != null) {
+              // Don't set lineage on delete as we don't have all the columns
+              if (SessionState.get() != null &&
+                  work.getLoadTableWork().getWriteType() != AcidUtils.Operation.DELETE &&
+                  work.getLoadTableWork().getWriteType() != AcidUtils.Operation.UPDATE) {
                 SessionState.get().getLineageState().setLineage(tbd.getSourcePath(), dc,
                     table.getCols());
               }
@@ -403,7 +421,8 @@ public class MoveTask extends Task<MoveW
             db.validatePartitionNameCharacters(partVals);
             db.loadPartition(tbd.getSourcePath(), tbd.getTable().getTableName(),
                 tbd.getPartitionSpec(), tbd.getReplace(), tbd.getHoldDDLTime(),
-                tbd.getInheritTableSpecs(), isSkewedStoredAsDirs(tbd), work.isSrcLocal());
+                tbd.getInheritTableSpecs(), isSkewedStoredAsDirs(tbd), work.isSrcLocal(),
+                work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID);
             Partition partn = db.getPartition(table, tbd.getPartitionSpec(),
                 false);
 
@@ -422,8 +441,24 @@ public class MoveTask extends Task<MoveW
          }
         }
         if (SessionState.get() != null && dc != null) {
-          SessionState.get().getLineageState().setLineage(tbd.getSourcePath(), dc,
-              table.getCols());
+          // If we are doing an update or a delete the number of columns in the table will not
+          // match the number of columns in the file sink.  For update there will be one too many
+          // (because of the ROW__ID), and in the case of the delete there will be just the
+          // ROW__ID, which we don't need to worry about from a lineage perspective.
+          List<FieldSchema> tableCols = null;
+          switch (work.getLoadTableWork().getWriteType()) {
+            case DELETE:
+            case UPDATE:
+              // Pass an empty list as no columns will be written to the file.
+              // TODO I should be able to make this work for update
+              tableCols = new ArrayList<FieldSchema>();
+              break;
+
+            default:
+              tableCols = table.getCols();
+              break;
+          }
+          SessionState.get().getLineageState().setLineage(tbd.getSourcePath(), dc, tableCols);
         }
         releaseLocks(tbd);
       }

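MoveTask's lineage fix reflects that update and delete file sinks do not carry the full table schema (the ROW__ID column dominates), so lineage receives an empty column list for those write types. The column choice in isolation (hypothetical helper):

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.hive.metastore.api.FieldSchema;

    public class LineageColumnChoice {
      // Updates and deletes write ROW__ID-shaped rows, so report no columns;
      // plain inserts report the table's real schema.
      public static List<FieldSchema> lineageCols(boolean updateOrDelete,
                                                  List<FieldSchema> tableCols) {
        return updateOrDelete ? new ArrayList<FieldSchema>() : tableCols;
      }
    }
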
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java?rev=1625176&r1=1625175&r2=1625176&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java Mon Sep 15 22:46:44 2014
@@ -18,10 +18,6 @@
 
 package org.apache.hadoop.hive.ql.exec;
 
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
 import org.apache.hadoop.hive.ql.exec.vector.VectorExtractOperator;
 import org.apache.hadoop.hive.ql.exec.vector.VectorFileSinkOperator;
 import org.apache.hadoop.hive.ql.exec.vector.VectorFilterOperator;
@@ -53,7 +49,9 @@ import org.apache.hadoop.hive.ql.plan.Li
 import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
 import org.apache.hadoop.hive.ql.plan.MuxDesc;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
+import org.apache.hadoop.hive.ql.plan.OrcFileMergeDesc;
 import org.apache.hadoop.hive.ql.plan.PTFDesc;
+import org.apache.hadoop.hive.ql.plan.RCFileMergeDesc;
 import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
 import org.apache.hadoop.hive.ql.plan.SMBJoinDesc;
 import org.apache.hadoop.hive.ql.plan.ScriptDesc;
@@ -62,6 +60,10 @@ import org.apache.hadoop.hive.ql.plan.Ta
 import org.apache.hadoop.hive.ql.plan.UDTFDesc;
 import org.apache.hadoop.hive.ql.plan.UnionDesc;
 
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
 /**
  * OperatorFactory.
  *
@@ -108,6 +110,10 @@ public final class OperatorFactory {
         AppMasterEventOperator.class));
     opvec.add(new OpTuple<DynamicPruningEventDesc>(DynamicPruningEventDesc.class,
         AppMasterEventOperator.class));
+    opvec.add(new OpTuple<RCFileMergeDesc>(RCFileMergeDesc.class,
+        RCFileMergeOperator.class));
+    opvec.add(new OpTuple<OrcFileMergeDesc>(OrcFileMergeDesc.class,
+        OrcFileMergeOperator.class));
   }
 
   static {