Posted to commits@hive.apache.org by an...@apache.org on 2020/10/20 11:54:43 UTC

[hive] branch master updated: HIVE-24227:sys.replication_metrics table shows incorrect status for failed policies (Arko Sharma, reviewed by Aasha Medhi)

This is an automated email from the ASF dual-hosted git repository.

anishek pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new 4e19fdb  HIVE-24227:sys.replication_metrics table shows incorrect status for failed policies (Arko Sharma, reviewed by Aasha Medhi)
4e19fdb is described below

commit 4e19fdb72cbede08a73f467dcc8f324c01890a82
Author: Anishek Agarwal <an...@gmail.com>
AuthorDate: Tue Oct 20 17:24:21 2020 +0530

    HIVE-24227:sys.replication_metrics table shows incorrect status for failed policies (Arko Sharma, reviewed by Aasha Medhi)
---
 .../java/org/apache/hadoop/hive/ql/ErrorMsg.java   |   6 +-
 .../test/results/clientnegative/serde_regex.q.out  |   2 +-
 .../hive/ql/parse/TestReplicationScenarios.java    |  19 +-
 ...estReplicationScenariosAcidTablesBootstrap.java |  12 +-
 .../TestReplicationScenariosAcrossInstances.java   |   8 +
 .../TestReplicationScenariosExternalTables.java    |   8 +-
 .../ql/parse/TestStatsReplicationScenarios.java    |  13 +-
 .../TestMetastoreAuthorizationProvider.java        |  10 +-
 .../java/org/apache/hive/jdbc/TestJdbcDriver2.java |   4 +-
 .../src/test/results/negative/kudu_config.q.out    |   2 +-
 .../org/apache/hadoop/hive/ql/ddl/DDLTask.java     |  10 +-
 .../org/apache/hadoop/hive/ql/ddl/DDLWork.java     |  26 ++
 .../hadoop/hive/ql/exec/ColumnStatsUpdateTask.java |   3 +-
 .../org/apache/hadoop/hive/ql/exec/CopyTask.java   |   6 +-
 .../org/apache/hadoop/hive/ql/exec/MoveTask.java   |   8 +-
 .../apache/hadoop/hive/ql/exec/ReplCopyTask.java   |  61 +++-
 .../apache/hadoop/hive/ql/exec/ReplTxnTask.java    |   4 +-
 .../apache/hadoop/hive/ql/exec/repl/AckTask.java   |  12 +-
 .../apache/hadoop/hive/ql/exec/repl/AckWork.java   |  10 +
 .../hadoop/hive/ql/exec/repl/AtlasDumpTask.java    |  28 +-
 .../hadoop/hive/ql/exec/repl/AtlasLoadTask.java    |  30 +-
 .../hadoop/hive/ql/exec/repl/DirCopyTask.java      |   6 +-
 .../hadoop/hive/ql/exec/repl/DirCopyWork.java      |  15 +-
 .../hadoop/hive/ql/exec/repl/RangerDumpTask.java   |  31 +-
 .../hadoop/hive/ql/exec/repl/RangerLoadTask.java   |  33 ++-
 .../hadoop/hive/ql/exec/repl/ReplDumpTask.java     |  31 +-
 .../hadoop/hive/ql/exec/repl/ReplDumpWork.java     |   8 +-
 .../hadoop/hive/ql/exec/repl/ReplLoadTask.java     |  76 +++--
 .../hadoop/hive/ql/exec/repl/ReplLoadWork.java     |   4 +-
 .../hadoop/hive/ql/exec/repl/ReplStateLogTask.java |   6 +-
 .../hadoop/hive/ql/exec/repl/ReplStateLogWork.java |  42 +++
 .../exec/repl/bootstrap/load/LoadConstraint.java   |  26 +-
 .../ql/exec/repl/bootstrap/load/LoadDatabase.java  |  34 ++-
 .../ql/exec/repl/bootstrap/load/LoadFunction.java  |  17 +-
 .../repl/bootstrap/load/table/LoadPartitions.java  |  20 +-
 .../exec/repl/bootstrap/load/table/LoadTable.java  |  20 +-
 .../incremental/IncrementalLoadTasksBuilder.java   |  19 +-
 .../hadoop/hive/ql/exec/repl/util/ReplUtils.java   |  95 ++++++
 .../hive/ql/parse/ImportSemanticAnalyzer.java      | 140 +++++++--
 .../hadoop/hive/ql/parse/repl/dump/Utils.java      |   2 +-
 .../hive/ql/parse/repl/load/DumpMetaData.java      |   4 +
 .../parse/repl/load/message/AbortTxnHandler.java   |   3 +-
 .../load/message/AddCheckConstraintHandler.java    |   3 +-
 .../load/message/AddDefaultConstraintHandler.java  |   3 +-
 .../repl/load/message/AddForeignKeyHandler.java    |   5 +-
 .../load/message/AddNotNullConstraintHandler.java  |   4 +-
 .../repl/load/message/AddPrimaryKeyHandler.java    |   5 +-
 .../load/message/AddUniqueConstraintHandler.java   |   3 +-
 .../repl/load/message/AllocWriteIdHandler.java     |   3 +-
 .../repl/load/message/AlterDatabaseHandler.java    |   4 +-
 .../parse/repl/load/message/CommitTxnHandler.java  |   7 +-
 .../repl/load/message/CreateDatabaseHandler.java   |  15 +-
 .../repl/load/message/CreateFunctionHandler.java   |  10 +-
 .../repl/load/message/DropConstraintHandler.java   |   3 +-
 .../repl/load/message/DropDatabaseHandler.java     |   4 +-
 .../repl/load/message/DropFunctionHandler.java     |   3 +-
 .../repl/load/message/DropPartitionHandler.java    |   4 +-
 .../parse/repl/load/message/DropTableHandler.java  |   3 +-
 .../ql/parse/repl/load/message/InsertHandler.java  |   3 +-
 .../ql/parse/repl/load/message/MessageHandler.java |  40 +++
 .../ql/parse/repl/load/message/OpenTxnHandler.java |   3 +-
 .../repl/load/message/RenamePartitionHandler.java  |   3 +-
 .../repl/load/message/RenameTableHandler.java      |   5 +-
 .../ql/parse/repl/load/message/TableHandler.java   |   6 +-
 .../load/message/TruncatePartitionHandler.java     |   3 +-
 .../repl/load/message/TruncateTableHandler.java    |   5 +-
 .../load/message/UpdatePartColStatHandler.java     |   4 +-
 .../load/message/UpdateTableColStatHandler.java    |   3 +-
 .../repl/metric/ReplicationMetricCollector.java    |  12 +
 .../hadoop/hive/ql/plan/ColumnStatsUpdateWork.java |  31 ++
 .../org/apache/hadoop/hive/ql/plan/CopyWork.java   |  34 +++
 .../hadoop/hive/ql/plan/ImportTableDesc.java       |   8 +
 .../org/apache/hadoop/hive/ql/plan/MoveWork.java   |  26 ++
 .../apache/hadoop/hive/ql/plan/ReplCopyWork.java   |  16 +
 .../apache/hadoop/hive/ql/plan/ReplTxnWork.java    |  48 +++
 .../ql/processors/CommandProcessorException.java   |   4 +
 .../hive/ql/exec/repl/TestRangerDumpTask.java      |  10 +-
 .../TestReplicationMetricUpdateOnFailure.java      | 329 +++++++++++++++++++++
 .../add_partition_with_whitelist.q.out             |   2 +-
 ql/src/test/results/clientnegative/addpart1.q.out  |   2 +-
 .../allow_change_col_type_par_neg.q.out            |   2 +-
 .../clientnegative/alter_external_acid.q.out       |   2 +-
 .../alter_partition_change_col_dup_col.q.out       |   2 +-
 .../alter_partition_change_col_nonexist.q.out      |   2 +-
 .../alter_partition_with_whitelist.q.out           |   2 +-
 .../alter_rename_partition_failure.q.out           |   2 +-
 .../alter_rename_partition_failure2.q.out          |   2 +-
 .../alter_table_constraint_duplicate_pk.q.out      |   2 +-
 .../alter_table_constraint_invalid_fk_col1.q.out   |   2 +-
 .../alter_table_constraint_invalid_fk_col2.q.out   |   2 +-
 .../alter_table_constraint_invalid_fk_tbl1.q.out   |   2 +-
 .../alter_table_constraint_invalid_fk_tbl2.q.out   |   2 +-
 .../alter_table_constraint_invalid_pk_col.q.out    |   2 +-
 .../alter_table_constraint_invalid_pk_tbl.q.out    |   2 +-
 .../alter_table_constraint_invalid_ref.q.out       |   2 +-
 .../clientnegative/alter_table_wrong_db.q.out      |   2 +-
 .../alter_table_wrong_location2.q.out              |   2 +-
 .../clientnegative/alter_table_wrong_regex.q.out   |   2 +-
 ql/src/test/results/clientnegative/altern1.q.out   |   2 +-
 ql/src/test/results/clientnegative/archive1.q.out  |   2 +-
 ql/src/test/results/clientnegative/archive2.q.out  |   2 +-
 .../results/clientnegative/archive_multi1.q.out    |   2 +-
 .../results/clientnegative/archive_multi2.q.out    |   2 +-
 .../results/clientnegative/archive_multi3.q.out    |   2 +-
 .../results/clientnegative/archive_multi4.q.out    |   2 +-
 .../results/clientnegative/archive_multi5.q.out    |   2 +-
 .../results/clientnegative/archive_multi6.q.out    |   2 +-
 .../authorization_cannot_create_default_role.q.out |   2 +-
 .../authorization_caseinsensitivity.q.out          |   2 +-
 .../authorization_create_role_no_admin.q.out       |   2 +-
 .../authorization_drop_admin_role.q.out            |   2 +-
 .../authorization_drop_role_no_admin.q.out         |   2 +-
 .../clientnegative/authorization_fail_1.q.out      |   2 +-
 .../clientnegative/authorization_fail_8.q.out      |   2 +-
 .../clientnegative/authorization_grant_group.q.out |   2 +-
 .../authorization_grant_table_allpriv.q.out        |   2 +-
 .../authorization_grant_table_dup.q.out            |   2 +-
 .../authorization_grant_table_fail1.q.out          |   2 +-
 .../authorization_grant_table_fail_nogrant.q.out   |   2 +-
 .../authorization_invalid_priv_v2.q.out            |   2 +-
 .../authorization_priv_current_role_neg.q.out      |   2 +-
 .../authorization_public_create.q.out              |   2 +-
 .../clientnegative/authorization_public_drop.q.out |   2 +-
 .../authorization_revoke_table_fail1.q.out         |   2 +-
 .../authorization_revoke_table_fail2.q.out         |   2 +-
 .../clientnegative/authorization_role_case.q.out   |   2 +-
 .../authorization_role_cycles1.q.out               |   2 +-
 .../authorization_role_cycles2.q.out               |   2 +-
 .../clientnegative/authorization_role_grant.q.out  |   2 +-
 .../clientnegative/authorization_role_grant2.q.out |   2 +-
 .../authorization_role_grant_nosuchrole.q.out      |   2 +-
 .../authorization_role_grant_otherrole.q.out       |   2 +-
 .../authorization_role_grant_otheruser.q.out       |   2 +-
 .../authorization_set_role_neg1.q.out              |   2 +-
 .../authorization_set_role_neg2.q.out              |   2 +-
 .../authorization_show_grant_otherrole.q.out       |   2 +-
 .../authorization_show_grant_otheruser_all.q.out   |   2 +-
 ...uthorization_show_grant_otheruser_alltabs.q.out |   2 +-
 .../authorization_show_grant_otheruser_wtab.q.out  |   2 +-
 ...thorization_show_role_principals_no_admin.q.out |   2 +-
 .../authorization_show_roles_no_admin.q.out        |   2 +-
 .../authorization_table_grant_nosuchrole.q.out     |   2 +-
 .../clientnegative/authorize_grant_public.q.out    |   2 +-
 .../clientnegative/authorize_revoke_public.q.out   |   2 +-
 .../clientnegative/avro_add_column_extschema.q.out |   2 +-
 .../test/results/clientnegative/avro_decimal.q.out |   2 +-
 .../results/clientnegative/column_rename1.q.out    |   2 +-
 .../results/clientnegative/column_rename2.q.out    |   2 +-
 .../results/clientnegative/column_rename4.q.out    |   2 +-
 .../clientnegative/compact_non_acid_table.q.out    |   2 +-
 .../clientnegative/create_external_acid.q.out      |   2 +-
 .../results/clientnegative/create_not_acid.q.out   |   2 +-
 .../clientnegative/create_table_wrong_regex.q.out  |   2 +-
 .../clientnegative/create_view_failure1.q.out      |   2 +-
 .../clientnegative/create_view_failure2.q.out      |   2 +-
 .../clientnegative/create_view_failure4.q.out      |   2 +-
 .../create_with_constraints_duplicate_name.q.out   |   2 +-
 .../clientnegative/create_with_fk_constraint.q.out |   2 +-
 .../create_with_fk_pk_same_tab.q.out               |   2 +-
 .../create_with_fk_uk_same_tab.q.out               |   2 +-
 .../clientnegative/create_with_fk_wrong_ref.q.out  |   2 +-
 .../clientnegative/create_with_fk_wrong_ref2.q.out |   2 +-
 .../database_create_already_exists.q.out           |   2 +-
 .../database_create_invalid_name.q.out             |   2 +-
 .../clientnegative/database_drop_not_empty.q.out   |   2 +-
 .../database_drop_not_empty_restrict.q.out         |   2 +-
 .../database_location_conflict.q.out               |   2 +-
 .../database_location_conflict2.q.out              |   2 +-
 .../database_location_conflict3.q.out              |   2 +-
 .../results/clientnegative/dbtxnmgr_nodblock.q.out |   2 +-
 .../clientnegative/dbtxnmgr_nodbunlock.q.out       |   2 +-
 .../clientnegative/dbtxnmgr_notablelock.q.out      |   2 +-
 .../clientnegative/dbtxnmgr_notableunlock.q.out    |   2 +-
 ql/src/test/results/clientnegative/deletejar.q.out |   2 +-
 .../results/clientnegative/describe_xpath1.q.out   |   2 +-
 .../results/clientnegative/describe_xpath2.q.out   |   2 +-
 .../results/clientnegative/describe_xpath3.q.out   |   2 +-
 .../results/clientnegative/describe_xpath4.q.out   |   2 +-
 .../disallow_incompatible_type_change_on1.q.out    |   2 +-
 .../disallow_incompatible_type_change_on2.q.out    |   2 +-
 .../clientnegative/drop_invalid_constraint1.q.out  |   2 +-
 .../clientnegative/drop_invalid_constraint2.q.out  |   2 +-
 .../clientnegative/drop_invalid_constraint3.q.out  |   2 +-
 .../clientnegative/drop_invalid_constraint4.q.out  |   2 +-
 .../clientnegative/drop_table_failure2.q.out       |   2 +-
 .../clientnegative/drop_table_used_by_mv.q.out     |   2 +-
 .../clientnegative/drop_table_used_by_mv2.q.out    |   2 +-
 .../clientnegative/drop_view_failure1.q.out        |   2 +-
 .../results/clientnegative/druid_address.q.out     |   2 +-
 .../results/clientnegative/druid_buckets.q.out     |   2 +-
 .../test/results/clientnegative/druid_case.q.out   |   2 +-
 .../results/clientnegative/druid_datasource.q.out  |   2 +-
 .../results/clientnegative/druid_datasource2.q.out |   2 +-
 .../results/clientnegative/druid_location.q.out    |   2 +-
 .../results/clientnegative/druid_partitions.q.out  |   2 +-
 .../test/results/clientnegative/dyn_part_max.q.out |   2 +-
 .../clientnegative/exchange_partition.q.out        |   2 +-
 ql/src/test/results/clientnegative/external1.q.out |   2 +-
 ql/src/test/results/clientnegative/external2.q.out |   2 +-
 ...ms_using_serde_alter_table_update_columns.q.out |   2 +-
 .../results/clientnegative/insert_sorted.q.out     |   2 +-
 ql/src/test/results/clientnegative/lockneg2.q.out  |   2 +-
 ql/src/test/results/clientnegative/lockneg3.q.out  |   2 +-
 ql/src/test/results/clientnegative/lockneg4.q.out  |   2 +-
 ql/src/test/results/clientnegative/lockneg5.q.out  |   2 +-
 .../lockneg_query_tbl_in_locked_db.q.out           |   2 +-
 .../clientnegative/materialized_view_drop.q.out    |   2 +-
 .../clientnegative/materialized_view_drop2.q.out   |   2 +-
 .../materialized_view_no_cbo_rewrite_2.q.out       |   2 +-
 ...terialized_view_no_supported_op_rewrite_2.q.out |   2 +-
 .../test/results/clientnegative/mm_convert.q.out   |   2 +-
 .../clientnegative/nested_complex_neg.q.out        |   2 +-
 .../clientnegative/orc_change_fileformat.q.out     |   2 +-
 .../orc_change_fileformat_acid.q.out               |   2 +-
 .../results/clientnegative/orc_change_serde.q.out  |   2 +-
 .../clientnegative/orc_change_serde_acid.q.out     |   2 +-
 .../clientnegative/orc_reorder_columns1.q.out      |   2 +-
 .../clientnegative/orc_reorder_columns1_acid.q.out |   2 +-
 .../clientnegative/orc_reorder_columns2.q.out      |   2 +-
 .../clientnegative/orc_reorder_columns2_acid.q.out |   2 +-
 .../clientnegative/orc_replace_columns1.q.out      |   2 +-
 .../clientnegative/orc_replace_columns1_acid.q.out |   2 +-
 .../clientnegative/orc_replace_columns2.q.out      |   2 +-
 .../clientnegative/orc_replace_columns2_acid.q.out |   2 +-
 .../clientnegative/orc_replace_columns3.q.out      |   2 +-
 .../clientnegative/orc_replace_columns3_acid.q.out |   2 +-
 .../clientnegative/orc_type_promotion1.q.out       |   2 +-
 .../clientnegative/orc_type_promotion1_acid.q.out  |   2 +-
 .../clientnegative/orc_type_promotion2.q.out       |   2 +-
 .../clientnegative/orc_type_promotion2_acid.q.out  |   2 +-
 .../clientnegative/orc_type_promotion3.q.out       |   2 +-
 .../clientnegative/orc_type_promotion3_acid.q.out  |   2 +-
 .../parquet_alter_part_table_drop_columns.q.out    |   2 +-
 .../test/results/clientnegative/serde_regex.q.out  |   2 +-
 .../test/results/clientnegative/serde_regex3.q.out |   2 +-
 .../special_character_in_tabnames_1.q.out          |   2 +-
 .../clientnegative/strict_managed_tables1.q.out    |   2 +-
 .../clientnegative/strict_managed_tables2.q.out    |   2 +-
 .../clientnegative/strict_managed_tables3.q.out    |   2 +-
 .../clientnegative/strict_managed_tables4.q.out    |   2 +-
 .../clientnegative/strict_managed_tables5.q.out    |   2 +-
 .../clientnegative/strict_managed_tables6.q.out    |   2 +-
 .../clientnegative/temp_table_addpart1.q.out       |   2 +-
 ...temp_table_alter_rename_partition_failure.q.out |   2 +-
 ...emp_table_alter_rename_partition_failure2.q.out |   2 +-
 .../temp_table_exchange_partitions.q.out           |   2 +-
 .../results/clientnegative/temp_table_rename.q.out |   2 +-
 ql/src/test/results/clientnegative/touch1.q.out    |   2 +-
 .../encrypted/encryption_move_tbl.q.out            |   4 +-
 .../results/clientpositive/llap/resourceplan.q.out |  62 ++--
 .../apache/hive/service/cli/CLIServiceTest.java    |   2 +-
 .../service/cli/thrift/ThriftCLIServiceTest.java   |   2 +-
 252 files changed, 1560 insertions(+), 436 deletions(-)

diff --git a/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index 9b50f75..6a7b45c 100644
--- a/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ b/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -625,7 +625,9 @@ public enum ErrorMsg {
   REPL_INVALID_INTERNAL_CONFIG_FOR_SERVICE(40009, "Invalid internal config error : {0} for {1} service.", true),
   REPL_RETRY_EXHAUSTED(40010, "Retry exhausted for retryable error code {0}.", true),
   REPL_FAILED_WITH_NON_RECOVERABLE_ERROR(40011, "Replication failed with non recoverable error. Needs manual intervention"),
-  REPL_INVALID_ARGUMENTS(40012, "Invalid arguments error : {0}.", true)
+  REPL_INVALID_ARGUMENTS(40012, "Invalid arguments error : {0}.", true),
+  REPL_INVALID_ALTER_TABLE(40013, "{0}Unable to alter table{1}", true),
+  REPL_PERMISSION_DENIED(40014, "{0}org.apache.hadoop.security.AccessControlException{1}", true)
   ;
 
   private int errorCode;
@@ -645,7 +647,7 @@ public enum ErrorMsg {
     for (ErrorMsg errorMsg : values()) {
       if (errorMsg.format != null) {
         String pattern = errorMsg.mesg.replaceAll("\\{[0-9]+\\}", ".*");
-        formatToErrorMsgMap.put(Pattern.compile("^" + pattern + "$"), errorMsg);
+        formatToErrorMsgMap.put(Pattern.compile("^" + pattern + "$", Pattern.DOTALL), errorMsg);
       } else {
         mesgToErrorMsgMap.put(errorMsg.getMsg().trim(), errorMsg);
         int length = errorMsg.getMsg().trim().length();
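The DOTALL flag matters here because the anchored regexes built from the ErrorMsg templates otherwise stop matching at the first newline, so a multi-line exception message falls through to a generic return code instead of mapping back to its template. A minimal standalone sketch (not part of this patch; the message text below is made up) showing the difference:

    import java.util.regex.Pattern;

    // Illustration only: an ErrorMsg format string is turned into an anchored regex the
    // same way formatToErrorMsgMap does it, then matched against a multi-line message.
    public class DotAllSketch {
      public static void main(String[] args) {
        String template = "Invalid arguments error : {0}.";       // hypothetical ErrorMsg template
        String regex = "^" + template.replaceAll("\\{[0-9]+\\}", ".*") + "$";
        String multiLine = "Invalid arguments error : line one\nline two.";

        // Without DOTALL, '.' does not cross the newline, so the message is not recognized.
        System.out.println(Pattern.compile(regex).matcher(multiLine).matches());                  // false
        // With DOTALL, '.' also matches '\n', so the message maps back to its template.
        System.out.println(Pattern.compile(regex, Pattern.DOTALL).matcher(multiLine).matches());  // true
      }
    }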
diff --git a/contrib/src/test/results/clientnegative/serde_regex.q.out b/contrib/src/test/results/clientnegative/serde_regex.q.out
index 69a5403..e7d3aa5 100644
--- a/contrib/src/test/results/clientnegative/serde_regex.q.out
+++ b/contrib/src/test/results/clientnegative/serde_regex.q.out
@@ -78,4 +78,4 @@ STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@serde_regex
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException org.apache.hadoop.hive.contrib.serde2.RegexSerDe only accepts string columns, but column[5] named status has type int)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException org.apache.hadoop.hive.contrib.serde2.RegexSerDe only accepts string columns, but column[5] named status has type int)
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
index ca06396..bef0a95 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
@@ -101,12 +101,15 @@ import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.nio.charset.StandardCharsets;
+import java.util.Base64;
 
 import static org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
 import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
 import static org.apache.hadoop.hive.metastore.Warehouse.getFs;
 import static org.apache.hadoop.hive.ql.exec.repl.ReplAck.LOAD_ACKNOWLEDGEMENT;
 import static org.apache.hadoop.hive.ql.exec.repl.ReplAck.DUMP_ACKNOWLEDGEMENT;
+import static org.apache.hadoop.hive.ql.exec.repl.ReplAck.NON_RECOVERABLE_MARKER;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
@@ -1139,7 +1142,7 @@ public class TestReplicationScenarios {
         driver.run("REPL DUMP " + dbName);
         assert false;
       } catch (CommandProcessorException e) {
-        assertTrue(e.getResponseCode() == ErrorMsg.REPL_EVENTS_MISSING_IN_METASTORE.getErrorCode());
+        assertTrue(e.getCauseMessage() == ErrorMsg.REPL_EVENTS_MISSING_IN_METASTORE.getMsg());
       }
       eventIdSkipper.assertInjectionsPerformed(true,false);
     } finally {
@@ -4347,4 +4350,18 @@ public class TestReplicationScenarios {
       }
     }
   }
+
+  public static Path getNonRecoverablePath(Path dumpDir, String dbName, HiveConf conf) throws IOException {
+    Path dumpPath = new Path(dumpDir,
+            Base64.getEncoder().encodeToString(dbName.toLowerCase()
+                    .getBytes(StandardCharsets.UTF_8.name())));
+    FileSystem fs = dumpPath.getFileSystem(conf);
+    if (fs.exists(dumpPath)) {
+      FileStatus[] statuses = fs.listStatus(dumpPath);
+      if (statuses.length > 0) {
+        return new Path(statuses[0].getPath(), NON_RECOVERABLE_MARKER.toString());
+      }
+    }
+    return null;
+  }
 }
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTablesBootstrap.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTablesBootstrap.java
index f52975a..145e19a 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTablesBootstrap.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTablesBootstrap.java
@@ -26,8 +26,10 @@ import org.apache.hadoop.hive.metastore.txn.TxnUtils;
 import org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore;
 import org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore.CallerArguments;
 import org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore.BehaviourInjection;
-import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 
 import org.junit.Assert;
 import org.junit.Test;
@@ -36,12 +38,11 @@ import org.junit.BeforeClass;
 import javax.annotation.Nullable;
 import java.io.File;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
-import java.util.Collections;
 import java.util.Map;
-
 /**
  * TestReplicationScenariosAcidTables - test bootstrap of ACID tables during an incremental.
  */
@@ -138,6 +139,11 @@ public class TestReplicationScenariosAcidTablesBootstrap
     } finally {
       InjectableBehaviourObjectStore.resetAlterTableModifier();
     }
+    Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR));
+    Path nonRecoverablePath = TestReplicationScenarios.getNonRecoverablePath(baseDumpDir, primaryDbName, primary.hiveConf);
+    if(nonRecoverablePath != null){
+      baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true);
+    }
     //Load again should succeed as checkpointing is in place
     replica.load(replicatedDbName, primaryDbName);
     verifyIncLoad(replicatedDbName, incDump.lastReplicationId);
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
index 0cc7282..dbdee9d 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
@@ -1571,6 +1571,14 @@ public class TestReplicationScenariosAcrossInstances extends BaseReplicationAcro
     // Retry with same dump with which it was already loaded should resume the bootstrap load. Make sure that table t1,
     // is loaded before t2. So that scope is set to table in first iteration for table t1. In the next iteration, it
     // loads only remaining partitions of t2, so that the table tracker has no tasks.
+
+    Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR));
+    Path nonRecoverablePath = getNonRecoverablePath(baseDumpDir, primaryDbName);
+    if(nonRecoverablePath != null){
+      baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true);
+    }
+
+
     List<String> withConfigs = Arrays.asList("'hive.in.repl.test.files.sorted'='true'");
     replica.load(replicatedDbName, primaryDbName, withConfigs);
 
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExternalTables.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExternalTables.java
index f4ef716..2b04ba6 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExternalTables.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExternalTables.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.parse.TestReplicationScenarios;
 import org.apache.hadoop.hive.ql.parse.repl.dump.Utils;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe;
@@ -909,6 +910,12 @@ public class TestReplicationScenariosExternalTables extends BaseReplicationAcros
       InjectableBehaviourObjectStore.resetAlterTableModifier();
     }
 
+    Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR));
+    Path nonRecoverablePath = TestReplicationScenarios.getNonRecoverablePath(baseDumpDir, primaryDbName, primary.hiveConf);
+    if(nonRecoverablePath != null){
+      baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true);
+    }
+
     replica.load(replicatedDbName, primaryDbName)
             .run("use " + replicatedDbName)
             .run("show tables like 't1'")
@@ -1324,5 +1331,4 @@ public class TestReplicationScenariosExternalTables extends BaseReplicationAcros
       return File.separator + dbName.toLowerCase() + File.separator + FILE_NAME;
     }
   }
-
 }
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java
index 7e76215..cfd7437 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java
@@ -33,6 +33,9 @@ import org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore;
 import org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore.BehaviourInjection;
 import org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore.CallerArguments;
 import org.apache.hadoop.hive.shims.Utils;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -342,7 +345,13 @@ public class TestStatsReplicationScenarios {
         failIncrementalLoad();
       }
     }
-
+    
+    Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR));
+    Path nonRecoverablePath = TestReplicationScenarios.getNonRecoverablePath(baseDumpDir, primaryDbName, primary.hiveConf);
+    if(nonRecoverablePath != null){
+      baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true);
+    }
+    
     // Load, possibly a retry
     replica.load(replicatedDbName, primaryDbName);
 
@@ -665,7 +674,7 @@ public class TestStatsReplicationScenarios {
     lastReplicationId = dumpLoadVerify(tableNames, lastReplicationId, parallelBootstrap,
             metadataOnly, false);
   }
-
+  
   @Test
   public void testNonParallelBootstrapLoad() throws Throwable {
     LOG.info("Testing " + testName.getClass().getName() + "." + testName.getMethodName());
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java
index 2130486..ef9ea73 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java
@@ -189,7 +189,7 @@ public class TestMetastoreAuthorizationProvider {
       driver.run(String.format("create table %s (a string) partitioned by (b string)", tblName));
       assert false;
     } catch (CommandProcessorException e) {
-      assertEquals(1, e.getResponseCode());
+      assertEquals(40000, e.getResponseCode());
     }
 
     // Even if table location is specified table creation should fail
@@ -202,7 +202,7 @@ public class TestMetastoreAuthorizationProvider {
         driver.run(String.format(
             "create table %s (a string) partitioned by (b string) location '" +tblLocation + "'", tblNameLoc));
       } catch (CommandProcessorException e) {
-        assertEquals(1, e.getResponseCode());
+        assertEquals(40000, e.getResponseCode());
       }
     }
 
@@ -265,7 +265,7 @@ public class TestMetastoreAuthorizationProvider {
     try {
       driver.run(String.format("create table %s (a string) partitioned by (b string)", tblName+"mal"));
     } catch (CommandProcessorException e) {
-      assertEquals(1, e.getResponseCode());
+      assertEquals(40000, e.getResponseCode());
     }
 
     ttbl.setTableName(tblName+"mal");
@@ -282,7 +282,7 @@ public class TestMetastoreAuthorizationProvider {
     try {
       driver.run("alter table "+tblName+" add partition (b='2011')");
     } catch (CommandProcessorException e) {
-      assertEquals(1, e.getResponseCode());
+      assertEquals(40000, e.getResponseCode());
     }
 
     List<String> ptnVals = new ArrayList<String>();
@@ -341,7 +341,7 @@ public class TestMetastoreAuthorizationProvider {
     try {
       driver.run("drop table "+tbl.getTableName());
     } catch (CommandProcessorException e) {
-      assertEquals(1, e.getResponseCode());
+      assertEquals(40000, e.getResponseCode());
     }
   }
 
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
index f4e7c4f..14eaf60 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
@@ -1117,8 +1117,8 @@ public class TestJdbcDriver2 {
     // codes and messages. This should be fixed.
     doTestErrorCase(
         "create table " + tableName + " (key int, value string)",
-        "FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask",
-        "08S01", 1);
+        "FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask",
+        "08S01", 40000);
   }
 
   private void doTestErrorCase(String sql, String expectedMessage,
diff --git a/kudu-handler/src/test/results/negative/kudu_config.q.out b/kudu-handler/src/test/results/negative/kudu_config.q.out
index b0071e7..c7525e9 100644
--- a/kudu-handler/src/test/results/negative/kudu_config.q.out
+++ b/kudu-handler/src/test/results/negative/kudu_config.q.out
@@ -4,4 +4,4 @@ TBLPROPERTIES ("kudu.table_name" = "default.kudu_kv")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@kv_table
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException java.io.IOException: Kudu master addresses are not specified in the table property (kudu.master_addresses), or default configuration (hive.kudu.master.addresses.default).)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException java.io.IOException: Kudu master addresses are not specified in the table property (kudu.master_addresses), or default configuration (hive.kudu.master.addresses.default).)
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLTask.java
index 3e44964..04b9451 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLTask.java
@@ -27,6 +27,7 @@ import java.util.Map;
 import java.util.Set;
 
 import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
 import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState;
 import org.apache.hadoop.hive.ql.plan.api.StageType;
 import org.reflections.Reflections;
@@ -67,6 +68,7 @@ public final class DDLTask extends Task<DDLWork> implements Serializable {
       return 0;
     }
 
+    DDLOperation ddlOperation = null;
     try {
       DDLDesc ddlDesc = work.getDDLDesc();
 
@@ -76,14 +78,18 @@ public final class DDLTask extends Task<DDLWork> implements Serializable {
         Class<? extends DDLOperation> ddlOpertaionClass = DESC_TO_OPARATION.get(ddlDesc.getClass());
         Constructor<? extends DDLOperation> constructor =
             ddlOpertaionClass.getConstructor(DDLOperationContext.class, ddlDesc.getClass());
-        DDLOperation ddlOperation = constructor.newInstance(ddlOperationContext, ddlDesc);
+        ddlOperation = constructor.newInstance(ddlOperationContext, ddlDesc);
         return ddlOperation.execute();
       } else {
         throw new IllegalArgumentException("Unknown DDL request: " + ddlDesc.getClass());
       }
     } catch (Throwable e) {
       failed(e);
-      return 1;
+      if(ddlOperation != null) {
+        LOG.error("DDLTask failed, DDL Operation: " + ddlOperation.getClass().toString(), e);
+      }
+      return ReplUtils.handleException(work.isReplication(), e, work.getDumpDirectory(),
+                                       work.getMetricCollector(), getName(), conf); 
     }
   }
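
DDLTask, like the other tasks touched below, now routes failures through ReplUtils.handleException so the replication metric collector records FAILED or FAILED_ADMIN and a non-recoverable marker is written for errors that need manual intervention. The full ReplUtils change (+95 lines in the diffstat) is not shown in this excerpt; the following is only a hypothetical sketch of what the helper plausibly does, inferred from its call sites and from the inline marker/metrics logic it replaces in AtlasDumpTask and RangerDumpTask further down, and it may differ from the real implementation:

    // Hypothetical sketch only -- the actual method lives in ReplUtils.java.
    public static int handleException(boolean isReplication, Throwable e, String dumpDirectory,
                                      ReplicationMetricCollector metricCollector, String stageName,
                                      HiveConf conf) {
      int errorCode = ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode();
      if (!isReplication) {
        // Non-replication callers get the ErrorMsg-derived code; this is consistent with the
        // .q.out files above now expecting 40000 instead of 1 for generic DDLTask failures.
        return errorCode;
      }
      try {
        if (errorCode > 40000) {
          // Non-recoverable: persist a marker with the stack trace so retries stop and
          // sys.replication_metrics reports FAILED_ADMIN rather than a stale status.
          Path marker = new Path(dumpDirectory, ReplAck.NON_RECOVERABLE_MARKER.toString());
          Utils.writeStackTrace(e, marker, conf);
          metricCollector.reportStageEnd(stageName, Status.FAILED_ADMIN, marker.toString());
        } else {
          metricCollector.reportStageEnd(stageName, Status.FAILED);
        }
      } catch (Exception ex) {
        LOG.error("Failed to collect replication metrics: ", ex);
      }
      return errorCode;
    }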
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork.java
index 6eea86b..d590e8d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLWork.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hive.ql.ddl;
 
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
+import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector;
 import org.apache.hadoop.hive.ql.plan.Explain;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
@@ -39,6 +40,19 @@ public final class DDLWork implements Serializable {
   private Set<ReadEntity> inputs;
   /** List of WriteEntities that are passed to the hooks. */
   private Set<WriteEntity> outputs;
+  private boolean isReplication;
+  private String dumpDirectory;
+  private transient ReplicationMetricCollector metricCollector;
+
+  public DDLWork(Set<ReadEntity> inputs, Set<WriteEntity> outputs, DDLDesc ddlDesc, boolean isReplication,
+                 String dumpDirectory, ReplicationMetricCollector metricCollector) {
+    this.inputs = inputs;
+    this.outputs = outputs;
+    this.ddlDesc = ddlDesc;
+    this.isReplication = isReplication;
+    this.dumpDirectory = dumpDirectory;
+    this.metricCollector = metricCollector;
+  }
 
   public DDLWork(Set<ReadEntity> inputs, Set<WriteEntity> outputs, DDLDesc ddlDesc) {
     this.inputs = inputs;
@@ -54,6 +68,18 @@ public final class DDLWork implements Serializable {
     return outputs;
   }
 
+  public ReplicationMetricCollector getMetricCollector() {
+    return metricCollector;
+  }
+
+  public String getDumpDirectory() {
+    return dumpDirectory;
+  }
+
+  public boolean isReplication() {
+    return isReplication;
+  }
+
   public boolean getNeedLock() {
     return needLock;
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java
index 7844dd6..d8492a1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java
@@ -355,8 +355,9 @@ public class ColumnStatsUpdateTask extends Task<ColumnStatsUpdateWork> {
     } catch (Exception e) {
       setException(e);
       LOG.info("Failed to persist stats in metastore", e);
+      return ReplUtils.handleException(work.isReplication(), e, work.getDumpDirectory(), work.getMetricCollector(),
+                                       getName(), conf);
     }
-    return 1;
   }
 
   @Override
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java
index 5ffc110..5b101df 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
 import org.apache.hadoop.hive.ql.plan.CopyWork;
 import org.apache.hadoop.hive.ql.plan.api.StageType;
 import org.apache.hadoop.util.StringUtils;
@@ -103,7 +104,10 @@ public class CopyTask extends Task<CopyWork> implements Serializable {
     } catch (Exception e) {
       console.printError("Failed with exception " + e.getMessage(), "\n"
           + StringUtils.stringifyException(e));
-      return (1);
+      LOG.error("CopyTask failed", e);
+      setException(e);
+      return ReplUtils.handleException(work.isReplication(), e, work.getDumpDirectory(), work.getMetricCollector(),
+              getName(), conf);
     }
   }
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index 5290501..31f634e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.ddl.DDLUtils;
 import org.apache.hadoop.hive.ql.exec.mr.MapRedTask;
 import org.apache.hadoop.hive.ql.exec.mr.MapredLocalTask;
+import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
 import org.apache.hadoop.hive.ql.hooks.LineageInfo.DataContainer;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
@@ -464,14 +465,17 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
           console.printInfo("\n", StringUtils.stringifyException(he),false);
         }
       }
-
       setException(he);
+      errorCode = ReplUtils.handleException(work.isReplication(), he, work.getDumpDirectory(),
+                                            work.getMetricCollector(), getName(), conf);
       return errorCode;
     } catch (Exception e) {
       console.printError("Failed with exception " + e.getMessage(), "\n"
           + StringUtils.stringifyException(e));
       setException(e);
-      return (1);
+      LOG.error("MoveTask failed", e);
+      return ReplUtils.handleException(work.isReplication(), e, work.getDumpDirectory(), work.getMetricCollector(),
+                                       getName(), conf);
     }
   }
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplCopyTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplCopyTask.java
index 1f40dd0..3b07b73 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplCopyTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplCopyTask.java
@@ -20,11 +20,11 @@ package org.apache.hadoop.hive.ql.exec;
 
 import org.apache.hadoop.hive.metastore.ReplChangeManager;
 import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.parse.EximUtil;
 import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
+import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector;
 import org.apache.hadoop.hive.ql.plan.CopyWork;
 import org.apache.hadoop.hive.ql.plan.ReplCopyWork;
 import org.apache.hadoop.hive.ql.parse.repl.CopyUtils;
@@ -166,7 +166,8 @@ public class ReplCopyTask extends Task<ReplCopyWork> implements Serializable {
     } catch (Exception e) {
       LOG.error(StringUtils.stringifyException(e));
       setException(e);
-      return ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode();
+      return ReplUtils.handleException(true, e, work.getDumpDirectory(), work.getMetricCollector(),
+              getName(), conf);
     }
   }
 
@@ -230,6 +231,14 @@ public class ReplCopyTask extends Task<ReplCopyWork> implements Serializable {
             readSourceAsFileList, false);
   }
 
+  public static Task<?> getLoadCopyTask(ReplicationSpec replicationSpec, Path srcPath, Path dstPath,
+                                        HiveConf conf, boolean isAutoPurge, boolean needRecycle,
+                                        boolean readSourceAsFileList, String dumpDirectory,
+                                        ReplicationMetricCollector metricCollector) {
+    return getLoadCopyTask(replicationSpec, srcPath, dstPath, conf, isAutoPurge, needRecycle,
+            readSourceAsFileList, false, dumpDirectory, metricCollector);
+  }
+
   private static Task<?> getLoadCopyTask(ReplicationSpec replicationSpec, Path srcPath, Path dstPath,
                                         HiveConf conf, boolean isAutoPurge, boolean needRecycle,
                                         boolean readSourceAsFileList,
@@ -259,12 +268,52 @@ public class ReplCopyTask extends Task<ReplCopyWork> implements Serializable {
     return copyTask;
   }
 
+  private static Task<?> getLoadCopyTask(ReplicationSpec replicationSpec, Path srcPath, Path dstPath,
+                                         HiveConf conf, boolean isAutoPurge, boolean needRecycle,
+                                         boolean readSourceAsFileList,
+                                         boolean overWrite,
+                                         String dumpDirectory,
+                                         ReplicationMetricCollector metricCollector) {
+    Task<?> copyTask = null;
+    LOG.debug("ReplCopyTask:getLoadCopyTask: {}=>{}", srcPath, dstPath);
+    if ((replicationSpec != null) && replicationSpec.isInReplicationScope()){
+      ReplCopyWork rcwork = new ReplCopyWork(srcPath, dstPath, false, overWrite, dumpDirectory,
+              metricCollector);
+      rcwork.setReadSrcAsFilesList(readSourceAsFileList);
+      if (replicationSpec.isReplace() && (conf.getBoolVar(REPL_ENABLE_MOVE_OPTIMIZATION))) {
+        rcwork.setDeleteDestIfExist(true);
+        rcwork.setAutoPurge(isAutoPurge);
+        rcwork.setNeedRecycle(needRecycle);
+      }
+      // For replace case, duplicate check should not be done. The new base directory will automatically make the older
+      // data invisible. Doing duplicate check and ignoring copy will cause consistency issue if there are multiple
+      // replace events getting replayed in the first incremental load.
+      rcwork.setCheckDuplicateCopy(replicationSpec.needDupCopyCheck() && !replicationSpec.isReplace());
+      LOG.debug("ReplCopyTask:\trcwork");
+      String distCpDoAsUser = conf.getVar(HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER);
+      rcwork.setDistCpDoAsUser(distCpDoAsUser);
+      copyTask = TaskFactory.get(rcwork, conf);
+    } else {
+      LOG.debug("ReplCopyTask:\tcwork");
+      copyTask = TaskFactory.get(new CopyWork(srcPath, dstPath, false, dumpDirectory, metricCollector, true), conf);
+    }
+    return copyTask;
+  }
+
+
   public static Task<?> getLoadCopyTask(ReplicationSpec replicationSpec, Path srcPath, Path dstPath,
                                         HiveConf conf) {
     return getLoadCopyTask(replicationSpec, srcPath, dstPath, conf, false, false,
       true, false);
   }
 
+  public static Task<?> getLoadCopyTask(ReplicationSpec replicationSpec, Path srcPath, Path dstPath,
+                                        HiveConf conf, String dumpDirectory, ReplicationMetricCollector metricCollector) {
+    return getLoadCopyTask(replicationSpec, srcPath, dstPath, conf, false, false,
+            true, false, dumpDirectory, metricCollector);
+  }
+
+
   /*
    * Invoked in the bootstrap path.
    * Overwrite set to true
@@ -274,4 +323,12 @@ public class ReplCopyTask extends Task<ReplCopyWork> implements Serializable {
     return getLoadCopyTask(replicationSpec, srcPath, dstPath, conf, false, false,
             readSourceAsFileList, overWrite);
   }
+
+  public static Task<?> getLoadCopyTask(ReplicationSpec replicationSpec, Path srcPath, Path dstPath,
+                                        HiveConf conf, boolean readSourceAsFileList, boolean overWrite,
+                                        String dumpDirectory, ReplicationMetricCollector metricCollector) {
+    return getLoadCopyTask(replicationSpec, srcPath, dstPath, conf, false, false,
+            readSourceAsFileList, overWrite, dumpDirectory, metricCollector);
+  }
+
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplTxnTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplTxnTask.java
index 48721d3..c9ad302 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplTxnTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplTxnTask.java
@@ -132,7 +132,9 @@ public class ReplTxnTask extends Task<ReplTxnWork> {
       console.printError("Failed with exception " + e.getMessage(), "\n"
           + StringUtils.stringifyException(e));
       setException(e);
-      return 1;
+      LOG.error("ReplTxnTask failed", e);
+      return ReplUtils.handleException(true, e, work.getDumpDirectory(), work.getMetricCollector(),
+              getName(), conf);
     }
   }
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AckTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AckTask.java
index 4dba12c..bf5ed6c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AckTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AckTask.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hive.ql.exec.repl;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.parse.repl.dump.Utils;
 import org.apache.hadoop.hive.ql.plan.api.StageType;
@@ -45,9 +46,16 @@ public class AckTask extends Task<AckWork> implements Serializable {
       Path ackPath = work.getAckFilePath();
       Utils.create(ackPath, conf);
       LOG.info("Created ack file : {} ", ackPath);
-    } catch (SemanticException e) {
+    } catch (Exception e) {
       setException(e);
-      return ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode();
+      int errorCode = ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode();
+      try {
+        return ReplUtils.handleException(true, e, work.getAckFilePath().getParent().getParent().toString(),
+                work.getMetricCollector(), getName(), conf);
+      } catch (Exception ex){
+        LOG.error("Failed to collect replication metrics: ", ex);
+        return errorCode;
+      }
     }
     return 0;
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AckWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AckWork.java
index 0fa0a95..8f9a237 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AckWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AckWork.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hive.ql.exec.repl;
 
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector;
 import org.apache.hadoop.hive.ql.plan.Explain;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
@@ -33,13 +34,22 @@ import java.io.Serializable;
 public class AckWork implements Serializable {
   private static final long serialVersionUID = 1L;
   private Path ackFilePath;
+  private transient ReplicationMetricCollector metricCollector;
 
   public Path getAckFilePath() {
     return ackFilePath;
   }
 
+  public ReplicationMetricCollector getMetricCollector() {
+    return metricCollector;
+  }
+
   public AckWork(Path ackFilePath) {
     this.ackFilePath = ackFilePath;
   }
 
+  public AckWork(Path ackFilePath, ReplicationMetricCollector metricCollector) {
+    this.ackFilePath = ackFilePath;
+    this.metricCollector = metricCollector;
+  }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasDumpTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasDumpTask.java
index 2f6b918..8036e51 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasDumpTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasDumpTask.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hive.ql.parse.repl.dump.Utils;
 import org.apache.hadoop.hive.ql.parse.repl.dump.log.AtlasDumpLogger;
 import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status;
 import org.apache.hadoop.hive.ql.plan.api.StageType;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -104,24 +105,27 @@ public class AtlasDumpTask extends Task<AtlasDumpWork> implements Serializable {
       replLogger.endLog(0L);
       work.getMetricCollector().reportStageEnd(getName(), Status.SUCCESS);
       return 0;
+    } catch (RuntimeException e) {
+      LOG.error("RuntimeException while dumping atlas metadata", e);
+      setException(e);
+      try{
+        ReplUtils.handleException(true, e, work.getStagingDir().getParent().toString(), work.getMetricCollector(),
+                getName(), conf);
+      } catch (Exception ex){
+        LOG.error("Failed to collect replication metrics: ", ex);
+      }
+      throw e;
     } catch (Exception e) {
       LOG.error("Exception while dumping atlas metadata", e);
       setException(e);
       int errorCode = ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode();
-      try {
-        if (errorCode > 40000) {
-          //Create non recoverable marker at top level
-          Path nonRecoverableMarker = new Path(work.getStagingDir().getParent(),
-            ReplAck.NON_RECOVERABLE_MARKER.toString());
-          Utils.writeStackTrace(e, nonRecoverableMarker, conf);
-          work.getMetricCollector().reportStageEnd(getName(), Status.FAILED_ADMIN, nonRecoverableMarker.toString());
-        } else {
-          work.getMetricCollector().reportStageEnd(getName(), Status.FAILED);
-        }
+      try{
+        return ReplUtils.handleException(true, e, work.getStagingDir().getParent().toString(), work.getMetricCollector(),
+                getName(), conf);
       } catch (Exception ex) {
-        LOG.error("Failed to collect Metrics ", ex);
+        LOG.error("Failed to collect replication metrics: ", ex);
+        return errorCode;
       }
-      return errorCode;
     }
   }
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasLoadTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasLoadTask.java
index 3a65847..a44aa43 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasLoadTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/AtlasLoadTask.java
@@ -88,24 +88,28 @@ public class AtlasLoadTask extends Task<AtlasLoadWork> implements Serializable {
       LOG.info("Atlas entities import count {}", importCount);
       work.getMetricCollector().reportStageEnd(getName(), Status.SUCCESS);
       return 0;
+    } catch (RuntimeException e) {
+      LOG.error("RuntimeException while loading atlas metadata", e);
+      setException(e);
+      try{
+        ReplUtils.handleException(true, e, work.getStagingDir().getParent().toString(), work.getMetricCollector(),
+                getName(), conf);
+      } catch (Exception ex){
+        LOG.error("Failed to collect replication metrics: ", ex);
+      }
+      throw e;
     } catch (Exception e) {
       LOG.error("Exception while loading atlas metadata", e);
       setException(e);
       int errorCode = ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode();
-      try {
-        if (errorCode > 40000) {
-          //Create non recoverable marker at top level
-          Path nonRecoverableMarker = new Path(work.getStagingDir().getParent(),
-            ReplAck.NON_RECOVERABLE_MARKER.toString());
-          Utils.writeStackTrace(e, nonRecoverableMarker, conf);
-          work.getMetricCollector().reportStageEnd(getName(), Status.FAILED_ADMIN, nonRecoverableMarker.toString());
-        } else {
-          work.getMetricCollector().reportStageEnd(getName(), Status.FAILED);
-        }
-      } catch (Exception ex) {
-        LOG.error("Failed to collect Metrics ", ex);
+      try{
+        return ReplUtils.handleException(true, e, work.getStagingDir().getParent().toString(), work.getMetricCollector(),
+                getName(), conf);
+      }
+      catch (Exception ex){
+        LOG.error("Failed to collect replication metrics: ", ex);
+        return errorCode;
       }
-      return errorCode;
     }
   }
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/DirCopyTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/DirCopyTask.java
index 5ed09f8..43d45a9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/DirCopyTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/DirCopyTask.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
 import org.apache.hadoop.hive.ql.exec.util.Retryable;
 import org.apache.hadoop.hive.ql.parse.repl.CopyUtils;
 import org.apache.hadoop.hive.ql.plan.api.StageType;
@@ -140,7 +141,10 @@ public class DirCopyTask extends Task<DirCopyWork> implements Serializable {
         }
       });
     } catch (Exception e) {
-      throw new SecurityException(ErrorMsg.REPL_RETRY_EXHAUSTED.format(e.getMessage()), e);
+      LOG.error("Replication failed ", e);
+      Exception ex = new SecurityException(ErrorMsg.REPL_RETRY_EXHAUSTED.format(e.getMessage()), e);
+      setException(ex);
+      return ReplUtils.handleException(true, ex, work.getDumpDirectory(), work.getMetricCollector(), getName(), conf);
     }
   }
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/DirCopyWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/DirCopyWork.java
index 04bbd56..c232dfa 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/DirCopyWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/DirCopyWork.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hive.ql.exec.repl;
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.ql.exec.repl.util.StringConvertibleObject;
+import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector;
 import org.apache.hadoop.hive.ql.plan.Explain;
 import java.io.Serializable;
 
@@ -33,8 +34,12 @@ public class DirCopyWork implements Serializable, StringConvertibleObject {
   private static final long serialVersionUID = 1L;
   private Path fullyQualifiedSourcePath;
   private Path fullyQualifiedTargetPath;
+  private String dumpDirectory;
+  private transient ReplicationMetricCollector metricCollector;
 
-  public DirCopyWork() {
+  public DirCopyWork(ReplicationMetricCollector metricCollector, String dumpDirectory) {
+    this.metricCollector = metricCollector;
+    this.dumpDirectory = dumpDirectory;
   }
 
   public DirCopyWork(Path fullyQualifiedSourcePath, Path fullyQualifiedTargetPath) {
@@ -57,6 +62,14 @@ public class DirCopyWork implements Serializable, StringConvertibleObject {
     return fullyQualifiedTargetPath;
   }
 
+  public ReplicationMetricCollector getMetricCollector() {
+    return metricCollector;
+  }
+
+  public String getDumpDirectory() {
+    return dumpDirectory;
+  }
+
   @Override
   public String convertToString() {
     StringBuilder objInStr = new StringBuilder();
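
Note on DirCopyWork: the new constructor threads the metric collector and the dump root through each external-table copy, so DirCopyTask can close the stage in the replication metrics when a copy finally gives up instead of only throwing. A minimal sketch of building such a task, mirroring the ReplDumpWork/ReplLoadWork call sites later in this patch (the method name and parameters here are illustrative, not part of the patch):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.exec.Task;
    import org.apache.hadoop.hive.ql.exec.TaskFactory;
    import org.apache.hadoop.hive.ql.exec.repl.DirCopyWork;
    import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector;

    // Illustrative only: build one external-table copy task that can report
    // FAILED/FAILED_ADMIN against the dump root if the copy is abandoned.
    static Task<DirCopyWork> externalCopyTask(ReplicationMetricCollector metricCollector,
                                              Path currentDumpPath, String serializedPaths,
                                              HiveConf conf) {
      DirCopyWork dirCopyWork = new DirCopyWork(metricCollector, currentDumpPath.toString());
      dirCopyWork.loadFromString(serializedPaths);  // fully qualified source/target paths
      return TaskFactory.get(dirCopyWork, conf);
    }
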
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerDumpTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerDumpTask.java
index eb2af1d..e7b403b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerDumpTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerDumpTask.java
@@ -134,24 +134,27 @@ public class RangerDumpTask extends Task<RangerDumpWork> implements Serializable
       LOG.debug("Ranger policy export filePath:" + filePath);
       LOG.info("Number of ranger policies exported {}", exportCount);
       return 0;
+    } catch (RuntimeException e) {
+      LOG.error("RuntimeException during Ranger dump", e);
+      setException(e);
+      try {
+        ReplUtils.handleException(true, e, work.getCurrentDumpPath().getParent().toString(),
+                work.getMetricCollector(), getName(), conf);
+      } catch (Exception ex) {
+        LOG.error("Failed to collect replication metrics: ", ex);
+      }
+      throw e;
     } catch (Exception e) {
-      LOG.error("failed", e);
+      LOG.error("Ranger Dump Failed: ", e);
       setException(e);
       int errorCode = ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode();
-      try {
-        if (errorCode > 40000) {
-          //Create non recoverable marker at top level
-          Path nonRecoverableMarker = new Path(work.getCurrentDumpPath().getParent(),
-            ReplAck.NON_RECOVERABLE_MARKER.toString());
-          Utils.writeStackTrace(e, nonRecoverableMarker, conf);
-          work.getMetricCollector().reportStageEnd(getName(), Status.FAILED_ADMIN, nonRecoverableMarker.toString());
-        } else {
-          work.getMetricCollector().reportStageEnd(getName(), Status.FAILED);
-        }
-      } catch (Exception ex) {
-        LOG.error("Failed to collect Metrics ", ex);
+      try {
+        return ReplUtils.handleException(true, e, work.getCurrentDumpPath().getParent().toString(),
+                work.getMetricCollector(), getName(), conf);
+      } catch (Exception ex) {
+        LOG.error("Failed to collect replication metrics: ", ex);
+        return errorCode;
       }
-      return errorCode;
     }
   }
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerLoadTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerLoadTask.java
index 0049f76..63fad4b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerLoadTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/RangerLoadTask.java
@@ -152,24 +152,27 @@ public class RangerLoadTask extends Task<RangerLoadWork> implements Serializable
       }
       work.getMetricCollector().reportStageEnd(getName(), Status.SUCCESS);
       return 0;
-    } catch (Exception e) {
-      LOG.error("Failed", e);
+    } catch (RuntimeException e) {
+      LOG.error("Runtime Excepton during RangerLoad", e);
       setException(e);
+      try {
+        ReplUtils.handleException(true, e, work.getCurrentDumpPath().getParent().toString(), work.getMetricCollector(),
+                getName(), conf);
+      } catch (Exception ex) {
+        LOG.error("Failed to collect replication metrics: ", ex);
+      }
+      throw e;
+    } catch (Exception e) {
+      LOG.error("RangerLoad Failed", e);
       int errorCode = ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode();
-      try {
-        if (errorCode > 40000) {
-          //Create non recoverable marker at top level
-          Path nonRecoverableMarker = new Path(work.getCurrentDumpPath().getParent(),
-            ReplAck.NON_RECOVERABLE_MARKER.toString());
-          Utils.writeStackTrace(e, nonRecoverableMarker, conf);
-          work.getMetricCollector().reportStageEnd(getName(), Status.FAILED_ADMIN, nonRecoverableMarker.toString());
-        } else {
-          work.getMetricCollector().reportStageEnd(getName(), Status.FAILED);
-        }
-      } catch (Exception ex) {
-        LOG.error("Failed to collect Metrics ", ex);
+      setException(e);
+      try {
+        return ReplUtils.handleException(true, e, work.getCurrentDumpPath().getParent().toString(), work.getMetricCollector(),
+                getName(), conf);
+      } catch (Exception ex) {
+        LOG.error("Failed to collect replication metrics: ", ex);
+        return errorCode;
       }
-      return errorCode;
     }
   }
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
index 4630f95..ea9bf9a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hive.ql.exec.repl.util.TaskTracker;
 import org.apache.hadoop.hive.ql.exec.util.DAGTraversal;
 import org.apache.hadoop.hive.ql.exec.util.Retryable;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.io.orc.ExternalCache;
 import org.apache.hadoop.hive.ql.lockmgr.DbLockManager;
 import org.apache.hadoop.hive.ql.lockmgr.HiveLockManager;
 import org.apache.hadoop.hive.ql.lockmgr.LockException;
@@ -193,23 +194,27 @@ public class ReplDumpTask extends Task<ReplDumpWork> implements Serializable {
           LOG.info("Previous Dump is not yet loaded");
         }
       }
+    } catch (RuntimeException e) {
+      LOG.error("replication failed with run time exception", e);
+      setException(e);
+      try{
+        ReplUtils.handleException(true, e, work.getCurrentDumpPath().toString(),
+                work.getMetricCollector(), getName(), conf);
+      } catch (Exception ex){
+        LOG.error("Failed to collect replication metrics: ", ex);
+      }
+      throw e;
     } catch (Exception e) {
-      LOG.error("failed", e);
       setException(e);
       int errorCode = ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode();
-      try {
-        if (errorCode > 40000) {
-          Path nonRecoverableMarker = new Path(work.getCurrentDumpPath(),
-            ReplAck.NON_RECOVERABLE_MARKER.toString());
-          Utils.writeStackTrace(e, nonRecoverableMarker, conf);
-          work.getMetricCollector().reportStageEnd(getName(), Status.FAILED_ADMIN, nonRecoverableMarker.toString());
-        } else {
-          work.getMetricCollector().reportStageEnd(getName(), Status.FAILED);
-        }
-      } catch (Exception ex) {
-        LOG.error("Failed to collect Metrics", ex);
+      try {
+        return ReplUtils.handleException(true, e, work.getCurrentDumpPath().toString(),
+                work.getMetricCollector(), getName(), conf);
+      }
+      catch (Exception ex) {
+        LOG.error("Failed to collect replication metrics: ", ex);
+        return errorCode;
       }
-      return errorCode;
     }
     return 0;
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpWork.java
index 64b9dd3..4da5bac 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpWork.java
@@ -183,7 +183,7 @@ public class ReplDumpWork implements Serializable {
     }
     List<Task<?>> tasks = new ArrayList<>();
     while (externalTblCopyPathIterator.hasNext() && tracker.canAddMoreTasks()) {
-      DirCopyWork dirCopyWork = new DirCopyWork();
+      DirCopyWork dirCopyWork = new DirCopyWork(metricCollector, currentDumpPath.toString());
       dirCopyWork.loadFromString(externalTblCopyPathIterator.next());
       Task<DirCopyWork> task = TaskFactory.get(dirCopyWork, conf);
       tasks.add(task);
@@ -206,7 +206,8 @@ public class ReplDumpWork implements Serializable {
       managedTableCopyPath.loadFromString(managedTblCopyPathIterator.next());
       Task<?> copyTask = ReplCopyTask.getLoadCopyTask(
               managedTableCopyPath.getReplicationSpec(), managedTableCopyPath.getSrcPath(),
-              managedTableCopyPath.getTargetPath(), conf, false, shouldOverwrite);
+              managedTableCopyPath.getTargetPath(), conf, false, shouldOverwrite,
+              getCurrentDumpPath().toString(), getMetricCollector());
       tasks.add(copyTask);
       tracker.addTask(copyTask);
       LOG.debug("added task for {}", managedTableCopyPath);
@@ -220,7 +221,8 @@ public class ReplDumpWork implements Serializable {
       while (functionCopyPathIterator.hasNext() && tracker.canAddMoreTasks()) {
         EximUtil.DataCopyPath binaryCopyPath = functionCopyPathIterator.next();
         Task<?> copyTask = ReplCopyTask.getLoadCopyTask(
-                binaryCopyPath.getReplicationSpec(), binaryCopyPath.getSrcPath(), binaryCopyPath.getTargetPath(), conf
+                binaryCopyPath.getReplicationSpec(), binaryCopyPath.getSrcPath(), binaryCopyPath.getTargetPath(), conf,
+                getCurrentDumpPath().toString(), getMetricCollector()
         );
         tasks.add(copyTask);
         tracker.addTask(copyTask);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java
index 3c6fdaa..16c906c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java
@@ -64,6 +64,7 @@ import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
 import org.apache.hadoop.hive.ql.parse.repl.ReplLogger;
 import org.apache.hadoop.hive.ql.parse.repl.dump.Utils;
 import org.apache.hadoop.hive.ql.parse.repl.load.MetaData;
+import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector;
 import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status;
 import org.apache.hadoop.hive.ql.plan.api.StageType;
 
@@ -84,6 +85,7 @@ import static org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils.RANGER_AUTHORIZ
 public class ReplLoadTask extends Task<ReplLoadWork> implements Serializable {
   private static final long serialVersionUID = 1L;
   private final static int ZERO_TASKS = 0;
+  private static final String STAGE_NAME = "REPL_LOAD";
 
   @Override
   public String getName() {
@@ -128,29 +130,25 @@ public class ReplLoadTask extends Task<ReplLoadWork> implements Serializable {
       }
     } catch (RuntimeException e) {
       LOG.error("replication failed with run time exception", e);
+      setException(e);
       try {
-        work.getMetricCollector().reportEnd(Status.FAILED);
-      } catch (SemanticException ex) {
-        LOG.error("Failed to collect Metrics ", ex);
+        ReplUtils.handleException(true, e, new Path(work.getDumpDirectory()).getParent().toString(),
+                work.getMetricCollector(), STAGE_NAME, conf);
+      } catch (Exception ex) {
+        LOG.error("Failed to collect replication metrics: ", ex);
       }
       throw e;
     } catch (Exception e) {
-      LOG.error("replication failed", e);
       setException(e);
       int errorCode = ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode();
       try {
-        if (errorCode > 40000) {
-          Path nonRecoverableMarker = new Path(new Path(work.dumpDirectory).getParent(),
-            ReplAck.NON_RECOVERABLE_MARKER.toString());
-          Utils.writeStackTrace(e, nonRecoverableMarker, conf);
-          work.getMetricCollector().reportStageEnd(getName(), Status.FAILED_ADMIN, nonRecoverableMarker.toString());
-        } else {
-          work.getMetricCollector().reportStageEnd(getName(), Status.FAILED);
-        }
-      } catch (Exception ex) {
-        LOG.error("Failed to collect Metrics ", ex);
+        return ReplUtils.handleException(true, e, new Path(work.getDumpDirectory()).getParent().toString(),
+                work.getMetricCollector(), STAGE_NAME, conf);
+      }
+      catch (Exception ex) {
+        LOG.error("Failed to collect replication metrics: ", ex);
+        return errorCode;
       }
-      return errorCode;
     }
   }
 
@@ -230,7 +228,8 @@ public class ReplLoadTask extends Task<ReplLoadWork> implements Serializable {
       switch (next.eventType()) {
       case Database:
         DatabaseEvent dbEvent = (DatabaseEvent) next;
-        dbTracker = new LoadDatabase(loadContext, dbEvent, work.dbNameToLoadIn, loadTaskTracker).tasks();
+        dbTracker = new LoadDatabase(loadContext, dbEvent, work.dbNameToLoadIn, loadTaskTracker,
+                work.getMetricCollector()).tasks();
         loadTaskTracker.update(dbTracker);
         if (work.hasDbState()) {
           loadTaskTracker.update(updateDatabaseLastReplID(maxTasks, loadContext, scope));
@@ -257,7 +256,8 @@ public class ReplLoadTask extends Task<ReplLoadWork> implements Serializable {
         FSTableEvent tableEvent = (FSTableEvent) next;
         if (TableType.VIRTUAL_VIEW.name().equals(tableEvent.getMetaData().getTable().getTableType())) {
           tableTracker = new TaskTracker(1);
-          tableTracker.addTask(createViewTask(tableEvent.getMetaData(), work.dbNameToLoadIn, conf));
+          tableTracker.addTask(createViewTask(tableEvent.getMetaData(), work.dbNameToLoadIn, conf,
+                  (new Path(work.dumpDirectory).getParent()).toString(), work.getMetricCollector()));
         } else {
           LoadTable loadTable = new LoadTable(tableEvent, loadContext, iterator.replLogger(), tableContext,
               loadTaskTracker, work.getMetricCollector());
@@ -388,7 +388,8 @@ public class ReplLoadTask extends Task<ReplLoadWork> implements Serializable {
                                               TaskTracker dbTracker,
                                               Scope scope) throws IOException, SemanticException {
     LoadConstraint loadConstraint =
-        new LoadConstraint(loadContext, (ConstraintEvent) next, work.dbNameToLoadIn, dbTracker);
+        new LoadConstraint(loadContext, (ConstraintEvent) next, work.dbNameToLoadIn, dbTracker,
+                (new Path(work.dumpDirectory)).getParent().toString(), work.getMetricCollector());
     TaskTracker constraintTracker = loadConstraint.tasks();
     scope.rootTasks.addAll(constraintTracker.tasks());
     constraintTracker.debugLog("constraints");
@@ -398,7 +399,8 @@ public class ReplLoadTask extends Task<ReplLoadWork> implements Serializable {
   private TaskTracker addLoadFunctionTasks(Context loadContext, BootstrapEventsIterator iterator, BootstrapEvent next,
                                     TaskTracker dbTracker, Scope scope) throws IOException, SemanticException {
     LoadFunction loadFunction = new LoadFunction(loadContext, iterator.replLogger(),
-        (FunctionEvent) next, work.dbNameToLoadIn, dbTracker, work.getMetricCollector());
+            (FunctionEvent) next, work.dbNameToLoadIn, dbTracker, (new Path(work.dumpDirectory)).getParent().toString(),
+            work.getMetricCollector());
     TaskTracker functionsTracker = loadFunction.tasks();
     if (!scope.database) {
       scope.rootTasks.addAll(functionsTracker.tasks());
@@ -432,6 +434,31 @@ public class ReplLoadTask extends Task<ReplLoadWork> implements Serializable {
     return TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), desc), conf);
   }
 
+  public static Task<?> createViewTask(MetaData metaData, String dbNameToLoadIn, HiveConf conf,
+                                       String dumpDirectory, ReplicationMetricCollector metricCollector)
+          throws SemanticException {
+    Table table = new Table(metaData.getTable());
+    String dbName = dbNameToLoadIn == null ? table.getDbName() : dbNameToLoadIn;
+    TableName tableName = HiveTableName.ofNullable(table.getTableName(), dbName);
+    String dbDotView = tableName.getNotEmptyDbTable();
+
+    String viewOriginalText = table.getViewOriginalText();
+    String viewExpandedText = table.getViewExpandedText();
+    if (!dbName.equals(table.getDbName())) {
+      // TODO: If the DB name doesn't match with the metadata from dump, then need to rewrite the original and expanded
+      // texts using new DB name. Currently it refers to the source database name.
+    }
+
+    CreateViewDesc desc = new CreateViewDesc(dbDotView, table.getAllCols(), null, table.getParameters(),
+            table.getPartColNames(), false, false, viewOriginalText, viewExpandedText, table.getPartCols());
+
+    desc.setReplicationSpec(metaData.getReplicationSpec());
+    desc.setOwnerName(table.getOwner());
+
+    return TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), desc, true,
+            dumpDirectory, metricCollector), conf);
+  }
+
   /**
    * If replication policy is changed between previous and current load, then the excluded tables in
    * the new replication policy will be dropped.
@@ -467,7 +494,7 @@ public class ReplLoadTask extends Task<ReplLoadWork> implements Serializable {
         || (!work.isIncrementalLoad() && !work.hasBootstrapLoadTasks())) {
       //All repl load tasks are executed and status is 0, create the task to add the acknowledgement
       AckWork replLoadAckWork = new AckWork(
-          new Path(work.dumpDirectory, LOAD_ACKNOWLEDGEMENT.toString()));
+              new Path(work.dumpDirectory, LOAD_ACKNOWLEDGEMENT.toString()), work.getMetricCollector());
       Task<AckWork> loadAckWorkTask = TaskFactory.get(replLoadAckWork, conf);
       if (childTasks.isEmpty()) {
         childTasks.add(loadAckWorkTask);
@@ -488,7 +515,9 @@ public class ReplLoadTask extends Task<ReplLoadWork> implements Serializable {
       Database dbInMetadata = work.databaseEvent(context.hiveConf).dbInMetadata(work.dbNameToLoadIn);
       dbProps = dbInMetadata.getParameters();
     }
-    ReplStateLogWork replLogWork = new ReplStateLogWork(replLogger, dbProps, work.getMetricCollector());
+    ReplStateLogWork replLogWork = new ReplStateLogWork(replLogger, dbProps,
+                                                        (new Path(work.dumpDirectory).getParent()).toString(),
+                                                        work.getMetricCollector());
     Task<ReplStateLogWork> replLogTask = TaskFactory.get(replLogWork, conf);
     if (scope.rootTasks.isEmpty()) {
       scope.rootTasks.add(replLogTask);
@@ -514,7 +543,7 @@ public class ReplLoadTask extends Task<ReplLoadWork> implements Serializable {
    */
     TaskTracker taskTracker =
         new AlterDatabase(context, work.databaseEvent(context.hiveConf), work.dbNameToLoadIn,
-            new TaskTracker(maxTasks)).tasks();
+            new TaskTracker(maxTasks), work.getMetricCollector()).tasks();
 
     AddDependencyToLeaves function = new AddDependencyToLeaves(taskTracker.tasks());
     DAGTraversal.traverse(scope.rootTasks, function);
@@ -604,7 +633,8 @@ public class ReplLoadTask extends Task<ReplLoadWork> implements Serializable {
             new AlterDatabaseSetPropertiesDesc(dbName, mapProp,
                 new ReplicationSpec(lastEventid, lastEventid));
         Task<?> updateReplIdTask =
-            TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), alterDbDesc), conf);
+            TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), alterDbDesc, true,
+                    (new Path(work.dumpDirectory).getParent()).toString(), work.getMetricCollector()), conf);
         DAGTraversal.traverse(childTasks, new AddDependencyToLeaves(updateReplIdTask));
         work.setLastReplIDUpdated(true);
         LOG.debug("Added task to set last repl id of db " + dbName + " to " + lastEventid);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java
index 4050235..376fd7c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java
@@ -163,6 +163,8 @@ public class ReplLoadWork implements Serializable {
     return rootTask;
   }
 
+  public String getDumpDirectory() { return dumpDirectory; }
+
   public void setRootTask(Task<?> rootTask) {
     this.rootTask = rootTask;
   }
@@ -193,7 +195,7 @@ public class ReplLoadWork implements Serializable {
     }
     List<Task<?>> tasks = new ArrayList<>();
     while (externalTableDataCopyItr.hasNext() && tracker.canAddMoreTasks()) {
-      DirCopyWork dirCopyWork = new DirCopyWork();
+      DirCopyWork dirCopyWork = new DirCopyWork(metricCollector, (new Path(dumpDirectory).getParent()).toString());
       dirCopyWork.loadFromString(externalTableDataCopyItr.next());
       Task<DirCopyWork> task = TaskFactory.get(dirCopyWork, conf);
       tasks.add(task);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStateLogTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStateLogTask.java
index 240f5a7..230f056 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStateLogTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStateLogTask.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.ql.exec.repl;
 
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.api.StageType;
 
@@ -38,10 +39,11 @@ public class ReplStateLogTask extends Task<ReplStateLogWork> implements Serializ
   public int execute() {
     try {
       work.replStateLog();
-    } catch (SemanticException e) {
+    } catch (Exception e) {
       LOG.error("Exception while logging metrics ", e);
       setException(e);
-      return ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode();
+      return ReplUtils.handleException(true, e, work.getDumpDirectory(), work.getMetricCollector(),
+              getName(), conf);
     }
     return 0;
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStateLogWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStateLogWork.java
index 5bd7bda..ce9b545 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStateLogWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStateLogWork.java
@@ -48,6 +48,7 @@ public class ReplStateLogWork implements Serializable {
   private TableType tableType;
   private String functionName;
   private String lastReplId;
+  String dumpDirectory;
   private final transient ReplicationMetricCollector metricCollector;
 
   private enum LOG_TYPE {
@@ -67,6 +68,16 @@ public class ReplStateLogWork implements Serializable {
   }
 
   public ReplStateLogWork(ReplLogger replLogger, ReplicationMetricCollector metricCollector,
+                          String eventId, String eventType, String dumpDirectory) {
+    this.logType = LOG_TYPE.EVENT;
+    this.replLogger = replLogger;
+    this.eventId = eventId;
+    this.eventType = eventType;
+    this.metricCollector = metricCollector;
+    this.dumpDirectory = dumpDirectory;
+  }
+
+  public ReplStateLogWork(ReplLogger replLogger, ReplicationMetricCollector metricCollector,
                           String tableName, TableType tableType) {
     this.logType = LOG_TYPE.TABLE;
     this.replLogger = replLogger;
@@ -75,6 +86,16 @@ public class ReplStateLogWork implements Serializable {
     this.metricCollector = metricCollector;
   }
 
+  public ReplStateLogWork(ReplLogger replLogger, ReplicationMetricCollector metricCollector,
+                          String tableName, TableType tableType, String dumpDirectory) {
+    this.logType = LOG_TYPE.TABLE;
+    this.replLogger = replLogger;
+    this.tableName = tableName;
+    this.tableType = tableType;
+    this.metricCollector = metricCollector;
+    this.dumpDirectory = dumpDirectory;
+  }
+
   public ReplStateLogWork(ReplLogger replLogger, String functionName, ReplicationMetricCollector metricCollector) {
     this.logType = LOG_TYPE.FUNCTION;
     this.replLogger = replLogger;
@@ -82,6 +103,14 @@ public class ReplStateLogWork implements Serializable {
     this.metricCollector = metricCollector;
   }
 
+  public ReplStateLogWork(ReplLogger replLogger, String functionName, String dumpDirectory, ReplicationMetricCollector metricCollector) {
+    this.logType = LOG_TYPE.FUNCTION;
+    this.replLogger = replLogger;
+    this.functionName = functionName;
+    this.dumpDirectory = dumpDirectory;
+    this.metricCollector = metricCollector;
+  }
+
   public ReplStateLogWork(ReplLogger replLogger, Map<String, String> dbProps, ReplicationMetricCollector collector) {
     this.logType = LOG_TYPE.END;
     this.replLogger = replLogger;
@@ -89,6 +118,19 @@ public class ReplStateLogWork implements Serializable {
     this.metricCollector = collector;
   }
 
+  public ReplStateLogWork(ReplLogger replLogger, Map<String, String> dbProps, String dumpDirectory, ReplicationMetricCollector collector) {
+    this.logType = LOG_TYPE.END;
+    this.replLogger = replLogger;
+    this.lastReplId = ReplicationSpec.getLastReplicatedStateFromParameters(dbProps);
+    this.dumpDirectory = dumpDirectory;
+    this.metricCollector = collector;
+  }
+
+
+  public ReplicationMetricCollector getMetricCollector() { return metricCollector; }
+
+  public String getDumpDirectory() { return dumpDirectory; }
+
   public void replStateLog() throws SemanticException {
     switch (logType) {
     case TABLE:
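
The additional ReplStateLogWork constructors mirror the existing ones but also carry the dump directory, which ReplStateLogTask (above) hands to ReplUtils.handleException as the non-recoverable marker location when logging itself fails. Roughly how the incremental load builder (further below) creates the EVENT-type work, with replLogger, metricCollector, eventDir, eventDmd and conf assumed from the surrounding builder state:

    // Sketch of the EVENT-type overload: the trailing dumpDirectory lets the
    // log task report a failure against the right dump root.
    ReplStateLogWork replStateLogWork = new ReplStateLogWork(replLogger, metricCollector,
            eventDir.getPath().getName(),          // event id
            eventDmd.getDumpType().toString(),     // event type
            dumpDirectory);                        // parent of the load path
    Task<?> barrierTask = TaskFactory.get(replStateLogWork, conf);
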
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadConstraint.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadConstraint.java
index e95dbb7..b131b8e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadConstraint.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadConstraint.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.hive.ql.parse.repl.load.message.AddNotNullConstraintHan
 import org.apache.hadoop.hive.ql.parse.repl.load.message.AddPrimaryKeyHandler;
 import org.apache.hadoop.hive.ql.parse.repl.load.message.AddUniqueConstraintHandler;
 import org.apache.hadoop.hive.ql.parse.repl.load.message.MessageHandler;
+import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector;
 import org.json.JSONObject;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -71,6 +72,8 @@ public class LoadConstraint {
   private final String dbNameToLoadIn;
   private final TaskTracker tracker;
   private final MessageDeserializer deserializer = JSONMessageEncoder.getInstance().getDeserializer();
+  String dumpDirectory;
+  private transient ReplicationMetricCollector metricCollector;
 
   public LoadConstraint(Context context, ConstraintEvent event, String dbNameToLoadIn,
       TaskTracker existingTracker) {
@@ -80,6 +83,17 @@ public class LoadConstraint {
     this.tracker = new TaskTracker(existingTracker);
   }
 
+  public LoadConstraint(Context context, ConstraintEvent event, String dbNameToLoadIn,
+                        TaskTracker existingTracker, String dumpDirectory,
+                        ReplicationMetricCollector metricCollector) {
+    this.context = context;
+    this.event = event;
+    this.dbNameToLoadIn = dbNameToLoadIn;
+    this.tracker = new TaskTracker(existingTracker);
+    this.dumpDirectory = dumpDirectory;
+    this.metricCollector = metricCollector;
+  }
+
   public TaskTracker tasks() throws IOException, SemanticException {
     URI fromURI = EximUtil
         .getValidatedURI(context.hiveConf, stripQuotes(event.rootDir().toUri().toString()));
@@ -104,7 +118,7 @@ public class LoadConstraint {
         tasks.addAll(pkHandler.handle(
             new MessageHandler.Context(
                 dbNameToLoadIn, fromPath.toString(), null, pkDumpMetaData, context.hiveConf,
-                context.hiveDb, context.nestedContext, LOG)));
+                context.hiveDb, context.nestedContext, LOG, dumpDirectory, metricCollector)));
       }
 
       if (StringUtils.isNotEmpty(StringUtils.trim(uksString)) && !isUniqueConstraintsAlreadyLoaded(uksString)) {
@@ -115,7 +129,7 @@ public class LoadConstraint {
         tasks.addAll(ukHandler.handle(
             new MessageHandler.Context(
                 dbNameToLoadIn, fromPath.toString(), null, ukDumpMetaData, context.hiveConf,
-                context.hiveDb, context.nestedContext, LOG)));
+                context.hiveDb, context.nestedContext, LOG, dumpDirectory, metricCollector)));
       }
 
       if (StringUtils.isNotEmpty(StringUtils.trim(nnsString)) && !isNotNullConstraintsAlreadyLoaded(nnsString)) {
@@ -126,7 +140,7 @@ public class LoadConstraint {
         tasks.addAll(nnHandler.handle(
             new MessageHandler.Context(
                 dbNameToLoadIn, fromPath.toString(), null, nnDumpMetaData, context.hiveConf,
-                context.hiveDb, context.nestedContext, LOG)));
+                context.hiveDb, context.nestedContext, LOG, dumpDirectory, metricCollector)));
       }
 
       if (StringUtils.isNotEmpty(StringUtils.trim(dksString)) && !isDefaultConstraintsAlreadyLoaded(dksString)) {
@@ -137,7 +151,7 @@ public class LoadConstraint {
         tasks.addAll(dkHandler.handle(
             new MessageHandler.Context(
                 dbNameToLoadIn, fromPath.toString(), null, dkDumpMetaData, context.hiveConf,
-                context.hiveDb, context.nestedContext, LOG)));
+                context.hiveDb, context.nestedContext, LOG, dumpDirectory, metricCollector)));
       }
 
       if (StringUtils.isNotEmpty(StringUtils.trim(cksString))  && !isCheckConstraintsAlreadyLoaded(cksString)) {
@@ -148,7 +162,7 @@ public class LoadConstraint {
         tasks.addAll(ckHandler.handle(
             new MessageHandler.Context(
                 dbNameToLoadIn, fromPath.toString(), null, dkDumpMetaData, context.hiveConf,
-                context.hiveDb, context.nestedContext, LOG)));
+                context.hiveDb, context.nestedContext, LOG, dumpDirectory, metricCollector)));
       }
 
       if (StringUtils.isNotEmpty(StringUtils.trim(fksString)) && !isForeignKeysAlreadyLoaded(fksString)) {
@@ -159,7 +173,7 @@ public class LoadConstraint {
         tasks.addAll(fkHandler.handle(
             new MessageHandler.Context(
                 dbNameToLoadIn, fromPath.toString(), null, fkDumpMetaData, context.hiveConf,
-                context.hiveDb, context.nestedContext, LOG)));
+                context.hiveDb, context.nestedContext, LOG, dumpDirectory, metricCollector)));
       }
 
       tasks.forEach(tracker::addTask);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadDatabase.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadDatabase.java
index 41e09e1..343347a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadDatabase.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadDatabase.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hive.ql.exec.repl.bootstrap.load;
 
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
@@ -36,6 +37,7 @@ import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
 import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils.ReplLoadOpType;
+import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector;
 
 import java.io.Serializable;
 import java.util.HashMap;
@@ -50,6 +52,7 @@ public class LoadDatabase {
 
   private final DatabaseEvent event;
   private final String dbNameToLoadIn;
+  transient ReplicationMetricCollector metricCollector;
 
   public LoadDatabase(Context context, DatabaseEvent event, String dbNameToLoadIn, TaskTracker loadTaskTracker) {
     this.context = context;
@@ -58,6 +61,15 @@ public class LoadDatabase {
     this.tracker = new TaskTracker(loadTaskTracker);
   }
 
+  public LoadDatabase(Context context, DatabaseEvent event, String dbNameToLoadIn,
+                      TaskTracker loadTaskTracker, ReplicationMetricCollector metricCollector) {
+    this.context = context;
+    this.event = event;
+    this.dbNameToLoadIn = dbNameToLoadIn;
+    this.tracker = new TaskTracker(loadTaskTracker);
+    this.metricCollector = metricCollector;
+  }
+
   public TaskTracker tasks() throws Exception {
     Database dbInMetadata = readDbMetadata();
     String dbName = dbInMetadata.getName();
@@ -123,19 +135,21 @@ public class LoadDatabase {
     // If it exists, we want this to be an error condition. Repl Load is not intended to replace a
     // db.
     // TODO: we might revisit this in create-drop-recreate cases, needs some thinking on.
-    DDLWork work = new DDLWork(new HashSet<>(), new HashSet<>(), createDbDesc);
+    DDLWork work = new DDLWork(new HashSet<>(), new HashSet<>(), createDbDesc, true,
+            (new Path(context.dumpDirectory)).getParent().toString(), this.metricCollector);
     return TaskFactory.get(work, context.hiveConf);
   }
 
   private Task<?> alterDbTask(Database dbObj) {
     return alterDbTask(dbObj.getName(), updateDbProps(dbObj, context.dumpDirectory),
-            context.hiveConf);
+            context.hiveConf, context.dumpDirectory, this.metricCollector);
   }
 
   private Task<?> setOwnerInfoTask(Database dbObj) {
     AlterDatabaseSetOwnerDesc alterDbDesc = new AlterDatabaseSetOwnerDesc(dbObj.getName(),
         new PrincipalDesc(dbObj.getOwnerName(), dbObj.getOwnerType()), null);
-    DDLWork work = new DDLWork(new HashSet<>(), new HashSet<>(), alterDbDesc);
+    DDLWork work = new DDLWork(new HashSet<>(), new HashSet<>(), alterDbDesc, true,
+            (new Path(context.dumpDirectory)).getParent().toString(), this.metricCollector);
     return TaskFactory.get(work, context.hiveConf);
   }
 
@@ -165,9 +179,11 @@ public class LoadDatabase {
   }
 
   private static Task<?> alterDbTask(String dbName, Map<String, String> props,
-                                                          HiveConf hiveConf) {
+                                     HiveConf hiveConf, String dumpDirectory,
+                                     ReplicationMetricCollector metricCollector) {
     AlterDatabaseSetPropertiesDesc alterDbDesc = new AlterDatabaseSetPropertiesDesc(dbName, props, null);
-    DDLWork work = new DDLWork(new HashSet<>(), new HashSet<>(), alterDbDesc);
+    DDLWork work = new DDLWork(new HashSet<>(), new HashSet<>(), alterDbDesc, true,
+            (new Path(dumpDirectory)).getParent().toString(), metricCollector);
     return TaskFactory.get(work, hiveConf);
   }
 
@@ -178,10 +194,16 @@ public class LoadDatabase {
       super(context, event, dbNameToLoadIn, loadTaskTracker);
     }
 
+    public AlterDatabase(Context context, DatabaseEvent event, String dbNameToLoadIn,
+                         TaskTracker loadTaskTracker, ReplicationMetricCollector metricCollector) {
+      super(context, event, dbNameToLoadIn, loadTaskTracker, metricCollector);
+    }
+
     @Override
     public TaskTracker tasks() throws SemanticException {
       Database dbObj = readDbMetadata();
-      tracker.addTask(alterDbTask(dbObj.getName(), dbObj.getParameters(), context.hiveConf));
+      tracker.addTask(alterDbTask(dbObj.getName(), dbObj.getParameters(), context.hiveConf,
+              context.dumpDirectory, this.metricCollector));
       return tracker;
     }
   }
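
With the LoadDatabase changes above, every CREATE/ALTER DATABASE issued during bootstrap load goes through the extended DDLWork constructor introduced by this patch, which takes a replication flag, the dump root's parent and the metric collector, so a failing DDL during load can be reflected in the replication metrics. A minimal sketch of that wiring, assuming dbName, props, dumpDirectory, metricCollector and hiveConf are already in hand:

    // Sketch: replication-aware DDL work, as created by alterDbTask() above.
    AlterDatabaseSetPropertiesDesc alterDbDesc = new AlterDatabaseSetPropertiesDesc(dbName, props, null);
    DDLWork work = new DDLWork(new HashSet<>(), new HashSet<>(), alterDbDesc, true,
            new Path(dumpDirectory).getParent().toString(), metricCollector);
    Task<?> task = TaskFactory.get(work, hiveConf);
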
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadFunction.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadFunction.java
index 667ec7f..7350267 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadFunction.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadFunction.java
@@ -56,6 +56,7 @@ public class LoadFunction {
   private final FunctionEvent event;
   private final String dbNameToLoadIn;
   private final TaskTracker tracker;
+  String dumpDirectory;
   private final ReplicationMetricCollector metricCollector;
 
   public LoadFunction(Context context, ReplLogger replLogger, FunctionEvent event,
@@ -68,9 +69,21 @@ public class LoadFunction {
     this.metricCollector = metricCollector;
   }
 
+  public LoadFunction(Context context, ReplLogger replLogger, FunctionEvent event,
+                      String dbNameToLoadIn, TaskTracker existingTracker,
+                      String dumpDirectory, ReplicationMetricCollector metricCollector) {
+    this.context = context;
+    this.replLogger = replLogger;
+    this.event = event;
+    this.dbNameToLoadIn = dbNameToLoadIn;
+    this.tracker = new TaskTracker(existingTracker);
+    this.dumpDirectory = dumpDirectory;
+    this.metricCollector = metricCollector;
+  }
+
   private void createFunctionReplLogTask(List<Task<?>> functionTasks,
                                          String functionName) {
-    ReplStateLogWork replLogWork = new ReplStateLogWork(replLogger, functionName, metricCollector);
+    ReplStateLogWork replLogWork = new ReplStateLogWork(replLogger, functionName, dumpDirectory, metricCollector);
     Task<ReplStateLogWork> replLogTask = TaskFactory.get(replLogWork, context.hiveConf);
     DAGTraversal.traverse(functionTasks, new AddDependencyToLeaves(replLogTask));
   }
@@ -88,7 +101,7 @@ public class LoadFunction {
       List<Task<?>> tasks = handler.handle(
           new MessageHandler.Context(
               dbNameToLoadIn, fromPath.toString(), null, null, context.hiveConf,
-              context.hiveDb, context.nestedContext, LOG)
+              context.hiveDb, context.nestedContext, LOG, dumpDirectory, metricCollector)
       );
       createFunctionReplLogTask(tasks, handler.getFunctionName());
       tasks.forEach(tracker::addTask);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
index 48c5e73..e0c9b96 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
@@ -128,7 +128,8 @@ public class LoadPartitions {
         if (!forNewTable().hasReplicationState()) {
           // Add ReplStateLogTask only if no pending table load tasks left for next cycle
           Task<?> replLogTask
-                  = ReplUtils.getTableReplLogTask(tableDesc, replLogger, context.hiveConf, metricCollector);
+                  = ReplUtils.getTableReplLogTask(tableDesc, replLogger, context.hiveConf, metricCollector,
+                  (new Path(context.dumpDirectory)).getParent().toString());
           tracker.addDependentTask(replLogTask);
         }
         return tracker;
@@ -142,7 +143,8 @@ public class LoadPartitions {
           if (!forExistingTable(lastReplicatedPartition).hasReplicationState()) {
             // Add ReplStateLogTask only if no pending table load tasks left for next cycle
             Task<?> replLogTask
-                    = ReplUtils.getTableReplLogTask(tableDesc, replLogger, context.hiveConf, metricCollector);
+                    = ReplUtils.getTableReplLogTask(tableDesc, replLogger, context.hiveConf, metricCollector,
+                    (new Path(context.dumpDirectory)).getParent().toString());
             tracker.addDependentTask(replLogTask);
           }
           return tracker;
@@ -248,7 +250,8 @@ public class LoadPartitions {
   private Task<?> tasksForAddPartition(Table table, AlterTableAddPartitionDesc addPartitionDesc, Task<?> ptnRootTask)
           throws MetaException, HiveException {
     Task<?> addPartTask = TaskFactory.get(
-      new DDLWork(new HashSet<>(), new HashSet<>(), addPartitionDesc),
+      new DDLWork(new HashSet<>(), new HashSet<>(), addPartitionDesc,
+              true, (new Path(context.dumpDirectory)).getParent().toString(), this.metricCollector),
       context.hiveConf
     );
     //checkpointing task already added as part of add batch of partition in case for metadata only and external tables
@@ -272,6 +275,7 @@ public class LoadPartitions {
       tableDesc,
       (HashMap<String, String>)partSpec.getPartSpec(),
       context.dumpDirectory,
+      this.metricCollector,
       context.hiveConf
     );
 
@@ -290,7 +294,8 @@ public class LoadPartitions {
         event.replicationSpec(),
         new Path(event.dataPath() + Path.SEPARATOR + getPartitionName(sourceWarehousePartitionLocation)),
         stagingDir,
-        context.hiveConf, copyAtLoad, false
+        context.hiveConf, copyAtLoad, false, (new Path(context.dumpDirectory)).getParent().toString(),
+        this.metricCollector
     );
 
     Task<?> movePartitionTask = null;
@@ -328,7 +333,9 @@ public class LoadPartitions {
    */
   private Task<?> movePartitionTask(Table table, AlterTableAddPartitionDesc.PartitionDesc partSpec, Path tmpPath,
                                     LoadFileType loadFileType) {
-    MoveWork moveWork = new MoveWork(new HashSet<>(), new HashSet<>(), null, null, false);
+    MoveWork moveWork = new MoveWork(new HashSet<>(), new HashSet<>(), null, null, false,
+                                    (new Path(context.dumpDirectory)).getParent().toString(), this.metricCollector,
+                                     true);
     if (AcidUtils.isTransactionalTable(table)) {
       LoadMultiFilesDesc loadFilesWork = new LoadMultiFilesDesc(
         Collections.singletonList(tmpPath),
@@ -386,7 +393,8 @@ public class LoadPartitions {
       AlterTableDropPartitionDesc dropPtnDesc = new AlterTableDropPartitionDesc(HiveTableName.of(table),
           partSpecsExpr, true, event.replicationSpec());
       dropPtnTask = TaskFactory.get(
-              new DDLWork(new HashSet<>(), new HashSet<>(), dropPtnDesc), context.hiveConf
+              new DDLWork(new HashSet<>(), new HashSet<>(), dropPtnDesc, true,
+                      (new Path(context.dumpDirectory)).getParent().toString(), this.metricCollector), context.hiveConf
       );
     }
     return dropPtnTask;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java
index 45fca07..11a1036 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java
@@ -147,11 +147,13 @@ public class LoadTable {
             tableDesc,
             null,
             context.dumpDirectory,
+            this.metricCollector,
             context.hiveConf
     );
     if (!isPartitioned(tableDesc)) {
       Task<?> replLogTask
-              = ReplUtils.getTableReplLogTask(tableDesc, replLogger, context.hiveConf, metricCollector);
+              = ReplUtils.getTableReplLogTask(tableDesc, replLogger, context.hiveConf, metricCollector,
+                                              (new Path(context.dumpDirectory)).getParent().toString());
       ckptTask.addDependentTask(replLogTask);
     }
     tracker.addDependentTask(ckptTask);
@@ -187,7 +189,8 @@ public class LoadTable {
       tblDesc.setLocation(null);
     }
     Task<?> createTableTask =
-        tblDesc.getCreateTableTask(new HashSet<>(), new HashSet<>(), context.hiveConf);
+        tblDesc.getCreateTableTask(new HashSet<>(), new HashSet<>(), context.hiveConf, true,
+                (new Path(context.dumpDirectory)).getParent().toString(), metricCollector);
     if (tblRootTask == null) {
       tblRootTask = createTableTask;
     } else {
@@ -202,7 +205,8 @@ public class LoadTable {
     if (replicationSpec.isTransactionalTableDump()) {
       List<String> partNames = isPartitioned(tblDesc) ? event.partitions(tblDesc) : null;
       ReplTxnWork replTxnWork = new ReplTxnWork(tblDesc.getDatabaseName(), tblDesc.getTableName(), partNames,
-              replicationSpec.getValidWriteIdList(), ReplTxnWork.OperationType.REPL_WRITEID_STATE);
+              replicationSpec.getValidWriteIdList(), ReplTxnWork.OperationType.REPL_WRITEID_STATE,
+              (new Path(context.dumpDirectory)).getParent().toString(), metricCollector);
       Task<?> replTxnTask = TaskFactory.get(replTxnWork, context.hiveConf);
       parentTask.addDependentTask(replTxnTask);
       parentTask = replTxnTask;
@@ -283,9 +287,11 @@ public class LoadTable {
 
     boolean copyAtLoad = context.hiveConf.getBoolVar(HiveConf.ConfVars.REPL_RUN_DATA_COPY_TASKS_ON_TARGET);
     Task<?> copyTask = ReplCopyTask.getLoadCopyTask(replicationSpec, dataPath, tmpPath, context.hiveConf,
-            copyAtLoad, false);
+            copyAtLoad, false, (new Path(context.dumpDirectory)).getParent().toString(), metricCollector);
 
-    MoveWork moveWork = new MoveWork(new HashSet<>(), new HashSet<>(), null, null, false);
+    MoveWork moveWork = new MoveWork(new HashSet<>(), new HashSet<>(), null, null, false,
+                                     (new Path(context.dumpDirectory)).getParent().toString(), metricCollector,
+                                      true);
     if (AcidUtils.isTransactionalTable(table)) {
       LoadMultiFilesDesc loadFilesWork = new LoadMultiFilesDesc(
         Collections.singletonList(tmpPath),
@@ -308,6 +314,8 @@ public class LoadTable {
   private Task<?> dropTableTask(Table table) {
     assert(table != null);
     DropTableDesc dropTblDesc = new DropTableDesc(table.getFullyQualifiedName(), true, false, event.replicationSpec());
-    return TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), dropTblDesc), context.hiveConf);
+    return TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), dropTblDesc,
+                                      true, (new Path(context.dumpDirectory)).getParent().toString(),
+                                      this.metricCollector), context.hiveConf);
   }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java
index b00341a..15dc451 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java
@@ -71,6 +71,7 @@ public class IncrementalLoadTasksBuilder {
   private final ReplLogger replLogger;
   private static long numIteration;
   private final Long eventTo;
+  private String dumpDirectory;
   private final ReplicationMetricCollector metricCollector;
 
   public IncrementalLoadTasksBuilder(String dbName, String loadPath,
@@ -78,6 +79,7 @@ public class IncrementalLoadTasksBuilder {
                                      Long eventTo,
                                      ReplicationMetricCollector metricCollector) throws SemanticException {
     this.dbName = dbName;
+    dumpDirectory = (new Path(loadPath).getParent()).toString();
     this.iterator = iterator;
     inputs = new HashSet<>();
     outputs = new HashSet<>();
@@ -135,13 +137,14 @@ public class IncrementalLoadTasksBuilder {
       // entire chain
 
       MessageHandler.Context mhContext = new MessageHandler.Context(dbName, location,
-              taskChainTail, eventDmd, conf, hive, context, this.log);
+              taskChainTail, eventDmd, conf, hive, context, this.log,
+              dumpDirectory, metricCollector);
       List<Task<?>> evTasks = analyzeEventLoad(mhContext);
 
       if ((evTasks != null) && (!evTasks.isEmpty())) {
         ReplStateLogWork replStateLogWork = new ReplStateLogWork(replLogger, metricCollector,
                 dir.getPath().getName(),
-                eventDmd.getDumpType().toString());
+                eventDmd.getDumpType().toString(), dumpDirectory);
         Task<?> barrierTask = TaskFactory.get(replStateLogWork, conf);
         AddDependencyToLeaves function = new AddDependencyToLeaves(barrierTask);
         DAGTraversal.traverse(evTasks, function);
@@ -155,13 +158,14 @@ public class IncrementalLoadTasksBuilder {
 
     if (!hasMoreWork()) {
       ReplRemoveFirstIncLoadPendFlagDesc desc = new ReplRemoveFirstIncLoadPendFlagDesc(dbName);
-      Task<?> updateIncPendTask = TaskFactory.get(new DDLWork(inputs, outputs, desc), conf);
+      Task<?> updateIncPendTask = TaskFactory.get(new DDLWork(inputs, outputs, desc,
+              true, dumpDirectory, this.metricCollector), conf);
       taskChainTail.addDependentTask(updateIncPendTask);
       taskChainTail = updateIncPendTask;
 
       Map<String, String> dbProps = new HashMap<>();
       dbProps.put(ReplicationSpec.KEY.CURR_STATE_ID.toString(), String.valueOf(lastReplayedEvent));
-      ReplStateLogWork replStateLogWork = new ReplStateLogWork(replLogger, dbProps, metricCollector);
+      ReplStateLogWork replStateLogWork = new ReplStateLogWork(replLogger, dbProps, dumpDirectory, metricCollector);
       Task<?> barrierTask = TaskFactory.get(replStateLogWork, conf);
       taskChainTail.addDependentTask(barrierTask);
       this.log.debug("Added {}:{} as a precursor of barrier task {}:{}",
@@ -230,7 +234,8 @@ public class IncrementalLoadTasksBuilder {
     AlterTableSetPropertiesDesc alterTblDesc = new AlterTableSetPropertiesDesc(tName, partSpec,
         new ReplicationSpec(replState, replState), false, mapProp, false, false, null);
 
-    Task<?> updateReplIdTask = TaskFactory.get(new DDLWork(inputs, outputs, alterTblDesc), conf);
+    Task<?> updateReplIdTask = TaskFactory.get(new DDLWork(inputs, outputs, alterTblDesc,
+                                               true, dumpDirectory, metricCollector), conf);
 
     // Link the update repl state task with dependency collection task
     if (preCursor != null) {
@@ -248,7 +253,9 @@ public class IncrementalLoadTasksBuilder {
 
     AlterDatabaseSetPropertiesDesc alterDbDesc = new AlterDatabaseSetPropertiesDesc(dbName, mapProp,
         new ReplicationSpec(replState, replState));
-    Task<?> updateReplIdTask = TaskFactory.get(new DDLWork(inputs, outputs, alterDbDesc), conf);
+    Task<?> updateReplIdTask = TaskFactory.get(new DDLWork(inputs, outputs, alterDbDesc,
+                                               true, dumpDirectory,
+                                               metricCollector), conf);
 
     // Link the update repl state task with dependency collection task
     if (preCursor != null) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java
index 5d71ce0..0d0f55f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java
@@ -37,13 +37,20 @@ import org.apache.hadoop.hive.ql.ddl.table.misc.properties.AlterTableSetProperti
 import org.apache.hadoop.hive.ql.ddl.table.partition.PartitionUtils;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.exec.repl.ReplAck;
 import org.apache.hadoop.hive.ql.exec.repl.ReplStateLogWork;
 import org.apache.hadoop.hive.ql.exec.util.Retryable;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.EximUtil;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.parse.repl.ReplLogger;
+import org.apache.hadoop.hive.ql.parse.repl.dump.Utils;
+import org.apache.hadoop.hive.ql.parse.repl.dump.metric.BootstrapDumpMetricCollector;
+import org.apache.hadoop.hive.ql.parse.repl.dump.metric.IncrementalDumpMetricCollector;
+import org.apache.hadoop.hive.ql.parse.repl.load.metric.BootstrapLoadMetricCollector;
+import org.apache.hadoop.hive.ql.parse.repl.load.metric.IncrementalLoadMetricCollector;
 import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector;
+import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status;
 import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
@@ -192,6 +199,17 @@ public class ReplUtils {
     return TaskFactory.get(replLogWork, conf);
   }
 
+  public static Task<?> getTableReplLogTask(ImportTableDesc tableDesc, ReplLogger replLogger, HiveConf conf,
+                                            ReplicationMetricCollector metricCollector,
+                                            String dumpRoot)
+          throws SemanticException {
+    TableType tableType = tableDesc.isExternal() ? TableType.EXTERNAL_TABLE : tableDesc.tableType();
+    ReplStateLogWork replLogWork = new ReplStateLogWork(replLogger, metricCollector,
+            tableDesc.getTableName(), tableType, dumpRoot);
+    return TaskFactory.get(replLogWork, conf);
+  }
+
+
   public static Task<?> getTableCheckpointTask(ImportTableDesc tableDesc, HashMap<String, String> partSpec,
                                                String dumpRoot, HiveConf conf) throws SemanticException {
     HashMap<String, String> mapProp = new HashMap<>();
@@ -203,6 +221,19 @@ public class ReplUtils {
     return TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), alterTblDesc), conf);
   }
 
+  public static Task<?> getTableCheckpointTask(ImportTableDesc tableDesc, HashMap<String, String> partSpec,
+                                               String dumpRoot, ReplicationMetricCollector metricCollector,
+                                               HiveConf conf) throws SemanticException {
+    HashMap<String, String> mapProp = new HashMap<>();
+    mapProp.put(REPL_CHECKPOINT_KEY, dumpRoot);
+
+    final TableName tName = TableName.fromString(tableDesc.getTableName(), null, tableDesc.getDatabaseName());
+    AlterTableSetPropertiesDesc alterTblDesc =  new AlterTableSetPropertiesDesc(tName, partSpec, null, false,
+            mapProp, false, false, null);
+    return TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), alterTblDesc,
+            true, (new Path(dumpRoot)).getParent().toString(), metricCollector), conf);
+  }
+
   public static boolean replCkptStatus(String dbName, Map<String, String> props, String dumpRoot)
           throws InvalidOperationException {
     // If ckpt property not set or empty means, bootstrap is not run on this object.
@@ -256,6 +287,24 @@ public class ReplUtils {
     return taskList;
 
   }
+
+  public static List<Task<?>> addTasksForLoadingColStats(ColumnStatistics colStats,
+                                                         HiveConf conf,
+                                                         UpdatedMetaDataTracker updatedMetadata,
+                                                         org.apache.hadoop.hive.metastore.api.Table tableObj,
+                                                         long writeId,
+                                                         String nonRecoverableMarkPath,
+                                                         ReplicationMetricCollector metricCollector)
+          throws IOException, TException {
+    List<Task<?>> taskList = new ArrayList<>();
+    ColumnStatsUpdateWork work = new ColumnStatsUpdateWork(colStats, nonRecoverableMarkPath, metricCollector, true);
+    work.setWriteId(writeId);
+    Task<?> task = TaskFactory.get(work, conf);
+    taskList.add(task);
+    return taskList;
+
+  }
+
   // Path filters to filter only events (directories) excluding "_bootstrap"
   public static PathFilter getEventsDirectoryFilter(final FileSystem fs) {
     return p -> {
@@ -280,6 +329,52 @@ public class ReplUtils {
     };
   }
 
+  public static int handleException(boolean isReplication, Throwable e, String nonRecoverablePath,
+                                    ReplicationMetricCollector metricCollector, String stageName, HiveConf conf) {
+    int errorCode = ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode();
+    if (isReplication) {
+      try {
+        if (nonRecoverablePath != null) {
+          final int recoverableLimit = ErrorMsg.GENERIC_ERROR.getErrorCode();
+          String metricStage = getMetricStageName(stageName, metricCollector);
+          if (errorCode > recoverableLimit) {
+            Path nonRecoverableMarker = new Path(new Path(nonRecoverablePath), ReplAck.NON_RECOVERABLE_MARKER.toString());
+            Utils.writeStackTrace(e, nonRecoverableMarker, conf);
+            metricCollector.reportStageEnd(metricStage, Status.FAILED_ADMIN, nonRecoverableMarker.toString());
+          }
+          else {
+            metricCollector.reportStageEnd(metricStage, Status.FAILED);
+          }
+        }
+      } catch (Exception ex) {
+        LOG.error("Failed to collect Metrics ", ex);
+      }
+    }
+    return errorCode;
+  }
+
+  private static String getMetricStageName(String stageName, ReplicationMetricCollector metricCollector) {
+    if ("REPL_DUMP".equals(stageName) || "REPL_LOAD".equals(stageName) || "ATLAS_DUMP".equals(stageName)
+            || "ATLAS_LOAD".equals(stageName) || "RANGER_DUMP".equals(stageName) || "RANGER_LOAD".equals(stageName)) {
+      return stageName;
+    }
+    if (isDumpMetricCollector(metricCollector)) {
+      return "REPL_DUMP";
+    } else {
+      return "REPL_LOAD";
+    }
+  }
+
+  private static boolean isDumpMetricCollector(ReplicationMetricCollector metricCollector) {
+    return metricCollector instanceof BootstrapDumpMetricCollector || 
+            metricCollector instanceof IncrementalDumpMetricCollector;
+  }
+
+  private static boolean isLoadMetricCollector(ReplicationMetricCollector metricCollector) {
+    return metricCollector instanceof BootstrapLoadMetricCollector ||
+            metricCollector instanceof IncrementalLoadMetricCollector;
+  }
+
   public static boolean isFirstIncPending(Map<String, String> parameters) {
     if (parameters == null) {
       return false;
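
A minimal sketch of how the new ReplUtils.handleException is expected to be called from a
replication task's execute() path (illustrative only, not part of the patch; nonRecoverablePath,
metricCollector and conf stand for state the concrete task already holds, and "REPL_DUMP" is
whichever stage the task reports):

    try {
      // ... task-specific replication work ...
      return 0;
    } catch (Exception e) {
      // For error codes above ErrorMsg.GENERIC_ERROR, handleException writes a
      // NON_RECOVERABLE_MARKER file under nonRecoverablePath and reports the stage as
      // FAILED_ADMIN; otherwise it reports FAILED. The error code is returned either way.
      return ReplUtils.handleException(true, e, nonRecoverablePath, metricCollector,
              "REPL_DUMP", conf);
    }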
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
index 3b9bc6f..5e05c73 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.repl.DumpType;
 import org.apache.hadoop.hive.ql.parse.repl.load.MetaData;
 import org.apache.hadoop.hive.ql.parse.repl.load.UpdatedMetaDataTracker;
+import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector;
 import org.apache.hadoop.hive.ql.plan.CopyWork;
 import org.apache.hadoop.hive.ql.plan.ImportTableDesc;
 import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
@@ -222,6 +223,23 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
                                       long writeId, // Initialize with 0 for non-ACID and non-MM tables.
                                       MetaData rv
   ) throws IOException, MetaException, HiveException, URISyntaxException {
+    return prepareImport(isImportCmd, isLocationSet, isExternalSet, isPartSpecSet, waitOnPrecursor,
+                         parsedLocation, parsedTableName, overrideDBName, parsedPartSpec, fromLocn,
+                         x, updatedMetadata, txnMgr, writeId, rv, null, null);
+  }
+
+  public static boolean prepareImport(boolean isImportCmd,
+                                      boolean isLocationSet, boolean isExternalSet, boolean isPartSpecSet,
+                                      boolean waitOnPrecursor,
+                                      String parsedLocation, String parsedTableName, String overrideDBName,
+                                      LinkedHashMap<String, String> parsedPartSpec,
+                                      String fromLocn, EximUtil.SemanticAnalyzerWrapperContext x,
+                                      UpdatedMetaDataTracker updatedMetadata, HiveTxnManager txnMgr,
+                                      long writeId, // Initialize with 0 for non-ACID and non-MM tables.
+                                      MetaData rv,
+                                      String dumpRoot,
+                                      ReplicationMetricCollector metricCollector
+  ) throws IOException, MetaException, HiveException, URISyntaxException {
 
     // initialize load path
     URI fromURI = EximUtil.getValidatedURI(x.getConf(), stripQuotes(fromLocn));
@@ -358,7 +376,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
       createReplImportTasks(
               tblDesc, partitionDescs,
               replicationSpec, waitOnPrecursor, table,
-              fromURI, wh, x, writeId, stmtId, updatedMetadata);
+              fromURI, wh, x, writeId, stmtId, updatedMetadata, dumpRoot, metricCollector);
     } else {
       createRegularImportTasks(
               tblDesc, partitionDescs,
@@ -405,6 +423,12 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
   private static Task<?> loadTable(URI fromURI, Table table, boolean replace, Path tgtPath,
                                    ReplicationSpec replicationSpec, EximUtil.SemanticAnalyzerWrapperContext x,
                                    Long writeId, int stmtId) throws HiveException {
+    return loadTable(fromURI, table, replace, tgtPath, replicationSpec, x, writeId, stmtId, null, null);
+  }
+  private static Task<?> loadTable(URI fromURI, Table table, boolean replace, Path tgtPath,
+                                   ReplicationSpec replicationSpec, EximUtil.SemanticAnalyzerWrapperContext x,
+                                   Long writeId, int stmtId,
+                                   String dumpRoot, ReplicationMetricCollector metricCollector) throws HiveException {
     assert table != null;
     assert table.getParameters() != null;
     Path dataPath = new Path(fromURI.toString(), EximUtil.DATA_PATH_NAME);
@@ -462,12 +486,13 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
     if (replicationSpec.isInReplicationScope()) {
       boolean copyAtLoad = x.getConf().getBoolVar(HiveConf.ConfVars.REPL_RUN_DATA_COPY_TASKS_ON_TARGET);
       copyTask = ReplCopyTask.getLoadCopyTask(replicationSpec, dataPath, destPath, x.getConf(),
-              isSkipTrash, needRecycle, copyAtLoad);
+              isSkipTrash, needRecycle, copyAtLoad, dumpRoot, metricCollector);
     } else {
-      copyTask = TaskFactory.get(new CopyWork(dataPath, destPath, false));
+      copyTask = TaskFactory.get(new CopyWork(dataPath, destPath, false, dumpRoot, metricCollector, true));
     }
 
-    MoveWork moveWork = new MoveWork(x.getInputs(), x.getOutputs(), null, null, false);
+    MoveWork moveWork = new MoveWork(x.getInputs(), x.getOutputs(), null, null, false,
+                                     dumpRoot, metricCollector, true);
 
 
     if (replicationSpec.isInReplicationScope() && AcidUtils.isTransactionalTable(table)) {
@@ -496,12 +521,26 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
     return tableDesc.getCreateTableTask(x.getInputs(), x.getOutputs(), x.getConf());
   }
 
+  private static Task<?> createTableTask(ImportTableDesc tableDesc, EximUtil.SemanticAnalyzerWrapperContext x,
+                                         String dumpRoot, ReplicationMetricCollector metricCollector) {
+    return tableDesc.getCreateTableTask(x.getInputs(), x.getOutputs(), x.getConf(), true,
+                                        dumpRoot, metricCollector);
+  }
+
   private static Task<?> dropTableTask(Table table, EximUtil.SemanticAnalyzerWrapperContext x,
                                        ReplicationSpec replicationSpec) {
     DropTableDesc dropTblDesc = new DropTableDesc(table.getTableName(), true, false, replicationSpec);
     return TaskFactory.get(new DDLWork(x.getInputs(), x.getOutputs(), dropTblDesc), x.getConf());
   }
 
+  private static Task<?> dropTableTask(Table table, EximUtil.SemanticAnalyzerWrapperContext x,
+                                       ReplicationSpec replicationSpec, String dumpRoot,
+                                       ReplicationMetricCollector metricCollector) {
+    DropTableDesc dropTblDesc = new DropTableDesc(table.getTableName(), true, false, replicationSpec);
+    return TaskFactory.get(new DDLWork(x.getInputs(), x.getOutputs(), dropTblDesc,
+            true, dumpRoot, metricCollector), x.getConf());
+  }
+
   private static Task<?> alterTableTask(ImportTableDesc tableDesc,
                                                              EximUtil.SemanticAnalyzerWrapperContext x,
                                                              ReplicationSpec replicationSpec) {
@@ -512,6 +551,18 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
     return tableDesc.getCreateTableTask(x.getInputs(), x.getOutputs(), x.getConf());
   }
 
+  private static Task<?> alterTableTask(ImportTableDesc tableDesc,
+                                        EximUtil.SemanticAnalyzerWrapperContext x,
+                                        ReplicationSpec replicationSpec, boolean isReplication,
+                                        String dumpRoot, ReplicationMetricCollector metricCollector) {
+    tableDesc.setReplaceMode(true);
+    if ((replicationSpec != null) && (replicationSpec.isInReplicationScope())) {
+      tableDesc.setReplicationSpec(replicationSpec);
+    }
+    return tableDesc.getCreateTableTask(x.getInputs(), x.getOutputs(), x.getConf(), isReplication,
+                                        dumpRoot, metricCollector);
+  }
+
   private static Task<?> alterSinglePartition(
           ImportTableDesc tblDesc, Table table, Warehouse wh, AlterTableAddPartitionDesc addPartitionDesc,
           ReplicationSpec replicationSpec, org.apache.hadoop.hive.ql.metadata.Partition ptn,
@@ -528,11 +579,40 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
     return TaskFactory.get(new DDLWork(x.getInputs(), x.getOutputs(), addPartitionDesc), x.getConf());
   }
 
+  private static Task<?> alterSinglePartition(
+          ImportTableDesc tblDesc, Table table, Warehouse wh, AlterTableAddPartitionDesc addPartitionDesc,
+          ReplicationSpec replicationSpec, org.apache.hadoop.hive.ql.metadata.Partition ptn,
+          EximUtil.SemanticAnalyzerWrapperContext x, boolean isReplication,
+          String dumpRoot, ReplicationMetricCollector metricCollector) throws MetaException, IOException, HiveException {
+    if ((replicationSpec != null) && (replicationSpec.isInReplicationScope())) {
+      addPartitionDesc.setReplicationSpec(replicationSpec);
+    }
+    AlterTableAddPartitionDesc.PartitionDesc partSpec = addPartitionDesc.getPartitions().get(0);
+    if (ptn == null) {
+      fixLocationInPartSpec(tblDesc, table, wh, replicationSpec, partSpec, x);
+    } else if (!externalTablePartition(tblDesc, replicationSpec)) {
+      partSpec.setLocation(ptn.getLocation()); // use existing location
+    }
+    return TaskFactory.get(new DDLWork(x.getInputs(), x.getOutputs(), addPartitionDesc,
+            isReplication, dumpRoot, metricCollector), x.getConf());
+  }
+
   private static Task<?> addSinglePartition(ImportTableDesc tblDesc,
                                             Table table, Warehouse wh, AlterTableAddPartitionDesc addPartitionDesc,
                                             ReplicationSpec replicationSpec,
                                             EximUtil.SemanticAnalyzerWrapperContext x, Long writeId, int stmtId)
           throws MetaException, IOException, HiveException {
+    return addSinglePartition(tblDesc, table, wh, addPartitionDesc, replicationSpec,
+                              x, writeId, stmtId, false, null, null);
+  }
+
+  private static Task<?> addSinglePartition(ImportTableDesc tblDesc,
+                                            Table table, Warehouse wh, AlterTableAddPartitionDesc addPartitionDesc,
+                                            ReplicationSpec replicationSpec,
+                                            EximUtil.SemanticAnalyzerWrapperContext x, Long writeId, int stmtId,
+                                            boolean isReplication, String dumpRoot,
+                                            ReplicationMetricCollector metricCollector)
+          throws MetaException, IOException, HiveException {
     AlterTableAddPartitionDesc.PartitionDesc partSpec = addPartitionDesc.getPartitions().get(0);
     boolean isSkipTrash = false;
     boolean needRecycle = false;
@@ -544,7 +624,8 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
       // addPartitionDesc already has the right partition location
       @SuppressWarnings("unchecked")
       Task<?> addPartTask = TaskFactory.get(
-              new DDLWork(x.getInputs(), x.getOutputs(), addPartitionDesc), x.getConf());
+              new DDLWork(x.getInputs(), x.getOutputs(), addPartitionDesc, isReplication,
+                      dumpRoot, metricCollector), x.getConf());
       return addPartTask;
     } else {
       String srcLocation = partSpec.getLocation();
@@ -605,9 +686,10 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
       if (replicationSpec.isInReplicationScope()) {
         boolean copyAtLoad = x.getConf().getBoolVar(HiveConf.ConfVars.REPL_RUN_DATA_COPY_TASKS_ON_TARGET);
         copyTask = ReplCopyTask.getLoadCopyTask(replicationSpec, new Path(srcLocation), destPath,
-                x.getConf(), isSkipTrash, needRecycle, copyAtLoad);
+                x.getConf(), isSkipTrash, needRecycle, copyAtLoad, dumpRoot, metricCollector);
       } else {
-        copyTask = TaskFactory.get(new CopyWork(new Path(srcLocation), destPath, false));
+        copyTask = TaskFactory.get(new CopyWork(new Path(srcLocation), destPath, false,
+                                                dumpRoot, metricCollector, isReplication));
       }
 
       Task<?> addPartTask = null;
@@ -615,12 +697,12 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
         // During replication, by the time we are applying commit transaction event, we expect
         // the partition/s to be already added or altered by previous events. So no need to
         // create add partition event again.
-        addPartTask = TaskFactory.get(
-                new DDLWork(x.getInputs(), x.getOutputs(), addPartitionDesc), x.getConf());
+        addPartTask = TaskFactory.get(new DDLWork(x.getInputs(), x.getOutputs(), addPartitionDesc,
+                                      isReplication, dumpRoot, metricCollector), x.getConf());
       }
 
       MoveWork moveWork = new MoveWork(x.getInputs(), x.getOutputs(),
-              null, null, false);
+              null, null, false, dumpRoot, metricCollector, isReplication);
 
       // Note: this sets LoadFileType incorrectly for ACID; is that relevant for import?
       //       See setLoadFileType and setIsAcidIow calls elsewhere for an example.
@@ -990,7 +1072,8 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
           org.apache.hadoop.hive.ql.metadata.Partition ptn = null;
           if ((ptn = x.getHive().getPartition(table, partSpec, false)) == null) {
             x.getTasks().add(addSinglePartition(
-                tblDesc, table, wh, addPartitionDesc, replicationSpec, x, writeId, stmtId));
+                tblDesc, table, wh, addPartitionDesc, replicationSpec, x, writeId, stmtId,
+                    false, null, null));
           } else {
             throw new SemanticException(
                 ErrorMsg.PARTITION_EXISTS.getMsg(partSpecToString(partSpec)));
@@ -1021,7 +1104,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
       if (isPartitioned(tblDesc)) {
         for (AlterTableAddPartitionDesc addPartitionDesc : partitionDescs) {
           t.addDependentTask(addSinglePartition(tblDesc, table, wh, addPartitionDesc,
-            replicationSpec, x, writeId, stmtId));
+            replicationSpec, x, writeId, stmtId, false, null, null));
         }
       } else {
         x.getLOG().debug("adding dependent CopyWork/MoveWork for table");
@@ -1072,7 +1155,8 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
       ReplicationSpec replicationSpec, boolean waitOnPrecursor,
       Table table, URI fromURI, Warehouse wh,
       EximUtil.SemanticAnalyzerWrapperContext x, Long writeId, int stmtId,
-      UpdatedMetaDataTracker updatedMetadata)
+      UpdatedMetaDataTracker updatedMetadata, String dumpRoot,
+      ReplicationMetricCollector metricCollector)
       throws HiveException, IOException, MetaException {
 
     Task<?> dropTblTask = null;
@@ -1110,7 +1194,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
       // We need to drop here to handle the case where the previous incremental load created the table but
       // didn't set the last repl ID due to some failure.
       if (x.getEventType() == DumpType.EVENT_CREATE_TABLE) {
-        dropTblTask = dropTableTask(table, x, replicationSpec);
+        dropTblTask = dropTableTask(table, x, replicationSpec, dumpRoot, metricCollector);
         table = null;
       } else if (!firstIncPending) {
         //If in db pending flag is not set then check in table parameter for table level load.
@@ -1171,10 +1255,12 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
           addPartitionDesc.setReplicationSpec(replicationSpec);
           if (!replicationSpec.isMetadataOnly()) {
             dependentTasks.add(addSinglePartition(tblDesc, table, wh, addPartitionDesc,
-                                                replicationSpec, x, writeId, stmtId));
+                                                replicationSpec, x, writeId, stmtId,
+                                                true, dumpRoot, metricCollector));
           } else {
             dependentTasks.add(alterSinglePartition(tblDesc, table, wh, addPartitionDesc,
-                                                  replicationSpec, null, x));
+                                                  replicationSpec, null, x, true,
+                                                  dumpRoot, metricCollector));
           }
           if (updatedMetadata != null) {
             updatedMetadata.addPartition(table.getDbName(), table.getTableName(),
@@ -1185,7 +1271,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
               && !shouldSkipDataCopyInReplScope(tblDesc, replicationSpec)) {
         x.getLOG().debug("adding dependent CopyWork/MoveWork for table");
         dependentTasks = Collections.singletonList(loadTable(fromURI, table, replicationSpec.isReplace(),
-            new Path(tblDesc.getLocation()), replicationSpec, x, writeId, stmtId));
+            new Path(tblDesc.getLocation()), replicationSpec, x, writeId, stmtId, dumpRoot, metricCollector));
       }
 
       // During replication, by the time we replay a commit transaction event, the table should
@@ -1196,7 +1282,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
         if (x.getEventType() == DumpType.EVENT_CREATE_TABLE && !tblDesc.isExternal()) {
           tblDesc.setLocation(null);
         }
-        Task t = createTableTask(tblDesc, x);
+        Task t = createTableTask(tblDesc, x, dumpRoot, metricCollector);
         if (dependentTasks != null) {
           dependentTasks.forEach(task -> t.addDependentTask(task));
         }
@@ -1250,13 +1336,15 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
           if (ptn == null) {
             if (!replicationSpec.isMetadataOnly()){
               x.getTasks().add(addSinglePartition(
-                  tblDesc, table, wh, addPartitionDesc, replicationSpec, x, writeId, stmtId));
+                  tblDesc, table, wh, addPartitionDesc, replicationSpec, x, writeId, stmtId,
+                      true, dumpRoot, metricCollector));
               if (updatedMetadata != null) {
                 updatedMetadata.addPartition(table.getDbName(), table.getTableName(), partSpec);
               }
             } else {
               x.getTasks().add(alterSinglePartition(
-                  tblDesc, table, wh, addPartitionDesc, replicationSpec, null, x));
+                  tblDesc, table, wh, addPartitionDesc, replicationSpec, null, x,
+                      true, dumpRoot, metricCollector));
               if (updatedMetadata != null) {
                 updatedMetadata.addPartition(table.getDbName(), table.getTableName(), partSpec);
               }
@@ -1266,11 +1354,11 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
             // the destination ptn's repl.last.id is older than the replacement's.
             if (replicationSpec.allowReplacementInto(ptn.getParameters())){
               if (!replicationSpec.isMetadataOnly()){
-                x.getTasks().add(addSinglePartition(
-                    tblDesc, table, wh, addPartitionDesc, replicationSpec, x, writeId, stmtId));
+                x.getTasks().add(addSinglePartition(tblDesc, table, wh, addPartitionDesc, replicationSpec, x,
+                                                    writeId, stmtId, true, dumpRoot, metricCollector));
               } else {
                 x.getTasks().add(alterSinglePartition(
-                    tblDesc, table, wh, addPartitionDesc, replicationSpec, ptn, x));
+                    tblDesc, table, wh, addPartitionDesc, replicationSpec, ptn, x, true, dumpRoot, metricCollector));
               }
               if (updatedMetadata != null) {
                 updatedMetadata.addPartition(table.getDbName(), table.getTableName(), partSpec);
@@ -1283,7 +1371,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
         }
         if (replicationSpec.isMetadataOnly() && partitionDescs.isEmpty()){
           // MD-ONLY table alter
-          x.getTasks().add(alterTableTask(tblDesc, x,replicationSpec));
+          x.getTasks().add(alterTableTask(tblDesc, x, replicationSpec, true, dumpRoot, metricCollector));
           if (lockType == WriteEntity.WriteType.DDL_NO_LOCK){
             lockType = WriteEntity.WriteType.DDL_SHARED;
           }
@@ -1296,9 +1384,9 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
         if (!replicationSpec.isMetadataOnly()) {
           // repl-imports are replace-into unless the event is insert-into
           loadTable(fromURI, table, replicationSpec.isReplace(), new Path(tblDesc.getLocation()),
-            replicationSpec, x, writeId, stmtId);
+            replicationSpec, x, writeId, stmtId, dumpRoot, metricCollector);
         } else {
-          x.getTasks().add(alterTableTask(tblDesc, x, replicationSpec));
+          x.getTasks().add(alterTableTask(tblDesc, x, replicationSpec, true, dumpRoot, metricCollector));
         }
         if (lockType == WriteEntity.WriteType.DDL_NO_LOCK){
           lockType = WriteEntity.WriteType.DDL_SHARED;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java
index c7656bc..5805a9c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java
@@ -168,7 +168,7 @@ public class Utils {
     }
   }
 
-  public static void writeStackTrace(Exception e, Path outputFile, HiveConf conf) throws SemanticException {
+  public static void writeStackTrace(Throwable e, Path outputFile, HiveConf conf) throws SemanticException {
     Retryable retryable = Retryable.builder()
       .withHiveConf(conf)
       .withRetryOnException(IOException.class).withFailOnException(FileNotFoundException.class).build();
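
Widening writeStackTrace from Exception to Throwable is what lets the new
ReplUtils.handleException persist any failure it receives; a sketch of the call it enables
(illustrative only; nonRecoverablePath and throwable are placeholders):

    Path nonRecoverableMarker = new Path(new Path(nonRecoverablePath),
            ReplAck.NON_RECOVERABLE_MARKER.toString());
    Utils.writeStackTrace(throwable, nonRecoverableMarker, conf);  // any Throwable is accepted now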
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/DumpMetaData.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/DumpMetaData.java
index dc40e1d..b6d43f7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/DumpMetaData.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/DumpMetaData.java
@@ -173,6 +173,10 @@ public class DumpMetaData {
     return dumpFile;
   }
 
+  public static String getDmdFileName() {
+    return DUMP_METADATA;
+  }
+
   public boolean isIncrementalDump() throws SemanticException {
     initializeIfNot();
     return (this.dumpType == DumpType.INCREMENTAL);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AbortTxnHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AbortTxnHandler.java
index b1c2709..c92ef25 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AbortTxnHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AbortTxnHandler.java
@@ -45,7 +45,8 @@ public class AbortTxnHandler extends AbstractMessageHandler {
 
     Task<ReplTxnWork> abortTxnTask = TaskFactory.get(
         new ReplTxnWork(HiveUtils.getReplPolicy(context.dbName), context.dbName, null,
-                msg.getTxnId(), ReplTxnWork.OperationType.REPL_ABORT_TXN, context.eventOnlyReplicationSpec()),
+                msg.getTxnId(), ReplTxnWork.OperationType.REPL_ABORT_TXN, context.eventOnlyReplicationSpec(),
+                context.getDumpDirectory(), context.getMetricCollector()),
         context.hiveConf
     );
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddCheckConstraintHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddCheckConstraintHandler.java
index ce33169..82f93fa 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddCheckConstraintHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddCheckConstraintHandler.java
@@ -70,7 +70,8 @@ public class AddCheckConstraintHandler extends AbstractMessageHandler {
     AlterTableAddConstraintDesc addConstraintsDesc = new AlterTableAddConstraintDesc(tName,
       context.eventOnlyReplicationSpec(), constraints);
     Task<DDLWork> addConstraintsTask = TaskFactory.get(
-      new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc), context.hiveConf);
+      new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc, true,
+              context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf);
     tasks.add(addConstraintsTask);
     context.log.debug("Added add constrains task : {}:{}", addConstraintsTask.getId(), actualTblName);
     updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddDefaultConstraintHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddDefaultConstraintHandler.java
index b17126e..6022105 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddDefaultConstraintHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddDefaultConstraintHandler.java
@@ -69,7 +69,8 @@ public class AddDefaultConstraintHandler extends AbstractMessageHandler {
     AlterTableAddConstraintDesc addConstraintsDesc = new AlterTableAddConstraintDesc(tName,
       context.eventOnlyReplicationSpec(), constraints);
     Task<DDLWork> addConstraintsTask = TaskFactory.get(
-      new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc), context.hiveConf);
+      new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc, true,
+              context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf);
     tasks.add(addConstraintsTask);
     context.log.debug("Added add constrains task : {}:{}", addConstraintsTask.getId(), actualTblName);
     updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddForeignKeyHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddForeignKeyHandler.java
index 6f98373..5f723ba 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddForeignKeyHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddForeignKeyHandler.java
@@ -72,7 +72,10 @@ public class AddForeignKeyHandler extends AbstractMessageHandler {
     AlterTableAddConstraintDesc addConstraintsDesc = new AlterTableAddConstraintDesc(tName,
         context.eventOnlyReplicationSpec(), constraints);
     Task<DDLWork> addConstraintsTask = TaskFactory.get(
-            new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc), context.hiveConf);
+            new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc,
+                    true, context.getDumpDirectory(), context.getMetricCollector()),
+            context.hiveConf
+            );
     tasks.add(addConstraintsTask);
     context.log.debug("Added add constrains task : {}:{}", addConstraintsTask.getId(), actualTblName);
     updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddNotNullConstraintHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddNotNullConstraintHandler.java
index 995c5d2..c6eaed7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddNotNullConstraintHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddNotNullConstraintHandler.java
@@ -66,8 +66,8 @@ public class AddNotNullConstraintHandler extends AbstractMessageHandler {
     Constraints constraints = new Constraints(null, null, nns, null, null, null);
     AlterTableAddConstraintDesc addConstraintsDesc = new AlterTableAddConstraintDesc(tName,
         context.eventOnlyReplicationSpec(), constraints);
-    Task<DDLWork> addConstraintsTask = TaskFactory.get(
-            new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc), context.hiveConf);
+    Task<DDLWork> addConstraintsTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc,
+            true, context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf);
     tasks.add(addConstraintsTask);
     context.log.debug("Added add constrains task : {}:{}", addConstraintsTask.getId(), actualTblName);
     updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddPrimaryKeyHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddPrimaryKeyHandler.java
index f6decc2..bddaf37 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddPrimaryKeyHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddPrimaryKeyHandler.java
@@ -66,8 +66,9 @@ public class AddPrimaryKeyHandler extends AbstractMessageHandler {
     Constraints constraints = new Constraints(pks, null, null, null, null, null);
     AlterTableAddConstraintDesc addConstraintsDesc = new AlterTableAddConstraintDesc(tName,
         context.eventOnlyReplicationSpec(), constraints);
-    Task<DDLWork> addConstraintsTask = TaskFactory.get(
-            new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc), context.hiveConf);
+    Task<DDLWork> addConstraintsTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet,
+            addConstraintsDesc, true, context.getDumpDirectory(),
+            context.getMetricCollector()), context.hiveConf);
     tasks.add(addConstraintsTask);
     context.log.debug("Added add constrains task : {}:{}", addConstraintsTask.getId(), actualTblName);
     updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddUniqueConstraintHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddUniqueConstraintHandler.java
index e1c1d3a..adbe13a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddUniqueConstraintHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddUniqueConstraintHandler.java
@@ -67,7 +67,8 @@ public class AddUniqueConstraintHandler extends AbstractMessageHandler {
     AlterTableAddConstraintDesc addConstraintsDesc = new AlterTableAddConstraintDesc(tName,
         context.eventOnlyReplicationSpec(), constraints);
     Task<DDLWork> addConstraintsTask = TaskFactory.get(
-            new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc), context.hiveConf);
+            new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc,
+                    true, context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf);
     tasks.add(addConstraintsTask);
     context.log.debug("Added add constrains task : {}:{}", addConstraintsTask.getId(), actualTblName);
     updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AllocWriteIdHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AllocWriteIdHandler.java
index f9a0750..d62a669 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AllocWriteIdHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AllocWriteIdHandler.java
@@ -51,7 +51,8 @@ public class AllocWriteIdHandler extends AbstractMessageHandler {
 
     // Repl policy should be created based on the table name in context.
     ReplTxnWork work = new ReplTxnWork(HiveUtils.getReplPolicy(context.dbName), dbName, tableName,
-        ReplTxnWork.OperationType.REPL_ALLOC_WRITE_ID, msg.getTxnToWriteIdList(), context.eventOnlyReplicationSpec());
+        ReplTxnWork.OperationType.REPL_ALLOC_WRITE_ID, msg.getTxnToWriteIdList(), context.eventOnlyReplicationSpec(),
+            context.getDumpDirectory(), context.getMetricCollector());
 
     Task<?> allocWriteIdTask = TaskFactory.get(work, context.hiveConf);
     context.log.info("Added alloc write id task : {}", allocWriteIdTask.getId());
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AlterDatabaseHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AlterDatabaseHandler.java
index 76c7dd5..041a4c9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AlterDatabaseHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AlterDatabaseHandler.java
@@ -78,8 +78,8 @@ public class AlterDatabaseHandler extends AbstractMessageHandler {
             newDb.getOwnerType()), context.eventOnlyReplicationSpec());
       }
 
-      Task<DDLWork> alterDbTask = TaskFactory.get(
-          new DDLWork(readEntitySet, writeEntitySet, alterDbDesc), context.hiveConf);
+      Task<DDLWork> alterDbTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet,
+                       alterDbDesc, true, context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf);
       context.log.debug("Added alter database task : {}:{}",
               alterDbTask.getId(), actualDbName);
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CommitTxnHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CommitTxnHandler.java
index 86f1cb9..2224793 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CommitTxnHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CommitTxnHandler.java
@@ -55,7 +55,9 @@ public class CommitTxnHandler extends AbstractMessageHandler {
     String tblName = null;
 
     ReplTxnWork work = new ReplTxnWork(HiveUtils.getReplPolicy(context.dbName), context.dbName,
-        null, msg.getTxnId(), ReplTxnWork.OperationType.REPL_COMMIT_TXN, context.eventOnlyReplicationSpec());
+                                       null, msg.getTxnId(), ReplTxnWork.OperationType.REPL_COMMIT_TXN,
+                                        context.eventOnlyReplicationSpec(), context.getDumpDirectory(),
+                                        context.getMetricCollector());
 
     if (numEntry > 0) {
       context.log.debug("Commit txn handler for txnid " + msg.getTxnId() + " databases : " + msg.getDatabases() +
@@ -76,7 +78,8 @@ public class CommitTxnHandler extends AbstractMessageHandler {
         tblName = actualTblName;
         // for warehouse level dump, use db name from write event
         dbName = (context.isDbNameEmpty() ? actualDBName : context.dbName);
-        Context currentContext = new Context(context, dbName);
+        Context currentContext = new Context(context, dbName,
+                context.getDumpDirectory(), context.getMetricCollector());
         currentContext.setLocation(location.toUri().toString());
 
         // Piggybacking in Import logic for now
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CreateDatabaseHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CreateDatabaseHandler.java
index c2e652a..cf78798 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CreateDatabaseHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CreateDatabaseHandler.java
@@ -44,7 +44,7 @@ public class CreateDatabaseHandler extends AbstractMessageHandler {
 
   @Override
   public List<Task<?>> handle(Context context)
-      throws SemanticException {
+          throws SemanticException {
     MetaData metaData;
     try {
       FileSystem fs = FileSystem.get(new Path(context.location).toUri(), context.hiveConf);
@@ -60,20 +60,23 @@ public class CreateDatabaseHandler extends AbstractMessageHandler {
     CreateDatabaseDesc createDatabaseDesc =
         new CreateDatabaseDesc(destinationDBName, db.getDescription(), null, null, true, db.getParameters());
     Task<DDLWork> createDBTask = TaskFactory.get(
-        new DDLWork(new HashSet<>(), new HashSet<>(), createDatabaseDesc), context.hiveConf);
+        new DDLWork(new HashSet<>(), new HashSet<>(), createDatabaseDesc, true,
+                context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf);
     if (!db.getParameters().isEmpty()) {
       AlterDatabaseSetPropertiesDesc alterDbDesc = new AlterDatabaseSetPropertiesDesc(destinationDBName,
           db.getParameters(), context.eventOnlyReplicationSpec());
-      Task<DDLWork> alterDbProperties = TaskFactory
-          .get(new DDLWork(new HashSet<>(), new HashSet<>(), alterDbDesc), context.hiveConf);
+      Task<DDLWork> alterDbProperties = TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(),
+                                        alterDbDesc, true, context.getDumpDirectory(),
+                                        context.getMetricCollector()), context.hiveConf);
       createDBTask.addDependentTask(alterDbProperties);
     }
     if (StringUtils.isNotEmpty(db.getOwnerName())) {
       AlterDatabaseSetOwnerDesc alterDbOwner = new AlterDatabaseSetOwnerDesc(destinationDBName,
           new PrincipalDesc(db.getOwnerName(), db.getOwnerType()),
           context.eventOnlyReplicationSpec());
-      Task<DDLWork> alterDbTask = TaskFactory
-          .get(new DDLWork(new HashSet<>(), new HashSet<>(), alterDbOwner), context.hiveConf);
+      Task<DDLWork> alterDbTask = TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(),
+              alterDbOwner, true, context.getDumpDirectory(), context.getMetricCollector()),
+              context.hiveConf);
       createDBTask.addDependentTask(alterDbTask);
     }
     updatedMetadata
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CreateFunctionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CreateFunctionHandler.java
index e65769a..b934ca4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CreateFunctionHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/CreateFunctionHandler.java
@@ -62,10 +62,10 @@ public class CreateFunctionHandler extends AbstractMessageHandler {
       FunctionDescBuilder builder = new FunctionDescBuilder(context);
       CreateFunctionDesc descToLoad = builder.build();
       this.functionName = builder.metadata.function.getFunctionName();
-
       context.log.debug("Loading function desc : {}", descToLoad.toString());
       Task<DDLWork> createTask = TaskFactory.get(
-          new DDLWork(readEntitySet, writeEntitySet, descToLoad), context.hiveConf);
+          new DDLWork(readEntitySet, writeEntitySet, descToLoad,
+                      true, context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf);
       context.log.debug("Added create function task : {}:{},{}", createTask.getId(),
           descToLoad.getName(), descToLoad.getClassName());
       // This null check is specifically done as the same class is used to handle both incremental and
@@ -203,9 +203,11 @@ public class CreateFunctionHandler extends AbstractMessageHandler {
     private Task<?> getCopyTask(String sourceUri, Path dest) {
       boolean copyAtLoad = context.hiveConf.getBoolVar(HiveConf.ConfVars.REPL_RUN_DATA_COPY_TASKS_ON_TARGET);
       if (copyAtLoad ) {
-        return ReplCopyTask.getLoadCopyTask(metadata.getReplicationSpec(), new Path(sourceUri), dest, context.hiveConf);
+        return ReplCopyTask.getLoadCopyTask(metadata.getReplicationSpec(), new Path(sourceUri), dest, context.hiveConf,
+                context.getDumpDirectory(), context.getMetricCollector());
       } else {
-        return TaskFactory.get(new CopyWork(new Path(sourceUri), dest, true, false), context.hiveConf);
+        return TaskFactory.get(new CopyWork(new Path(sourceUri), dest, true, false,
+                context.getDumpDirectory(), context.getMetricCollector(), true), context.hiveConf);
       }
     }
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropConstraintHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropConstraintHandler.java
index 34d3b00..70299f1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropConstraintHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropConstraintHandler.java
@@ -43,7 +43,8 @@ public class DropConstraintHandler extends AbstractMessageHandler {
     AlterTableDropConstraintDesc dropConstraintsDesc =
         new AlterTableDropConstraintDesc(tName, context.eventOnlyReplicationSpec(), constraintName);
     Task<DDLWork> dropConstraintsTask = TaskFactory.get(
-            new DDLWork(readEntitySet, writeEntitySet, dropConstraintsDesc), context.hiveConf);
+            new DDLWork(readEntitySet, writeEntitySet, dropConstraintsDesc, true,
+                    context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf);
     context.log.debug("Added drop constrain task : {}:{}", dropConstraintsTask.getId(), actualTblName);
     updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null);
     return Collections.singletonList(dropConstraintsTask);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropDatabaseHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropDatabaseHandler.java
index c10174a..8ac874a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropDatabaseHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropDatabaseHandler.java
@@ -37,8 +37,8 @@ public class DropDatabaseHandler extends AbstractMessageHandler {
         deserializer.getDropDatabaseMessage(context.dmd.getPayload());
     String actualDbName = context.isDbNameEmpty() ? msg.getDB() : context.dbName;
     DropDatabaseDesc desc = new DropDatabaseDesc(actualDbName, true, context.eventOnlyReplicationSpec());
-    Task<?> dropDBTask =
-        TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), desc), context.hiveConf);
+    Task<?> dropDBTask = TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), desc,
+                true, context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf);
     context.log.info(
         "Added drop database task : {}:{}", dropDBTask.getId(), desc.getDatabaseName());
     updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, null, null);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropFunctionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropFunctionHandler.java
index a3d5fd0..55d9232 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropFunctionHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropFunctionHandler.java
@@ -47,7 +47,8 @@ public class DropFunctionHandler extends AbstractMessageHandler {
     DropFunctionDesc desc = new DropFunctionDesc(
             qualifiedFunctionName, false, context.eventOnlyReplicationSpec());
     Task<DDLWork> dropFunctionTask =
-        TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, desc), context.hiveConf);
+        TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, desc, true,
+                context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf);
     context.log.debug(
         "Added drop function task : {}:{}", dropFunctionTask.getId(), desc.getName()
     );
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java
index 066549d..448cb2f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java
@@ -47,8 +47,8 @@ public class DropPartitionHandler extends AbstractMessageHandler {
         AlterTableDropPartitionDesc dropPtnDesc =
             new AlterTableDropPartitionDesc(HiveTableName.ofNullable(actualTblName, actualDbName), partSpecs, true,
                 context.eventOnlyReplicationSpec());
-        Task<DDLWork> dropPtnTask = TaskFactory.get(
-            new DDLWork(readEntitySet, writeEntitySet, dropPtnDesc), context.hiveConf
+        Task<DDLWork> dropPtnTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, dropPtnDesc,
+                    true, context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf
         );
         context.log.debug("Added drop ptn task : {}:{},{}", dropPtnTask.getId(),
             dropPtnDesc.getTableName(), msg.getPartitions());
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java
index ec4cb82..31649b3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java
@@ -49,7 +49,8 @@ public class DropTableHandler extends AbstractMessageHandler {
     DropTableDesc dropTableDesc = new DropTableDesc(actualDbName + "." + actualTblName, true, true,
         context.eventOnlyReplicationSpec(), false);
     Task<DDLWork> dropTableTask = TaskFactory.get(
-        new DDLWork(readEntitySet, writeEntitySet, dropTableDesc), context.hiveConf
+        new DDLWork(readEntitySet, writeEntitySet, dropTableDesc, true,
+                context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf
     );
     context.log.debug(
         "Added drop tbl task : {}:{}", dropTableTask.getId(), dropTableDesc.getTableName()
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/InsertHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/InsertHandler.java
index 4b8274d..611c486 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/InsertHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/InsertHandler.java
@@ -55,7 +55,8 @@ public class InsertHandler extends AbstractMessageHandler {
     InsertMessage insertMessage = deserializer.getInsertMessage(withinContext.dmd.getPayload());
     String actualDbName =
         withinContext.isDbNameEmpty() ? insertMessage.getDB() : withinContext.dbName;
-    Context currentContext = new Context(withinContext, actualDbName);
+    Context currentContext = new Context(withinContext, actualDbName,
+                                         withinContext.getDumpDirectory(), withinContext.getMetricCollector());
 
     // Piggybacking in Import logic for now
     TableHandler tableHandler = new TableHandler();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/MessageHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/MessageHandler.java
index 2851880..57d62b3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/MessageHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/MessageHandler.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.parse.repl.load.UpdatedMetaDataTracker;
+import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector;
 import org.slf4j.Logger;
 
 import java.io.Serializable;
@@ -54,6 +55,8 @@ public interface MessageHandler {
     final Hive db;
     final org.apache.hadoop.hive.ql.Context nestedContext;
     final Logger log;
+    String dumpDirectory;
+    private transient ReplicationMetricCollector metricCollector;
 
     public Context(String dbName, String location,
         Task<?> precursor, DumpMetaData dmd, HiveConf hiveConf,
@@ -68,6 +71,22 @@ public interface MessageHandler {
       this.log = log;
     }
 
+    public Context(String dbName, String location,
+                   Task<?> precursor, DumpMetaData dmd, HiveConf hiveConf,
+                   Hive db, org.apache.hadoop.hive.ql.Context nestedContext, Logger log,
+                   String dumpDirectory, ReplicationMetricCollector metricCollector) {
+      this.dbName = dbName;
+      this.location = location;
+      this.precursor = precursor;
+      this.dmd = dmd;
+      this.hiveConf = hiveConf;
+      this.db = db;
+      this.nestedContext = nestedContext;
+      this.log = log;
+      this.dumpDirectory = dumpDirectory;
+      this.metricCollector = metricCollector;
+    }
+
     public Context(Context other, String dbName) {
       this.dbName = dbName;
       this.location = other.location;
@@ -79,6 +98,19 @@ public interface MessageHandler {
       this.log = other.log;
     }
 
+    public Context(Context other, String dbName, String dumpDirectory, ReplicationMetricCollector metricCollector) {
+      this.dbName = dbName;
+      this.location = other.location;
+      this.precursor = other.precursor;
+      this.dmd = other.dmd;
+      this.hiveConf = other.hiveConf;
+      this.db = other.db;
+      this.nestedContext = other.nestedContext;
+      this.log = other.log;
+      this.dumpDirectory = dumpDirectory;
+      this.metricCollector = metricCollector;
+    }
+
     public boolean isDbNameEmpty() {
       return StringUtils.isEmpty(dbName);
     }
@@ -96,6 +128,14 @@ public interface MessageHandler {
       return nestedContext;
     }
 
+    public String getDumpDirectory() {
+      return dumpDirectory;
+    }
+
+    public ReplicationMetricCollector getMetricCollector() {
+      return metricCollector;
+    }
+
     public HiveTxnManager getTxnMgr() {
       return nestedContext.getHiveTxnManager();
     }
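
With the extended Context, a message handler can hand the dump directory and metric collector
to every DDL/copy/move work it creates, and handlers that fall back to nested import logic
(CommitTxnHandler, InsertHandler) use the new copy constructor so the same collector follows
the piggy-backed tasks. A minimal sketch of the pattern (desc and dbName are placeholders, not
taken from this commit):

    Task<DDLWork> task = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, desc,
            true, context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf);

    Context nested = new Context(context, dbName,
            context.getDumpDirectory(), context.getMetricCollector());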
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/OpenTxnHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/OpenTxnHandler.java
index cd7274d..dc61814 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/OpenTxnHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/OpenTxnHandler.java
@@ -44,7 +44,8 @@ public class OpenTxnHandler extends AbstractMessageHandler {
 
     Task<ReplTxnWork> openTxnTask = TaskFactory.get(
         new ReplTxnWork(HiveUtils.getReplPolicy(context.dbName), context.dbName, null,
-                msg.getTxnIds(), ReplTxnWork.OperationType.REPL_OPEN_TXN, context.eventOnlyReplicationSpec()),
+                msg.getTxnIds(), ReplTxnWork.OperationType.REPL_OPEN_TXN, context.eventOnlyReplicationSpec(),
+                context.getDumpDirectory(), context.getMetricCollector()),
         context.hiveConf
     );
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java
index ed7aa8d..1627337 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java
@@ -61,7 +61,8 @@ public class RenamePartitionHandler extends AbstractMessageHandler {
               tableName, oldPartSpec, newPartSpec, replicationSpec, null);
       renamePtnDesc.setWriteId(msg.getWriteId());
       Task<DDLWork> renamePtnTask = TaskFactory.get(
-          new DDLWork(readEntitySet, writeEntitySet, renamePtnDesc), context.hiveConf);
+          new DDLWork(readEntitySet, writeEntitySet, renamePtnDesc, true,
+                  context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf);
       context.log.debug("Added rename ptn task : {}:{}->{}",
                         renamePtnTask.getId(), oldPartSpec, newPartSpec);
       updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, newPartSpec);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java
index 05e094b..2e673b3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java
@@ -61,8 +61,9 @@ public class RenameTableHandler extends AbstractMessageHandler {
       AlterTableRenameDesc renameTableDesc =
           new AlterTableRenameDesc(oldName, replicationSpec, false, newName.getNotEmptyDbTable());
       renameTableDesc.setWriteId(msg.getWriteId());
-      Task<DDLWork> renameTableTask = TaskFactory.get(
-          new DDLWork(readEntitySet, writeEntitySet, renameTableDesc), context.hiveConf);
+      Task<DDLWork> renameTableTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet,
+              renameTableDesc, true, context.getDumpDirectory(),
+              context.getMetricCollector()), context.hiveConf);
       context.log.debug("Added rename table task : {}:{}->{}",
                         renameTableTask.getId(), oldName.getNotEmptyDbTable(), newName.getNotEmptyDbTable());
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TableHandler.java
index fda6c6c..a8d2bff 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TableHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TableHandler.java
@@ -67,11 +67,13 @@ public class TableHandler extends AbstractMessageHandler {
 
       // REPL LOAD is not partition level. It is always DB or table level. So, passing null for partition specs.
       if (TableType.VIRTUAL_VIEW.name().equals(rv.getTable().getTableType())) {
-        importTasks.add(ReplLoadTask.createViewTask(rv, context.dbName, context.hiveConf));
+        importTasks.add(ReplLoadTask.createViewTask(rv, context.dbName, context.hiveConf,
+                context.getDumpDirectory(), context.getMetricCollector()));
       } else {
         ImportSemanticAnalyzer.prepareImport(false, isLocationSet, isExternal, false,
             (context.precursor != null), parsedLocation, null, context.dbName,
-            null, context.location, x, updatedMetadata, context.getTxnMgr(), tuple.writeId, rv);
+            null, context.location, x, updatedMetadata, context.getTxnMgr(), tuple.writeId, rv,
+                context.getDumpDirectory(), context.getMetricCollector());
       }
 
       Task<?> openTxnTask = x.getOpenTxnTask();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java
index 85e9f92..2af9f13 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java
@@ -60,7 +60,8 @@ public class TruncatePartitionHandler extends AbstractMessageHandler {
             context.eventOnlyReplicationSpec());
     truncateTableDesc.setWriteId(msg.getWriteId());
     Task<DDLWork> truncatePtnTask = TaskFactory.get(
-        new DDLWork(readEntitySet, writeEntitySet, truncateTableDesc), context.hiveConf);
+        new DDLWork(readEntitySet, writeEntitySet, truncateTableDesc, true,
+                context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf);
     context.log.debug("Added truncate ptn task : {}:{}:{}", truncatePtnTask.getId(),
         truncateTableDesc.getTableName(), truncateTableDesc.getWriteId());
     updatedMetadata.set(context.dmd.getEventTo().toString(), tName.getDb(), tName.getTable(), partSpec);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java
index 6a50c8a..552cbed 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java
@@ -37,8 +37,9 @@ public class TruncateTableHandler extends AbstractMessageHandler {
 
     TruncateTableDesc truncateTableDesc = new TruncateTableDesc(tName, null, context.eventOnlyReplicationSpec());
     truncateTableDesc.setWriteId(msg.getWriteId());
-    Task<DDLWork> truncateTableTask = TaskFactory.get(
-        new DDLWork(readEntitySet, writeEntitySet, truncateTableDesc), context.hiveConf);
+    Task<DDLWork> truncateTableTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet,
+                truncateTableDesc, true, context.getDumpDirectory(),
+                context.getMetricCollector()), context.hiveConf);
 
     context.log.debug("Added truncate tbl task : {}:{}:{}", truncateTableTask.getId(),
         truncateTableDesc.getTableName(), truncateTableDesc.getWriteId());
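
The message handlers in this part of the patch all move to a DDLWork constructor that carries replication context alongside the descriptor. A minimal sketch of that construction, with the argument roles inferred from the call sites above (the constructor itself is added to DDLWork.java earlier in this commit):

    // Sketch only: argument roles inferred from the handler call sites; the real
    // constructor is defined in DDLWork.java.
    DDLWork work = new DDLWork(readEntitySet, writeEntitySet, truncateTableDesc,
        true /* isReplication */, context.getDumpDirectory(), context.getMetricCollector());
    Task<DDLWork> truncateTableTask = TaskFactory.get(work, context.hiveConf);

Passing the dump directory and metric collector along presumably lets a failing task report the failure against the correct replication metric and write the non-recoverable marker under the dump directory, rather than leaving the policy looking in-progress.
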
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdatePartColStatHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdatePartColStatHandler.java
index 8b1741e..9438c99 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdatePartColStatHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdatePartColStatHandler.java
@@ -53,7 +53,9 @@ public class UpdatePartColStatHandler extends AbstractMessageHandler {
 
     try {
       return ReplUtils.addTasksForLoadingColStats(colStats, context.hiveConf, updatedMetadata,
-                                                  upcsm.getTableObject(), upcsm.getWriteId());
+                                                  upcsm.getTableObject(), upcsm.getWriteId(),
+                                                  context.getDumpDirectory(),
+                                                  context.getMetricCollector());
     } catch(Exception e) {
       throw new SemanticException(e);
     }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdateTableColStatHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdateTableColStatHandler.java
index 6c54f97..a7f189e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdateTableColStatHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/UpdateTableColStatHandler.java
@@ -53,7 +53,8 @@ public class UpdateTableColStatHandler extends AbstractMessageHandler {
 
         try {
             return ReplUtils.addTasksForLoadingColStats(colStats, context.hiveConf, updatedMetadata,
-                    utcsm.getTableObject(), utcsm.getWriteId());
+                    utcsm.getTableObject(), utcsm.getWriteId(), context.getDumpDirectory(),
+                    context.getMetricCollector());
         } catch(Exception e) {
             throw new SemanticException(e);
         }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/ReplicationMetricCollector.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/ReplicationMetricCollector.java
index 59bc626..0b4b3cf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/ReplicationMetricCollector.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/ReplicationMetricCollector.java
@@ -75,8 +75,12 @@ public abstract class ReplicationMetricCollector {
       LOG.debug("Stage ended {}, {}, {}", stageName, status, lastReplId );
       Progress progress = replicationMetric.getProgress();
       Stage stage = progress.getStageByName(stageName);
+      if (stage == null) {
+        stage = new Stage(stageName, status, -1L);
+      }
       stage.setStatus(status);
       stage.setEndTime(System.currentTimeMillis());
+      progress.addStage(stage);
       replicationMetric.setProgress(progress);
       Metadata metadata = replicationMetric.getMetadata();
       metadata.setLastReplId(lastReplId);
@@ -93,9 +97,13 @@ public abstract class ReplicationMetricCollector {
       LOG.debug("Stage Ended {}, {}", stageName, status );
       Progress progress = replicationMetric.getProgress();
       Stage stage = progress.getStageByName(stageName);
+      if (stage == null) {
+        stage = new Stage(stageName, status, -1L);
+      }
       stage.setStatus(status);
       stage.setEndTime(System.currentTimeMillis());
       stage.setErrorLogPath(errorLogPath);
+      progress.addStage(stage);
       replicationMetric.setProgress(progress);
       metricCollector.addMetric(replicationMetric);
       if (Status.FAILED == status || Status.FAILED_ADMIN == status) {
@@ -109,8 +117,12 @@ public abstract class ReplicationMetricCollector {
       LOG.debug("Stage Ended {}, {}", stageName, status );
       Progress progress = replicationMetric.getProgress();
       Stage stage = progress.getStageByName(stageName);
+      if (stage == null) {
+        stage = new Stage(stageName, status, -1L);
+      }
       stage.setStatus(status);
       stage.setEndTime(System.currentTimeMillis());
+      progress.addStage(stage);
       replicationMetric.setProgress(progress);
       metricCollector.addMetric(replicationMetric);
       if (Status.FAILED == status || Status.FAILED_ADMIN == status) {
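
The new null check handles a stage end being reported for a stage that never had reportStageStart called, for example when a task fails before the start metric is emitted; previously getStageByName() returned null here and the FAILED / FAILED_ADMIN status never reached the metric. A hedged sketch of the failure path this guards (the surrounding task code is assumed, and the reportStageEnd overload shown is inferred from the reportStageStart calls in the new test):

    // Illustrative only: a dump stage failing before any stage-start metric was reported.
    try {
      doDumpWork();   // assumed helper that throws before reportStageStart has run
    } catch (Exception e) {
      // With the guard above this no longer NPEs; a placeholder Stage is created so
      // the failure status still reaches sys.replication_metrics.
      metricCollector.reportStageEnd("REPL_DUMP", Status.FAILED);
      throw e;
    }
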
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ColumnStatsUpdateWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ColumnStatsUpdateWork.java
index 1490025..ac27eb9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ColumnStatsUpdateWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ColumnStatsUpdateWork.java
@@ -23,6 +23,7 @@ import java.util.Map;
 
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.ql.ddl.DDLDesc.DDLDescWithWriteId;
+import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
 
@@ -45,6 +46,9 @@ public class ColumnStatsUpdateWork implements Serializable, DDLDescWithWriteId {
   private final String colType;
   private final ColumnStatistics colStats;
   private long writeId;
+  private boolean isReplication;
+  private String dumpDirectory;
+  private transient ReplicationMetricCollector metricCollector;
 
   public ColumnStatsUpdateWork(String partName,
       Map<String, String> mapProp,
@@ -71,11 +75,33 @@ public class ColumnStatsUpdateWork implements Serializable, DDLDescWithWriteId {
     this.colType = null;
   }
 
+  public ColumnStatsUpdateWork(ColumnStatistics colStats, String dumpRoot, ReplicationMetricCollector metricCollector,
+                               boolean isReplication) {
+    this.colStats = colStats;
+    this.partName = null;
+    this.mapProp = null;
+    this.dbName = null;
+    this.tableName = null;
+    this.colName = null;
+    this.colType = null;
+    this.dumpDirectory = dumpRoot;
+    this.metricCollector = metricCollector;
+    this.isReplication = isReplication;
+  }
+
   @Override
   public String toString() {
     return null;
   }
 
+  public String getDumpDirectory() {
+    return dumpDirectory;
+  }
+
+  public boolean isReplication() {
+    return isReplication;
+  }
+
   public String getPartName() {
     return partName;
   }
@@ -102,6 +128,11 @@ public class ColumnStatsUpdateWork implements Serializable, DDLDescWithWriteId {
 
   public ColumnStatistics getColStats() { return colStats; }
 
+  public ReplicationMetricCollector getMetricCollector() {
+    return metricCollector;
+  }
+
+
   @Override
   public void setWriteId(long writeId) {
     this.writeId = writeId;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CopyWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CopyWork.java
index f69776a..2439cde 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CopyWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CopyWork.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hive.ql.plan;
 import java.io.Serializable;
 
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
 /**
@@ -34,6 +35,9 @@ public class CopyWork implements Serializable {
   private Path[] toPath;
   private boolean errorOnSrcEmpty;
   private boolean overwrite = true;
+  private boolean isReplication;
+  private String dumpDirectory;
+  private transient ReplicationMetricCollector metricCollector;
 
   public CopyWork() {
   }
@@ -43,12 +47,32 @@ public class CopyWork implements Serializable {
     this.setErrorOnSrcEmpty(errorOnSrcEmpty);
   }
 
+  public CopyWork(final Path fromPath, final Path toPath, boolean errorOnSrcEmpty,
+                  String dumpDirectory, ReplicationMetricCollector metricCollector,
+                  boolean isReplication) {
+    this(new Path[] { fromPath }, new Path[] { toPath });
+    this.dumpDirectory = dumpDirectory;
+    this.metricCollector = metricCollector;
+    this.setErrorOnSrcEmpty(errorOnSrcEmpty);
+    this.isReplication = isReplication;
+  }
+
   public CopyWork(final Path fromPath, final Path toPath, boolean errorOnSrcEmpty, boolean overwrite) {
     this(new Path[] { fromPath }, new Path[] { toPath });
     this.setErrorOnSrcEmpty(errorOnSrcEmpty);
     this.setOverwrite(overwrite);
   }
 
+  public CopyWork(final Path fromPath, final Path toPath, boolean errorOnSrcEmpty, boolean overwrite,
+                  String dumpDirectory, ReplicationMetricCollector metricCollector, boolean isReplication) {
+    this(new Path[] { fromPath }, new Path[] { toPath });
+    this.setErrorOnSrcEmpty(errorOnSrcEmpty);
+    this.setOverwrite(overwrite);
+    this.dumpDirectory = dumpDirectory;
+    this.metricCollector = metricCollector;
+    this.isReplication = isReplication;
+  }
+
   public CopyWork(final Path[] fromPath, final Path[] toPath) {
     if (fromPath.length != toPath.length) {
       throw new RuntimeException(
@@ -87,6 +111,16 @@ public class CopyWork implements Serializable {
     return toPath;
   }
 
+  public ReplicationMetricCollector getMetricCollector() {
+    return metricCollector;
+  }
+
+  public String getDumpDirectory() {
+    return dumpDirectory;
+  }
+
+  public boolean isReplication() { return isReplication; }
+
   public void setErrorOnSrcEmpty(boolean errorOnSrcEmpty) {
     this.errorOnSrcEmpty = errorOnSrcEmpty;
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java
index 41fbe2e..5dd7fc5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.HiveTableName;
 import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector;
 
 /**
  * ImportTableDesc.
@@ -163,6 +164,13 @@ public class ImportTableDesc {
     return TaskFactory.get(new DDLWork(inputs, outputs, createTblDesc), conf);
   }
 
+  public Task<?> getCreateTableTask(Set<ReadEntity> inputs, Set<WriteEntity> outputs, HiveConf conf,
+                                    boolean isReplication,
+                                    String dumpRoot, ReplicationMetricCollector metricCollector) {
+    return TaskFactory.get(new DDLWork(inputs, outputs, createTblDesc, isReplication,
+            dumpRoot, metricCollector), conf);
+  }
+
   public TableType tableType() {
     return TableType.MANAGED_TABLE;
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java
index d7253a4..4fd3768 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
+import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
 /**
@@ -41,6 +42,9 @@ public class MoveWork implements Serializable {
   private boolean checkFileFormat;
   private boolean srcLocal;
   private boolean needCleanTarget;
+  private boolean isReplication;
+  private String dumpDirectory;
+  private transient ReplicationMetricCollector metricCollector;
 
   /**
    * ReadEntitites that are passed to the hooks.
@@ -87,6 +91,16 @@ public class MoveWork implements Serializable {
     this(inputs, outputs, loadTableWork, loadFileWork, checkFileFormat, false);
   }
 
+  public MoveWork(Set<ReadEntity> inputs, Set<WriteEntity> outputs,
+                  final LoadTableDesc loadTableWork, final LoadFileDesc loadFileWork,
+                  boolean checkFileFormat, String dumpRoot, ReplicationMetricCollector metricCollector,
+                  boolean isReplication) {
+    this(inputs, outputs, loadTableWork, loadFileWork, checkFileFormat, false);
+    this.dumpDirectory = dumpRoot;
+    this.metricCollector = metricCollector;
+    this.isReplication = isReplication;
+  }
+
   public MoveWork(final MoveWork o) {
     loadTableWork = o.getLoadTableWork();
     loadFileWork = o.getLoadFileWork();
@@ -169,6 +183,18 @@ public class MoveWork implements Serializable {
     this.isInReplicationScope = isInReplicationScope;
   }
 
+  public ReplicationMetricCollector getMetricCollector() {
+    return metricCollector;
+  }
+
+  public String getDumpDirectory() {
+    return dumpDirectory;
+  }
+
+  public boolean isReplication() {
+    return isReplication;
+  }
+
   public boolean getIsInReplicationScope() {
     return this.isInReplicationScope;
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ReplCopyWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ReplCopyWork.java
index 21da20f..3faed74 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ReplCopyWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ReplCopyWork.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hive.ql.plan;
 
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
 /**
@@ -61,6 +62,10 @@ public class ReplCopyWork extends CopyWork {
 
   private boolean overWrite = false;
 
+  private String dumpDirectory;
+
+  private transient ReplicationMetricCollector metricCollector;
+
   public ReplCopyWork(final Path srcPath, final Path destPath, boolean errorOnSrcEmpty) {
     super(srcPath, destPath, errorOnSrcEmpty);
   }
@@ -70,6 +75,13 @@ public class ReplCopyWork extends CopyWork {
     this.overWrite = overWrite;
   }
 
+  public ReplCopyWork(final Path srcPath, final Path destPath, boolean errorOnSrcEmpty, boolean overWrite,
+                      String dumpDirectory, ReplicationMetricCollector metricCollector) {
+    this(srcPath, destPath, errorOnSrcEmpty);
+    this.overWrite = overWrite;
+    this.dumpDirectory = dumpDirectory;
+    this.metricCollector = metricCollector;
+  }
   public void setReadSrcAsFilesList(boolean readSrcAsFilesList) {
     this.readSrcAsFilesList = readSrcAsFilesList;
   }
@@ -118,6 +130,10 @@ public class ReplCopyWork extends CopyWork {
     checkDuplicateCopy = flag;
   }
 
+  public ReplicationMetricCollector getMetricCollector() { return metricCollector; }
+
+  public String getDumpDirectory() { return dumpDirectory; }
+
   public boolean isOverWrite() {
     return overWrite;
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ReplTxnWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ReplTxnWork.java
index 7e16a7c..2d2dc3f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ReplTxnWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ReplTxnWork.java
@@ -18,10 +18,12 @@
 package org.apache.hadoop.hive.ql.plan;
 
 import java.io.Serializable;
+
 import org.apache.hadoop.hive.metastore.api.ReplLastIdInfo;
 import org.apache.hadoop.hive.metastore.api.TxnToWriteId;
 import org.apache.hadoop.hive.metastore.api.WriteEventInfo;
 import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
+import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -44,6 +47,9 @@ public class ReplTxnWork implements Serializable {
   private ReplicationSpec replicationSpec;
   private List<WriteEventInfo> writeEventInfos;
   private ReplLastIdInfo replLastIdInfo;
+  private String dumpDirectory;
+  private transient ReplicationMetricCollector metricCollector;
+
 
   /**
    * OperationType.
@@ -73,16 +79,38 @@ public class ReplTxnWork implements Serializable {
     this(replPolicy, dbName, tableName, txnIds, type, null, replicationSpec);
   }
 
+  public ReplTxnWork(String replPolicy, String dbName, String tableName, List<Long> txnIds, OperationType type,
+                     ReplicationSpec replicationSpec, String dumpDirectory, ReplicationMetricCollector metricCollector) {
+    this(replPolicy, dbName, tableName, txnIds, type, null, replicationSpec);
+    this.dumpDirectory = dumpDirectory;
+    this.metricCollector = metricCollector;
+  }
+
   public ReplTxnWork(String replPolicy, String dbName, String tableName, Long txnId,
                      OperationType type, ReplicationSpec replicationSpec) {
     this(replPolicy, dbName, tableName, Collections.singletonList(txnId), type, null, replicationSpec);
   }
 
+  public ReplTxnWork(String replPolicy, String dbName, String tableName, Long txnId,
+                     OperationType type, ReplicationSpec replicationSpec,
+                     String dumpDirectory, ReplicationMetricCollector metricCollector) {
+    this(replPolicy, dbName, tableName, Collections.singletonList(txnId), type, null, replicationSpec);
+    this.dumpDirectory = dumpDirectory;
+    this.metricCollector = metricCollector;
+  }
+
   public ReplTxnWork(String replPolicy, String dbName, String tableName, OperationType type,
                      List<TxnToWriteId> txnToWriteIdList, ReplicationSpec replicationSpec) {
     this(replPolicy, dbName, tableName, null, type, txnToWriteIdList, replicationSpec);
   }
 
+  public ReplTxnWork(String replPolicy, String dbName, String tableName, OperationType type,
+                     List<TxnToWriteId> txnToWriteIdList, ReplicationSpec replicationSpec,
+                     String dumpDirectory, ReplicationMetricCollector metricCollector) {
+    this(replPolicy, dbName, tableName, null, type, txnToWriteIdList, replicationSpec);
+    this.dumpDirectory = dumpDirectory;
+    this.metricCollector = metricCollector;
+  }
   public ReplTxnWork(String dbName, String tableName, List<String> partNames,
                      String validWriteIdList, OperationType type) {
     this.dbName = dbName;
@@ -92,6 +120,18 @@ public class ReplTxnWork implements Serializable {
     this.operation = type;
   }
 
+  public ReplTxnWork(String dbName, String tableName, List<String> partNames,
+                     String validWriteIdList, OperationType type, String dumpDirectory,
+                     ReplicationMetricCollector metricCollector) {
+    this.dbName = dbName;
+    this.tableName = tableName;
+    this.partNames = partNames;
+    this.validWriteIdList = validWriteIdList;
+    this.operation = type;
+    this.dumpDirectory = dumpDirectory;
+    this.metricCollector = metricCollector;
+  }
+
   public void addWriteEventInfo(WriteEventInfo writeEventInfo) {
     if (this.writeEventInfos == null) {
       this.writeEventInfos = new ArrayList<>();
@@ -142,4 +182,12 @@ public class ReplTxnWork implements Serializable {
   public ReplLastIdInfo getReplLastIdInfo() {
     return replLastIdInfo;
   }
+
+  public ReplicationMetricCollector getMetricCollector() {
+    return metricCollector;
+  }
+
+  public String getDumpDirectory() {
+    return dumpDirectory;
+  }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorException.java b/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorException.java
index 37fbc20..7c4d2c1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorException.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorException.java
@@ -55,6 +55,10 @@ public class CommandProcessorException extends Exception {
   public int getResponseCode() {
     return responseCode;
   }
+  
+  public String getCauseMessage() {
+    return getCause() == null ? "" : getCause().getMessage();
+  }
 
   public int getErrorCode() {
     return hiveErrorCode;
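
getCauseMessage() gives callers a null-safe way to surface the root cause of a failed command; a short usage sketch, with the surrounding driver and logger assumed rather than taken from this patch:

    // Illustrative only: surfacing the root cause of a failed command.
    try {
      driver.run(command);
    } catch (CommandProcessorException e) {
      // getCauseMessage() returns "" when there is no underlying cause.
      LOG.error("Command failed, code {}: {}", e.getResponseCode(), e.getCauseMessage());
    }
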
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestRangerDumpTask.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestRangerDumpTask.java
index d2222e7..0af6b00 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestRangerDumpTask.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestRangerDumpTask.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hive.ql.exec.repl;
 import com.google.gson.Gson;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.exec.repl.ranger.RangerExportPolicyList;
 import org.apache.hadoop.hive.ql.exec.repl.ranger.RangerRestClientImpl;
 import org.apache.hadoop.hive.ql.exec.repl.ranger.RangerPolicy;
@@ -78,14 +79,19 @@ public class TestRangerDumpTask {
   @Test
   public void testFailureInvalidAuthProviderEndpoint() throws Exception {
     Mockito.when(conf.get(RANGER_REST_URL)).thenReturn(null);
+    Mockito.when(work.getDbName()).thenReturn("testdb");
+    Mockito.when(work.getCurrentDumpPath()).thenReturn(new Path("/tmp"));
+    Mockito.when(work.getRangerConfigResource()).thenReturn(new URL("file://ranger.xml"));
     int status = task.execute();
-    Assert.assertEquals(40000, status);
+    Assert.assertEquals(ErrorMsg.REPL_INVALID_CONFIG_FOR_SERVICE.getErrorCode(), status);
   }
 
   @Test
   public void testFailureInvalidRangerConfig() throws Exception {
+    Mockito.when(work.getDbName()).thenReturn("testdb");
+    Mockito.when(work.getCurrentDumpPath()).thenReturn(new Path("/tmp"));
     int status = task.execute();
-    Assert.assertEquals(40000, status);
+    Assert.assertEquals(ErrorMsg.REPL_INVALID_CONFIG_FOR_SERVICE.getErrorCode(), status);
   }
 
   @Test
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricUpdateOnFailure.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricUpdateOnFailure.java
new file mode 100644
index 0000000..db951fe
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricUpdateOnFailure.java
@@ -0,0 +1,329 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.parse.repl.metric;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.Constants;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.ddl.DDLWork;
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.Context;
+import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.exec.repl.ReplAck;
+import org.apache.hadoop.hive.ql.exec.repl.ReplDumpWork;
+import org.apache.hadoop.hive.ql.exec.repl.ReplLoadWork;
+import org.apache.hadoop.hive.ql.parse.ExplainConfiguration;
+import org.apache.hadoop.hive.ql.parse.repl.dump.metric.BootstrapDumpMetricCollector;
+import org.apache.hadoop.hive.ql.parse.repl.dump.metric.IncrementalDumpMetricCollector;
+import org.apache.hadoop.hive.ql.parse.repl.load.metric.BootstrapLoadMetricCollector;
+import org.apache.hadoop.hive.ql.parse.repl.load.metric.IncrementalLoadMetricCollector;
+import org.apache.hadoop.hive.ql.parse.repl.metric.event.Progress;
+import org.apache.hadoop.hive.ql.parse.repl.metric.event.ReplicationMetric;
+import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestName;
+import org.junit.runner.RunWith;
+import org.mockito.Mockito;
+import org.mockito.junit.MockitoJUnitRunner;
+
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+@RunWith(MockitoJUnitRunner.class)
+public class TestReplicationMetricUpdateOnFailure {
+
+  FileSystem fs;
+  HiveConf conf;
+  String TEST_PATH;
+  
+  @Rule
+  public final TestName testName = new TestName();
+  
+  RuntimeException recoverableException = new RuntimeException();
+  RuntimeException nonRecoverableException = new RuntimeException(ErrorMsg.REPL_FAILED_WITH_NON_RECOVERABLE_ERROR.getMsg());
+  
+  @Before
+  public void setup() throws Exception {
+    
+    conf = new HiveConf();
+    conf.set(HiveConf.ConfVars.HIVE_IN_TEST_REPL.varname, "true");
+    conf.set(Constants.SCHEDULED_QUERY_SCHEDULENAME, "repl");
+    conf.set(Constants.SCHEDULED_QUERY_EXECUTIONID, "1");
+    
+    final String tid = 
+            TestReplicationMetricUpdateOnFailure.class.getCanonicalName().toLowerCase().replace('.','_')  
+            + "_" + System.currentTimeMillis();
+    TEST_PATH = System.getProperty("test.warehouse.dir", "/tmp") + Path.SEPARATOR + tid;
+    Path testPath = new Path(TEST_PATH);
+    fs = FileSystem.get(testPath.toUri(), conf);
+    fs.mkdirs(testPath);
+  }
+
+  @Test
+  public void testReplDumpFailure() throws Exception {
+    String dumpDir = TEST_PATH + Path.SEPARATOR + testName.getMethodName();
+    IncrementalDumpMetricCollector metricCollector =
+            new IncrementalDumpMetricCollector(null, TEST_PATH, conf);
+    ReplDumpWork replDumpWork = Mockito.mock(ReplDumpWork.class);
+    Mockito.when(replDumpWork.getCurrentDumpPath()).thenReturn(new Path(dumpDir));
+    Mockito.when(replDumpWork.getMetricCollector()).thenReturn(metricCollector);
+    Mockito.when(replDumpWork.dataCopyIteratorsInitialized()).thenThrow(recoverableException, nonRecoverableException);
+    Task<ReplDumpWork> replDumpTask = TaskFactory.get(replDumpWork, conf);
+
+    String stageName = "REPL_DUMP";
+    metricCollector.reportStageStart(stageName, new HashMap<>());
+    Assert.assertThrows(RuntimeException.class, () -> replDumpTask.execute());
+    performRecoverableChecks(stageName);
+
+    metricCollector.reportStageStart(stageName, new HashMap<>());
+    Assert.assertThrows(RuntimeException.class, () -> replDumpTask.execute());
+    performNonRecoverableChecks(dumpDir, stageName);
+  }
+  
+  @Test
+  public void testReplDumpRecoverableMissingStage() throws Exception {
+    String dumpDir = TEST_PATH + Path.SEPARATOR + testName.getMethodName();
+    MetricCollector.getInstance().deinit();
+    BootstrapDumpMetricCollector metricCollector =
+            new BootstrapDumpMetricCollector(null, TEST_PATH, conf);
+    ReplDumpWork replDumpWork = Mockito.mock(ReplDumpWork.class);
+    Mockito.when(replDumpWork.getMetricCollector()).thenReturn(metricCollector);
+    Mockito.when(replDumpWork.getCurrentDumpPath()).thenReturn(new Path(dumpDir));
+    Mockito.when(replDumpWork.dataCopyIteratorsInitialized()).thenThrow(recoverableException);
+    Task<ReplDumpWork> replDumpTask = TaskFactory.get(replDumpWork, conf);
+
+    //ensure stages are missing initially and execute without reporting start metrics
+    Assert.assertEquals(0, MetricCollector.getInstance().getMetrics().size());
+    
+    Assert.assertThrows(RuntimeException.class, () -> replDumpTask.execute());
+    performRecoverableChecks("REPL_DUMP");
+  }
+  
+  @Test
+  public void testReplDumpNonRecoverableMissingStage() throws Exception {
+    String dumpDir = TEST_PATH + Path.SEPARATOR + testName.getMethodName();
+    MetricCollector.getInstance().deinit();
+    IncrementalDumpMetricCollector metricCollector =
+            new IncrementalDumpMetricCollector(null, TEST_PATH, conf);
+    ReplDumpWork replDumpWork = Mockito.mock(ReplDumpWork.class);
+    Mockito.when(replDumpWork.getCurrentDumpPath()).thenReturn(new Path(dumpDir));
+    Mockito.when(replDumpWork.getMetricCollector()).thenReturn(metricCollector);
+    Mockito.when(replDumpWork.dataCopyIteratorsInitialized()).thenThrow(nonRecoverableException);
+    Task<ReplDumpWork> replDumpTask = TaskFactory.get(replDumpWork, conf);
+
+    //ensure stages are missing initially and execute without reporting start metrics
+    Assert.assertEquals(0, MetricCollector.getInstance().getMetrics().size());
+
+    Assert.assertThrows(RuntimeException.class, () -> replDumpTask.execute());
+    performNonRecoverableChecks(dumpDir, "REPL_DUMP");
+  }
+
+  @Test
+  public void testReplLoadFailure() throws Exception {
+    String dumpDir = TEST_PATH + Path.SEPARATOR + testName.getMethodName();
+    MetricCollector.getInstance().deinit();
+    IncrementalLoadMetricCollector metricCollector =
+            new IncrementalLoadMetricCollector(null, TEST_PATH, 0, conf);
+    ReplLoadWork replLoadWork = Mockito.mock(ReplLoadWork.class);
+    Mockito.when(replLoadWork.getDumpDirectory()).thenReturn(
+            new Path(dumpDir + Path.SEPARATOR + "test").toString());
+    Mockito.when(replLoadWork.getMetricCollector()).thenReturn(metricCollector);
+    Mockito.when(replLoadWork.getRootTask()).thenThrow(recoverableException, nonRecoverableException);
+    Task<ReplLoadWork> replLoadTask = TaskFactory.get(replLoadWork, conf);
+    
+    String stageName = "REPL_LOAD";
+    metricCollector.reportStageStart(stageName, new HashMap<>());
+    Assert.assertThrows(RuntimeException.class, () -> replLoadTask.execute());
+    performRecoverableChecks(stageName);
+
+    metricCollector.reportStageStart(stageName, new HashMap<>());
+    Assert.assertThrows(RuntimeException.class, () -> replLoadTask.execute());
+    performNonRecoverableChecks(dumpDir, stageName);
+  }
+
+  @Test
+  public void testReplLoadRecoverableMissingStage() throws Exception {
+    String dumpDir = TEST_PATH + Path.SEPARATOR + testName.getMethodName();
+    MetricCollector.getInstance().deinit();
+    BootstrapLoadMetricCollector metricCollector = 
+            new BootstrapLoadMetricCollector(null, TEST_PATH, 0, conf);
+    ReplLoadWork replLoadWork = Mockito.mock(ReplLoadWork.class);
+    Mockito.when(replLoadWork.getDumpDirectory()).thenReturn(
+            new Path(dumpDir + Path.SEPARATOR + "test").toString());
+    Mockito.when(replLoadWork.getMetricCollector()).thenReturn(metricCollector);
+    Mockito.when(replLoadWork.getRootTask()).thenThrow(recoverableException);
+    Task<ReplLoadWork> replLoadTask = TaskFactory.get(replLoadWork, conf);
+
+    //ensure stages are missing initially and execute without reporting start metrics
+    Assert.assertEquals(0, MetricCollector.getInstance().getMetrics().size());
+
+    Assert.assertThrows(RuntimeException.class, () -> replLoadTask.execute());
+    performRecoverableChecks("REPL_LOAD");
+  }
+
+  @Test
+  public void testReplLoadNonRecoverableMissingStage() throws Exception {
+    String dumpDir = TEST_PATH + Path.SEPARATOR + testName.getMethodName();
+    MetricCollector.getInstance().deinit();
+    IncrementalLoadMetricCollector metricCollector = 
+            new IncrementalLoadMetricCollector(null, TEST_PATH, 0, conf);
+    ReplLoadWork replLoadWork = Mockito.mock(ReplLoadWork.class);
+    Mockito.when(replLoadWork.getDumpDirectory()).thenReturn(
+            new Path(dumpDir + Path.SEPARATOR + "test").toString());
+    Mockito.when(replLoadWork.getMetricCollector()).thenReturn(metricCollector);
+    Mockito.when(replLoadWork.getRootTask()).thenThrow(nonRecoverableException);
+    Task<ReplLoadWork> replLoadTask = TaskFactory.get(replLoadWork, conf);
+
+    //ensure stages are missing initially and execute without reporting start metrics
+    Assert.assertEquals(0, MetricCollector.getInstance().getMetrics().size());
+
+    Assert.assertThrows(RuntimeException.class, () -> replLoadTask.execute());
+    performNonRecoverableChecks(dumpDir, "REPL_LOAD");
+  }
+
+  /*
+   * Check update on metrics upon intermediate task failures(not repl-dump / repl-load).
+   * Here, DDLTask is used as the intermediate task, other task failures should behave in similar fashion.
+   */
+  @Test
+  public void testDDLTaskFailure() throws Exception {
+    
+    //task-setup for DDL-Task
+    DDLWork ddlWork = Mockito.mock(DDLWork.class);
+    Context context = Mockito.mock(Context.class);
+    Mockito.when(context.getExplainAnalyze()).thenReturn(ExplainConfiguration.AnalyzeState.ANALYZING);
+    Mockito.when(ddlWork.isReplication()).thenReturn(true);
+    String dumpDir = TEST_PATH + Path.SEPARATOR + testName.getMethodName();
+    Mockito.when(ddlWork.getDumpDirectory()).thenReturn(dumpDir);
+    Task<DDLWork> ddlTask = TaskFactory.get(ddlWork, conf);
+    ddlTask.initialize(null, null, null, context);
+
+
+    IncrementalLoadMetricCollector metricCollector = new
+            IncrementalLoadMetricCollector(null, TEST_PATH, 1, conf);
+    Mockito.when(ddlWork.getMetricCollector()).thenReturn(metricCollector);
+
+    //setup for 2 runs - first recoverable and second non-recoverable
+    Mockito.when(ddlWork.getDDLDesc()).thenThrow(recoverableException, nonRecoverableException);
+    
+    String stageName = "REPL_LOAD";
+    
+    //test recoverable error during DDL-Task
+    metricCollector.reportStageStart(stageName, new HashMap<>());
+    ddlTask.execute();
+    performRecoverableChecks(stageName);
+   
+    //test non-recoverable error during DDL-Task
+    metricCollector.reportStageStart(stageName, new HashMap<>());
+    ddlTask.execute();
+    performNonRecoverableChecks(dumpDir, stageName);
+  }
+  
+  @Test
+  public void testRecoverableDDLFailureWithStageMissing() throws Exception {
+
+    //task-setup for DDL-Task
+    DDLWork ddlWork = Mockito.mock(DDLWork.class);
+    Context context = Mockito.mock(Context.class);
+    Mockito.when(context.getExplainAnalyze()).thenReturn(ExplainConfiguration.AnalyzeState.ANALYZING);
+    Mockito.when(ddlWork.isReplication()).thenReturn(true);
+    String dumpDir = TEST_PATH + Path.SEPARATOR + testName.getMethodName();
+    Mockito.when(ddlWork.getDumpDirectory()).thenReturn(dumpDir);
+    Task<DDLWork> ddlTask = TaskFactory.get(ddlWork, conf);
+    ddlTask.initialize(null, null, null, context);
+
+    MetricCollector.getInstance().deinit();
+    IncrementalLoadMetricCollector metricCollector = new
+            IncrementalLoadMetricCollector(null, TEST_PATH, 1, conf);
+    //ensure stages are missing initially and execute without reporting start metrics
+    Assert.assertEquals(0, MetricCollector.getInstance().getMetrics().size());
+
+
+    Map<String, Long> metricMap = new HashMap<>();
+    Mockito.when(ddlWork.getMetricCollector()).thenReturn(metricCollector);
+    Mockito.when(ddlWork.getDDLDesc()).thenThrow(recoverableException);
+
+    //test recoverable error during DDL-Task
+    ddlTask.execute();
+    performRecoverableChecks("REPL_LOAD");
+  }
+
+  @Test
+  public void testNonRecoverableDDLFailureWithStageMissing() throws Exception {
+
+    //task-setup for DDL-Task
+    DDLWork ddlWork = Mockito.mock(DDLWork.class);
+    Context context = Mockito.mock(Context.class);
+    Mockito.when(context.getExplainAnalyze()).thenReturn(ExplainConfiguration.AnalyzeState.ANALYZING);
+    Mockito.when(ddlWork.isReplication()).thenReturn(true);
+    String dumpDir = TEST_PATH + Path.SEPARATOR + testName.getMethodName();
+    Mockito.when(ddlWork.getDumpDirectory()).thenReturn(dumpDir);
+    Task<DDLWork> ddlTask = TaskFactory.get(ddlWork, conf);
+    ddlTask.initialize(null, null, null, context);
+    
+    MetricCollector.getInstance().deinit();
+    IncrementalLoadMetricCollector metricCollector = new
+            IncrementalLoadMetricCollector(null, TEST_PATH, 1, conf);
+    //ensure stages are missing initially and execute without reporting start metrics
+    Assert.assertEquals(0, MetricCollector.getInstance().getMetrics().size());
+
+    Map<String, Long> metricMap = new HashMap<>();
+    Mockito.when(ddlWork.getMetricCollector()).thenReturn(metricCollector);
+    Mockito.when(ddlWork.getDDLDesc()).thenThrow(nonRecoverableException);
+
+    //test non-recoverable error during DDL-Task, without initializing stage
+    ddlTask.execute();
+    performNonRecoverableChecks(dumpDir, "REPL_LOAD");
+  }
+
+
+  void performRecoverableChecks(String stageName){
+    List<ReplicationMetric> metricList = MetricCollector.getInstance().getMetrics();
+    Assert.assertEquals(1, metricList.size());
+    ReplicationMetric updatedMetric = metricList.get(0);
+    Progress updatedProgress = updatedMetric.getProgress();
+    Assert.assertEquals(Status.FAILED, updatedProgress.getStatus());
+    Assert.assertEquals(1, updatedProgress.getStages().size());
+    Assert.assertEquals(Status.FAILED, updatedProgress.getStageByName(stageName).getStatus());
+    Assert.assertNotEquals(0, updatedProgress.getStageByName(stageName).getEndTime());
+  }
+
+  void performNonRecoverableChecks(String dumpDir, String stageName) throws IOException {
+    List<ReplicationMetric> metricList = MetricCollector.getInstance().getMetrics();
+    Assert.assertEquals(1, metricList.size());
+    ReplicationMetric updatedMetric = metricList.get(0);
+    Progress updatedProgress = updatedMetric.getProgress();
+    Assert.assertEquals(Status.FAILED_ADMIN, updatedProgress.getStatus());
+    Assert.assertEquals(1, updatedProgress.getStages().size());
+    Assert.assertEquals(Status.FAILED_ADMIN, updatedProgress.getStageByName(stageName).getStatus());
+    Assert.assertNotEquals(0, updatedProgress.getStageByName(stageName).getEndTime());
+    Path expectedNonRecoverablePath = new Path(new Path(dumpDir), ReplAck.NON_RECOVERABLE_MARKER.toString());
+    Assert.assertTrue(fs.exists(expectedNonRecoverablePath));
+    fs.delete(expectedNonRecoverablePath, true);
+    MetricCollector.getInstance().deinit();
+  }
+}
diff --git a/ql/src/test/results/clientnegative/add_partition_with_whitelist.q.out b/ql/src/test/results/clientnegative/add_partition_with_whitelist.q.out
index ec7cf22..6aa789b 100644
--- a/ql/src/test/results/clientnegative/add_partition_with_whitelist.q.out
+++ b/ql/src/test/results/clientnegative/add_partition_with_whitelist.q.out
@@ -15,4 +15,4 @@ POSTHOOK: Input: default@part_whitelist_test
 PREHOOK: query: ALTER TABLE part_whitelist_test ADD PARTITION (ds='1,2,3,4')
 PREHOOK: type: ALTERTABLE_ADDPARTS
 PREHOOK: Output: default@part_whitelist_test
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Partition value '1,2,3,4' contains a character not matched by whitelist pattern '[\\x20-\\x7E&&[^,]]*'.  (configure with metastore.partition.name.whitelist.pattern))
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Partition value '1,2,3,4' contains a character not matched by whitelist pattern '[\\x20-\\x7E&&[^,]]*'.  (configure with metastore.partition.name.whitelist.pattern))
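
This golden-file update, and the similar ones that follow, reflect DDLTask now returning the specific error code carried by the failure (40000 for generic metastore errors, 40013 for alter-table failures, 10002/10036/10244 for the corresponding semantic errors) instead of the generic return code 1. The propagation pattern is roughly as sketched below; this is an illustration of the idea, not the literal DDLTask code:

    // Illustrative only: map the thrown exception to its canonical ErrorMsg code
    // rather than returning 1 for every failure.
    try {
      ddlOperation.execute();   // hypothetical operation call
      return 0;
    } catch (Throwable e) {
      setException(e);
      return ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode();
    }
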
diff --git a/ql/src/test/results/clientnegative/addpart1.q.out b/ql/src/test/results/clientnegative/addpart1.q.out
index 864709f..a1c6549 100644
--- a/ql/src/test/results/clientnegative/addpart1.q.out
+++ b/ql/src/test/results/clientnegative/addpart1.q.out
@@ -23,4 +23,4 @@ b=f/c=s
 PREHOOK: query: alter table addpart1 add partition (b='f', c='')
 PREHOOK: type: ALTERTABLE_ADDPARTS
 PREHOOK: Output: default@addpart1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. partition spec is invalid; field c does not exist or is empty
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. partition spec is invalid; field c does not exist or is empty
diff --git a/ql/src/test/results/clientnegative/allow_change_col_type_par_neg.q.out b/ql/src/test/results/clientnegative/allow_change_col_type_par_neg.q.out
index 98d99a9..4263945 100644
--- a/ql/src/test/results/clientnegative/allow_change_col_type_par_neg.q.out
+++ b/ql/src/test/results/clientnegative/allow_change_col_type_par_neg.q.out
@@ -14,5 +14,5 @@ PREHOOK: query: alter table t1 change column c1 c1 smallint
 PREHOOK: type: ALTERTABLE_RENAMECOL
 PREHOOK: Input: default@t1
 PREHOOK: Output: default@t1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
+FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
 c1
diff --git a/ql/src/test/results/clientnegative/alter_external_acid.q.out b/ql/src/test/results/clientnegative/alter_external_acid.q.out
index 8005676..84a7bc2 100644
--- a/ql/src/test/results/clientnegative/alter_external_acid.q.out
+++ b/ql/src/test/results/clientnegative/alter_external_acid.q.out
@@ -10,4 +10,4 @@ PREHOOK: query: alter table acid_external set TBLPROPERTIES ('transactional'='tr
 PREHOOK: type: ALTERTABLE_PROPERTIES
 PREHOOK: Input: default@acid_external
 PREHOOK: Output: default@acid_external
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. default.acid_external cannot be declared transactional because it's an external table
+FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. default.acid_external cannot be declared transactional because it's an external table
diff --git a/ql/src/test/results/clientnegative/alter_partition_change_col_dup_col.q.out b/ql/src/test/results/clientnegative/alter_partition_change_col_dup_col.q.out
index b226f8e..845540d 100644
--- a/ql/src/test/results/clientnegative/alter_partition_change_col_dup_col.q.out
+++ b/ql/src/test/results/clientnegative/alter_partition_change_col_dup_col.q.out
@@ -17,4 +17,4 @@ PREHOOK: query: alter table alter_partition_change_col_dup_col change c2 c1 deci
 PREHOOK: type: ALTERTABLE_RENAMECOL
 PREHOOK: Input: default@alter_partition_change_col_dup_col
 PREHOOK: Output: default@alter_partition_change_col_dup_col
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Duplicate column name: c1
+FAILED: Execution Error, return code 10036 from org.apache.hadoop.hive.ql.ddl.DDLTask. Duplicate column name: c1
diff --git a/ql/src/test/results/clientnegative/alter_partition_change_col_nonexist.q.out b/ql/src/test/results/clientnegative/alter_partition_change_col_nonexist.q.out
index f3a8069..9f04f88 100644
--- a/ql/src/test/results/clientnegative/alter_partition_change_col_nonexist.q.out
+++ b/ql/src/test/results/clientnegative/alter_partition_change_col_nonexist.q.out
@@ -17,4 +17,4 @@ PREHOOK: query: alter table alter_partition_change_col_nonexist change c3 c4 dec
 PREHOOK: type: ALTERTABLE_RENAMECOL
 PREHOOK: Input: default@alter_partition_change_col_nonexist
 PREHOOK: Output: default@alter_partition_change_col_nonexist
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Invalid column reference c3
+FAILED: Execution Error, return code 10002 from org.apache.hadoop.hive.ql.ddl.DDLTask. Invalid column reference c3
diff --git a/ql/src/test/results/clientnegative/alter_partition_with_whitelist.q.out b/ql/src/test/results/clientnegative/alter_partition_with_whitelist.q.out
index b5213b1..8a7857e 100644
--- a/ql/src/test/results/clientnegative/alter_partition_with_whitelist.q.out
+++ b/ql/src/test/results/clientnegative/alter_partition_with_whitelist.q.out
@@ -23,4 +23,4 @@ PREHOOK: query: ALTER TABLE part_whitelist_test PARTITION (ds='1') rename to par
 PREHOOK: type: ALTERTABLE_RENAMEPART
 PREHOOK: Input: default@part_whitelist_test
 PREHOOK: Output: default@part_whitelist_test@ds=1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to rename partition. Partition value '1,2,3' contains a character not matched by whitelist pattern '[\\x20-\\x7E&&[^,]]*'.  (configure with metastore.partition.name.whitelist.pattern)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to rename partition. Partition value '1,2,3' contains a character not matched by whitelist pattern '[\\x20-\\x7E&&[^,]]*'.  (configure with metastore.partition.name.whitelist.pattern)
diff --git a/ql/src/test/results/clientnegative/alter_rename_partition_failure.q.out b/ql/src/test/results/clientnegative/alter_rename_partition_failure.q.out
index be6fc9b..0314fa1 100644
--- a/ql/src/test/results/clientnegative/alter_rename_partition_failure.q.out
+++ b/ql/src/test/results/clientnegative/alter_rename_partition_failure.q.out
@@ -34,4 +34,4 @@ POSTHOOK: Lineage: alter_rename_partition PARTITION(pcol1=old_part1,pcol2=old_pa
 PREHOOK: query: alter table alter_rename_partition partition (pCol1='nonexist_part1', pcol2='nonexist_part2') rename to partition (pCol1='new_part1', pcol2='new_part2')
 PREHOOK: type: ALTERTABLE_RENAMEPART
 PREHOOK: Input: default@alter_rename_partition
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Rename partition: source partition [pcol1=nonexist_part1/pcol2=nonexist_part2] does not exist.
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Rename partition: source partition [pcol1=nonexist_part1/pcol2=nonexist_part2] does not exist.
diff --git a/ql/src/test/results/clientnegative/alter_rename_partition_failure2.q.out b/ql/src/test/results/clientnegative/alter_rename_partition_failure2.q.out
index f6ccedd..e0eb35d 100644
--- a/ql/src/test/results/clientnegative/alter_rename_partition_failure2.q.out
+++ b/ql/src/test/results/clientnegative/alter_rename_partition_failure2.q.out
@@ -35,4 +35,4 @@ PREHOOK: query: alter table alter_rename_partition partition (pCol1='old_part1:'
 PREHOOK: type: ALTERTABLE_RENAMEPART
 PREHOOK: Input: default@alter_rename_partition
 PREHOOK: Output: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to rename partition. Partition already exists:default.alter_rename_partition.[old_part1:, old_part2:]
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to rename partition. Partition already exists:default.alter_rename_partition.[old_part1:, old_part2:]
diff --git a/ql/src/test/results/clientnegative/alter_table_constraint_duplicate_pk.q.out b/ql/src/test/results/clientnegative/alter_table_constraint_duplicate_pk.q.out
index ce62f1f..2417cdf 100644
--- a/ql/src/test/results/clientnegative/alter_table_constraint_duplicate_pk.q.out
+++ b/ql/src/test/results/clientnegative/alter_table_constraint_duplicate_pk.q.out
@@ -8,4 +8,4 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@table1
 PREHOOK: query: alter table table1 add constraint pk4 primary key (b) disable novalidate rely
 PREHOOK: type: ALTERTABLE_ADDCONSTRAINT
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message: Primary key already exists for: hive.default.table1)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message: Primary key already exists for: hive.default.table1)
diff --git a/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_col1.q.out b/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_col1.q.out
index d36cf77..2c319c9 100644
--- a/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_col1.q.out
+++ b/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_col1.q.out
@@ -16,4 +16,4 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@table2
 PREHOOK: query: alter table table2 add constraint fk1 foreign key (c) references table1(a) disable novalidate
 PREHOOK: type: ALTERTABLE_ADDCONSTRAINT
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:Child column not found: c)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:Child column not found: c)
diff --git a/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_col2.q.out b/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_col2.q.out
index d77a76bf..b047681 100644
--- a/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_col2.q.out
+++ b/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_col2.q.out
@@ -16,4 +16,4 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@table2
 PREHOOK: query: alter table table2 add constraint fk1 foreign key (b) references table1(c) disable novalidate
 PREHOOK: type: ALTERTABLE_ADDCONSTRAINT
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:Parent column not found: c)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:Parent column not found: c)
diff --git a/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_tbl1.q.out b/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_tbl1.q.out
index 869f1ed..c73371f 100644
--- a/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_tbl1.q.out
+++ b/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_tbl1.q.out
@@ -16,4 +16,4 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@table2
 PREHOOK: query: alter table table3 add constraint fk1 foreign key (c) references table1(a) disable novalidate
 PREHOOK: type: ALTERTABLE_ADDCONSTRAINT
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:Child table not found: table3)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:Child table not found: table3)
diff --git a/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_tbl2.q.out b/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_tbl2.q.out
index 80c0f30..2604954 100644
--- a/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_tbl2.q.out
+++ b/ql/src/test/results/clientnegative/alter_table_constraint_invalid_fk_tbl2.q.out
@@ -16,4 +16,4 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@table2
 PREHOOK: query: alter table table2 add constraint fk1 foreign key (b) references table3(a) disable novalidate
 PREHOOK: type: ALTERTABLE_ADDCONSTRAINT
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:Parent table not found: table3)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:Parent table not found: table3)
diff --git a/ql/src/test/results/clientnegative/alter_table_constraint_invalid_pk_col.q.out b/ql/src/test/results/clientnegative/alter_table_constraint_invalid_pk_col.q.out
index f9532de..090fc65 100644
--- a/ql/src/test/results/clientnegative/alter_table_constraint_invalid_pk_col.q.out
+++ b/ql/src/test/results/clientnegative/alter_table_constraint_invalid_pk_col.q.out
@@ -8,4 +8,4 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@table1
 PREHOOK: query: alter table table1 add constraint pk1  primary key (c) disable novalidate
 PREHOOK: type: ALTERTABLE_ADDCONSTRAINT
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:Parent column not found: c)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:Parent column not found: c)
diff --git a/ql/src/test/results/clientnegative/alter_table_constraint_invalid_pk_tbl.q.out b/ql/src/test/results/clientnegative/alter_table_constraint_invalid_pk_tbl.q.out
index 56d87d5..e37850a 100644
--- a/ql/src/test/results/clientnegative/alter_table_constraint_invalid_pk_tbl.q.out
+++ b/ql/src/test/results/clientnegative/alter_table_constraint_invalid_pk_tbl.q.out
@@ -16,4 +16,4 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@table2
 PREHOOK: query: alter table table3 add constraint pk3 primary key (a) disable novalidate rely
 PREHOOK: type: ALTERTABLE_ADDCONSTRAINT
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:Parent table not found: table3)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:Parent table not found: table3)
diff --git a/ql/src/test/results/clientnegative/alter_table_constraint_invalid_ref.q.out b/ql/src/test/results/clientnegative/alter_table_constraint_invalid_ref.q.out
index f66641f..0cda4547 100644
--- a/ql/src/test/results/clientnegative/alter_table_constraint_invalid_ref.q.out
+++ b/ql/src/test/results/clientnegative/alter_table_constraint_invalid_ref.q.out
@@ -16,4 +16,4 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@table2
 PREHOOK: query: alter table table2 add constraint fk1 foreign key (a) references table1(b) disable novalidate
 PREHOOK: type: ALTERTABLE_ADDCONSTRAINT
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Foreign key references b:string; but no corresponding primary key or unique key exists. Possible keys: [a:string;])
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Foreign key references b:string; but no corresponding primary key or unique key exists. Possible keys: [a:string;])
diff --git a/ql/src/test/results/clientnegative/alter_table_wrong_db.q.out b/ql/src/test/results/clientnegative/alter_table_wrong_db.q.out
index 07641b4..cf35c42 100644
--- a/ql/src/test/results/clientnegative/alter_table_wrong_db.q.out
+++ b/ql/src/test/results/clientnegative/alter_table_wrong_db.q.out
@@ -22,4 +22,4 @@ PREHOOK: query: alter table bad_rename1.rename1 rename to bad_db_notexists.renam
 PREHOOK: type: ALTERTABLE_RENAME
 PREHOOK: Input: bad_rename1@rename1
 PREHOOK: Output: bad_rename1@rename1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. Unable to change partition or table. Object database hive.bad_db_notexists does not exist. Check metastore logs for detailed stack.
+FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. Unable to change partition or table. Object database hive.bad_db_notexists does not exist. Check metastore logs for detailed stack.
diff --git a/ql/src/test/results/clientnegative/alter_table_wrong_location2.q.out b/ql/src/test/results/clientnegative/alter_table_wrong_location2.q.out
index 54b8a3f..702bb32 100644
--- a/ql/src/test/results/clientnegative/alter_table_wrong_location2.q.out
+++ b/ql/src/test/results/clientnegative/alter_table_wrong_location2.q.out
@@ -11,4 +11,4 @@ PREHOOK: type: ALTERTABLE_LOCATION
 PREHOOK: Input: default@testwrongloc
 PREHOOK: Output: default@testwrongloc
 #### A masked pattern was here ####
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. relative/testwrongloc  is not absolute.  Please specify a complete absolute uri.
+FAILED: Execution Error, return code 10244 from org.apache.hadoop.hive.ql.ddl.DDLTask. relative/testwrongloc  is not absolute.  Please specify a complete absolute uri.
diff --git a/ql/src/test/results/clientnegative/alter_table_wrong_regex.q.out b/ql/src/test/results/clientnegative/alter_table_wrong_regex.q.out
index 7aa238d..cdf9e58 100644
--- a/ql/src/test/results/clientnegative/alter_table_wrong_regex.q.out
+++ b/ql/src/test/results/clientnegative/alter_table_wrong_regex.q.out
@@ -18,4 +18,4 @@ PREHOOK: query: alter table aa set serdeproperties ("input.regex" = "[^\\](.*)",
 PREHOOK: type: ALTERTABLE_SERDEPROPERTIES
 PREHOOK: Input: default@aa
 PREHOOK: Output: default@aa
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. at least one column must be specified for the table
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. at least one column must be specified for the table
diff --git a/ql/src/test/results/clientnegative/altern1.q.out b/ql/src/test/results/clientnegative/altern1.q.out
index 310b4bf..a2de13f 100644
--- a/ql/src/test/results/clientnegative/altern1.q.out
+++ b/ql/src/test/results/clientnegative/altern1.q.out
@@ -10,4 +10,4 @@ PREHOOK: query: alter table altern1 replace columns(a int, b int, ds string)
 PREHOOK: type: ALTERTABLE_REPLACECOLS
 PREHOOK: Input: default@altern1
 PREHOOK: Output: default@altern1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Partition column name ds conflicts with table columns.
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Partition column name ds conflicts with table columns.
diff --git a/ql/src/test/results/clientnegative/archive1.q.out b/ql/src/test/results/clientnegative/archive1.q.out
index 8b87e8a..dece98d 100644
--- a/ql/src/test/results/clientnegative/archive1.q.out
+++ b/ql/src/test/results/clientnegative/archive1.q.out
@@ -32,4 +32,4 @@ PREHOOK: query: ALTER TABLE srcpart_archived ARCHIVE PARTITION (ds='2008-04-08',
 PREHOOK: type: ALTERTABLE_ARCHIVE
 PREHOOK: Input: default@srcpart_archived
 PREHOOK: Output: default@srcpart_archived@ds=2008-04-08/hr=12
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Partition(s) already archived
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Partition(s) already archived
diff --git a/ql/src/test/results/clientnegative/archive2.q.out b/ql/src/test/results/clientnegative/archive2.q.out
index e2ca7d3..41e66f4 100644
--- a/ql/src/test/results/clientnegative/archive2.q.out
+++ b/ql/src/test/results/clientnegative/archive2.q.out
@@ -28,4 +28,4 @@ PREHOOK: query: ALTER TABLE tstsrcpart UNARCHIVE PARTITION (ds='2008-04-08', hr=
 PREHOOK: type: ALTERTABLE_UNARCHIVE
 PREHOOK: Input: default@tstsrcpart
 PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Partition ds=2008-04-08/hr=12 is not archived.
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Partition ds=2008-04-08/hr=12 is not archived.
diff --git a/ql/src/test/results/clientnegative/archive_multi1.q.out b/ql/src/test/results/clientnegative/archive_multi1.q.out
index 342b77f..541ad4a 100644
--- a/ql/src/test/results/clientnegative/archive_multi1.q.out
+++ b/ql/src/test/results/clientnegative/archive_multi1.q.out
@@ -49,4 +49,4 @@ PREHOOK: type: ALTERTABLE_ARCHIVE
 PREHOOK: Input: default@tstsrcpart
 PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=11
 PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Partition(s) already archived
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Partition(s) already archived
diff --git a/ql/src/test/results/clientnegative/archive_multi2.q.out b/ql/src/test/results/clientnegative/archive_multi2.q.out
index a4680d6..f226cc2 100644
--- a/ql/src/test/results/clientnegative/archive_multi2.q.out
+++ b/ql/src/test/results/clientnegative/archive_multi2.q.out
@@ -42,4 +42,4 @@ PREHOOK: query: ALTER TABLE tstsrcpart UNARCHIVE PARTITION (ds='2008-04-08', hr=
 PREHOOK: type: ALTERTABLE_UNARCHIVE
 PREHOOK: Input: default@tstsrcpart
 PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Partition ds=2008-04-08/hr=12 is not archived.
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Partition ds=2008-04-08/hr=12 is not archived.
diff --git a/ql/src/test/results/clientnegative/archive_multi3.q.out b/ql/src/test/results/clientnegative/archive_multi3.q.out
index 0ad82dd..8bd3488 100644
--- a/ql/src/test/results/clientnegative/archive_multi3.q.out
+++ b/ql/src/test/results/clientnegative/archive_multi3.q.out
@@ -47,4 +47,4 @@ PREHOOK: type: ALTERTABLE_ARCHIVE
 PREHOOK: Input: default@tstsrcpart
 PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=11
 PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Conflict with existing archive ds=2008-04-08/hr=12
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Conflict with existing archive ds=2008-04-08/hr=12
diff --git a/ql/src/test/results/clientnegative/archive_multi4.q.out b/ql/src/test/results/clientnegative/archive_multi4.q.out
index 24f3094..18a48e7 100644
--- a/ql/src/test/results/clientnegative/archive_multi4.q.out
+++ b/ql/src/test/results/clientnegative/archive_multi4.q.out
@@ -48,4 +48,4 @@ PREHOOK: query: ALTER TABLE tstsrcpart ARCHIVE PARTITION (ds='2008-04-08', hr='1
 PREHOOK: type: ALTERTABLE_ARCHIVE
 PREHOOK: Input: default@tstsrcpart
 PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Conflict with existing archive ds=2008-04-08
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Conflict with existing archive ds=2008-04-08
diff --git a/ql/src/test/results/clientnegative/archive_multi5.q.out b/ql/src/test/results/clientnegative/archive_multi5.q.out
index 7c0cc90..d8c54fa 100644
--- a/ql/src/test/results/clientnegative/archive_multi5.q.out
+++ b/ql/src/test/results/clientnegative/archive_multi5.q.out
@@ -47,4 +47,4 @@ PREHOOK: type: ALTERTABLE_UNARCHIVE
 PREHOOK: Input: default@tstsrcpart
 PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=11
 PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Partition ds=2008-04-08/hr=11 is not archived.
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Partition ds=2008-04-08/hr=11 is not archived.
diff --git a/ql/src/test/results/clientnegative/archive_multi6.q.out b/ql/src/test/results/clientnegative/archive_multi6.q.out
index 3ab6e01..88fb3a9 100644
--- a/ql/src/test/results/clientnegative/archive_multi6.q.out
+++ b/ql/src/test/results/clientnegative/archive_multi6.q.out
@@ -48,4 +48,4 @@ PREHOOK: query: ALTER TABLE tstsrcpart UNARCHIVE PARTITION (ds='2008-04-08', hr=
 PREHOOK: type: ALTERTABLE_UNARCHIVE
 PREHOOK: Input: default@tstsrcpart
 PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Partition ds=2008-04-08/hr=12 is archived at level 1, and given partspec only has 2 specs.
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Partition ds=2008-04-08/hr=12 is archived at level 1, and given partspec only has 2 specs.
diff --git a/ql/src/test/results/clientnegative/authorization_cannot_create_default_role.q.out b/ql/src/test/results/clientnegative/authorization_cannot_create_default_role.q.out
index 502088f..78543d2 100644
--- a/ql/src/test/results/clientnegative/authorization_cannot_create_default_role.q.out
+++ b/ql/src/test/results/clientnegative/authorization_cannot_create_default_role.q.out
@@ -4,4 +4,4 @@ POSTHOOK: query: set role ADMIN
 POSTHOOK: type: SHOW_ROLES
 PREHOOK: query: create role default
 PREHOOK: type: CREATEROLE
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Role name cannot be one of the reserved roles: [ALL, DEFAULT, NONE]
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Role name cannot be one of the reserved roles: [ALL, DEFAULT, NONE]
diff --git a/ql/src/test/results/clientnegative/authorization_caseinsensitivity.q.out b/ql/src/test/results/clientnegative/authorization_caseinsensitivity.q.out
index 4efc7d4..e807c85 100644
--- a/ql/src/test/results/clientnegative/authorization_caseinsensitivity.q.out
+++ b/ql/src/test/results/clientnegative/authorization_caseinsensitivity.q.out
@@ -55,4 +55,4 @@ public
 testrole
 PREHOOK: query: create role TESTRoLE
 PREHOOK: type: CREATEROLE
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error create role: Got exception: org.apache.hadoop.hive.metastore.api.InvalidObjectException Role testrole already exists.
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error create role: Got exception: org.apache.hadoop.hive.metastore.api.InvalidObjectException Role testrole already exists.
diff --git a/ql/src/test/results/clientnegative/authorization_create_role_no_admin.q.out b/ql/src/test/results/clientnegative/authorization_create_role_no_admin.q.out
index dedb273..1818d07 100644
--- a/ql/src/test/results/clientnegative/authorization_create_role_no_admin.q.out
+++ b/ql/src/test/results/clientnegative/authorization_create_role_no_admin.q.out
@@ -1,3 +1,3 @@
 PREHOOK: query: create role r1
 PREHOOK: type: CREATEROLE
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current user : hive_test_user is not allowed to add roles. User has to belong to ADMIN role and have it as current role, for this action.
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current user : hive_test_user is not allowed to add roles. User has to belong to ADMIN role and have it as current role, for this action.
diff --git a/ql/src/test/results/clientnegative/authorization_drop_admin_role.q.out b/ql/src/test/results/clientnegative/authorization_drop_admin_role.q.out
index 027221c..6837887 100644
--- a/ql/src/test/results/clientnegative/authorization_drop_admin_role.q.out
+++ b/ql/src/test/results/clientnegative/authorization_drop_admin_role.q.out
@@ -4,4 +4,4 @@ POSTHOOK: query: set role admin
 POSTHOOK: type: SHOW_ROLES
 PREHOOK: query: drop role admin
 PREHOOK: type: DROPROLE
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error dropping role: public,admin roles can't be dropped.
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error dropping role: public,admin roles can't be dropped.
diff --git a/ql/src/test/results/clientnegative/authorization_drop_role_no_admin.q.out b/ql/src/test/results/clientnegative/authorization_drop_role_no_admin.q.out
index 2850c77..b086ec4 100644
--- a/ql/src/test/results/clientnegative/authorization_drop_role_no_admin.q.out
+++ b/ql/src/test/results/clientnegative/authorization_drop_role_no_admin.q.out
@@ -22,4 +22,4 @@ POSTHOOK: type: SHOW_ROLES
 public
 PREHOOK: query: drop role r1
 PREHOOK: type: DROPROLE
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current user : hive_admin_user is not allowed to drop role. User has to belong to ADMIN role and have it as current role, for this action.
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current user : hive_admin_user is not allowed to drop role. User has to belong to ADMIN role and have it as current role, for this action.
diff --git a/ql/src/test/results/clientnegative/authorization_fail_1.q.out b/ql/src/test/results/clientnegative/authorization_fail_1.q.out
index 16cf00c..0943a43 100644
--- a/ql/src/test/results/clientnegative/authorization_fail_1.q.out
+++ b/ql/src/test/results/clientnegative/authorization_fail_1.q.out
@@ -15,4 +15,4 @@ POSTHOOK: Output: default@authorization_fail_1
 PREHOOK: query: grant Create on table authorization_fail_1 to user hive_test_user
 PREHOOK: type: GRANT_PRIVILEGE
 PREHOOK: Output: default@authorization_fail_1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: MetaException(message:Got exception: org.apache.hadoop.hive.metastore.api.InvalidObjectException CREATE is already granted on table [default,authorization_fail_1] by hive_test_user)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: MetaException(message:Got exception: org.apache.hadoop.hive.metastore.api.InvalidObjectException CREATE is already granted on table [default,authorization_fail_1] by hive_test_user)
diff --git a/ql/src/test/results/clientnegative/authorization_fail_8.q.out b/ql/src/test/results/clientnegative/authorization_fail_8.q.out
index b8bd912..df3c5ba 100644
--- a/ql/src/test/results/clientnegative/authorization_fail_8.q.out
+++ b/ql/src/test/results/clientnegative/authorization_fail_8.q.out
@@ -43,4 +43,4 @@ default	authorization_fail			user2	USER	SELECT	false	-1	user1
 PREHOOK: query: GRANT SELECT ON authorization_fail TO USER user3
 PREHOOK: type: GRANT_PRIVILEGE
 PREHOOK: Output: default@authorization_fail
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Permission denied: Principal [name=user2, type=USER] does not have following privileges for operation GRANT_PRIVILEGE [[SELECT with grant] on Object [type=TABLE_OR_VIEW, name=default.authorization_fail]]
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Permission denied: Principal [name=user2, type=USER] does not have following privileges for operation GRANT_PRIVILEGE [[SELECT with grant] on Object [type=TABLE_OR_VIEW, name=default.authorization_fail]]
diff --git a/ql/src/test/results/clientnegative/authorization_grant_group.q.out b/ql/src/test/results/clientnegative/authorization_grant_group.q.out
index eb638f2..ce5344d 100644
--- a/ql/src/test/results/clientnegative/authorization_grant_group.q.out
+++ b/ql/src/test/results/clientnegative/authorization_grant_group.q.out
@@ -9,4 +9,4 @@ POSTHOOK: Output: default@table_gg
 PREHOOK: query: GRANT INSERT ON table_gg TO group g1
 PREHOOK: type: GRANT_PRIVILEGE
 PREHOOK: Output: default@table_gg
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Invalid principal type in principal Principal [name=g1, type=GROUP]
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Invalid principal type in principal Principal [name=g1, type=GROUP]
diff --git a/ql/src/test/results/clientnegative/authorization_grant_table_allpriv.q.out b/ql/src/test/results/clientnegative/authorization_grant_table_allpriv.q.out
index fe075e8..39c2945 100644
--- a/ql/src/test/results/clientnegative/authorization_grant_table_allpriv.q.out
+++ b/ql/src/test/results/clientnegative/authorization_grant_table_allpriv.q.out
@@ -15,4 +15,4 @@ POSTHOOK: Output: default@table_priv_allf
 PREHOOK: query: GRANT ALL ON table_priv_allf TO USER user3
 PREHOOK: type: GRANT_PRIVILEGE
 PREHOOK: Output: default@table_priv_allf
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Permission denied: Principal [name=user2, type=USER] does not have following privileges for operation GRANT_PRIVILEGE [[SELECT with grant, UPDATE with grant, DELETE with grant] on Object [type=TABLE_OR_VIEW, name=default.table_priv_allf]]
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Permission denied: Principal [name=user2, type=USER] does not have following privileges for operation GRANT_PRIVILEGE [[SELECT with grant, UPDATE with grant, DELETE with grant] on Object [type=TABLE_OR_VIEW, name=default.table_priv_allf]]
diff --git a/ql/src/test/results/clientnegative/authorization_grant_table_dup.q.out b/ql/src/test/results/clientnegative/authorization_grant_table_dup.q.out
index c9f8295..c2b2d7e 100644
--- a/ql/src/test/results/clientnegative/authorization_grant_table_dup.q.out
+++ b/ql/src/test/results/clientnegative/authorization_grant_table_dup.q.out
@@ -22,4 +22,4 @@ default	tauth_gdup			user1	USER	UPDATE	true	-1	user1
 PREHOOK: query: GRANT INSERT ON tauth_gdup TO USER user1
 PREHOOK: type: GRANT_PRIVILEGE
 PREHOOK: Output: default@tauth_gdup
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error granting privileges: Got exception: org.apache.hadoop.hive.metastore.api.InvalidObjectException INSERT is already granted on table [default,tauth_gdup] by user1
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error granting privileges: Got exception: org.apache.hadoop.hive.metastore.api.InvalidObjectException INSERT is already granted on table [default,tauth_gdup] by user1
diff --git a/ql/src/test/results/clientnegative/authorization_grant_table_fail1.q.out b/ql/src/test/results/clientnegative/authorization_grant_table_fail1.q.out
index 071e6e3..567a4bf 100644
--- a/ql/src/test/results/clientnegative/authorization_grant_table_fail1.q.out
+++ b/ql/src/test/results/clientnegative/authorization_grant_table_fail1.q.out
@@ -9,4 +9,4 @@ POSTHOOK: Output: default@table_priv_gfail1
 PREHOOK: query: GRANT INSERT ON table_priv_gfail1 TO USER user3
 PREHOOK: type: GRANT_PRIVILEGE
 PREHOOK: Output: default@table_priv_gfail1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Permission denied: Principal [name=user2, type=USER] does not have following privileges for operation GRANT_PRIVILEGE [[INSERT with grant] on Object [type=TABLE_OR_VIEW, name=default.table_priv_gfail1]]
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Permission denied: Principal [name=user2, type=USER] does not have following privileges for operation GRANT_PRIVILEGE [[INSERT with grant] on Object [type=TABLE_OR_VIEW, name=default.table_priv_gfail1]]
diff --git a/ql/src/test/results/clientnegative/authorization_grant_table_fail_nogrant.q.out b/ql/src/test/results/clientnegative/authorization_grant_table_fail_nogrant.q.out
index 3f37585..37f06ac 100644
--- a/ql/src/test/results/clientnegative/authorization_grant_table_fail_nogrant.q.out
+++ b/ql/src/test/results/clientnegative/authorization_grant_table_fail_nogrant.q.out
@@ -15,4 +15,4 @@ POSTHOOK: Output: default@table_priv_gfail1
 PREHOOK: query: GRANT INSERT ON table_priv_gfail1 TO USER user3
 PREHOOK: type: GRANT_PRIVILEGE
 PREHOOK: Output: default@table_priv_gfail1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Permission denied: Principal [name=user2, type=USER] does not have following privileges for operation GRANT_PRIVILEGE [[INSERT with grant] on Object [type=TABLE_OR_VIEW, name=default.table_priv_gfail1]]
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Permission denied: Principal [name=user2, type=USER] does not have following privileges for operation GRANT_PRIVILEGE [[INSERT with grant] on Object [type=TABLE_OR_VIEW, name=default.table_priv_gfail1]]
diff --git a/ql/src/test/results/clientnegative/authorization_invalid_priv_v2.q.out b/ql/src/test/results/clientnegative/authorization_invalid_priv_v2.q.out
index 996fa8f..a56fac9 100644
--- a/ql/src/test/results/clientnegative/authorization_invalid_priv_v2.q.out
+++ b/ql/src/test/results/clientnegative/authorization_invalid_priv_v2.q.out
@@ -9,4 +9,4 @@ POSTHOOK: Output: default@authorization_invalid_v2
 PREHOOK: query: grant lock on table authorization_invalid_v2 to user hive_test_user
 PREHOOK: type: GRANT_PRIVILEGE
 PREHOOK: Output: default@authorization_invalid_v2
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unsupported privilege type LOCK
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unsupported privilege type LOCK
diff --git a/ql/src/test/results/clientnegative/authorization_priv_current_role_neg.q.out b/ql/src/test/results/clientnegative/authorization_priv_current_role_neg.q.out
index 2ec51e6..419987f 100644
--- a/ql/src/test/results/clientnegative/authorization_priv_current_role_neg.q.out
+++ b/ql/src/test/results/clientnegative/authorization_priv_current_role_neg.q.out
@@ -61,4 +61,4 @@ POSTHOOK: type: SHOW_ROLES
 PREHOOK: query: grant all on table tpriv_current_role to user user5
 PREHOOK: type: GRANT_PRIVILEGE
 PREHOOK: Output: default@tpriv_current_role
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Permission denied: Principal [name=user2, type=USER] does not have following privileges for operation GRANT_PRIVILEGE [[SELECT with grant, INSERT with grant, UPDATE with grant, DELETE with grant] on Object [type=TABLE_OR_VIEW, name=default.tpriv_current_role]]
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Permission denied: Principal [name=user2, type=USER] does not have following privileges for operation GRANT_PRIVILEGE [[SELECT with grant, INSERT with grant, UPDATE with grant, DELETE with grant] on Object [type=TABLE_OR_VIEW, name=default.tpriv_current_role]]
diff --git a/ql/src/test/results/clientnegative/authorization_public_create.q.out b/ql/src/test/results/clientnegative/authorization_public_create.q.out
index 669cffe..16e3cb3 100644
--- a/ql/src/test/results/clientnegative/authorization_public_create.q.out
+++ b/ql/src/test/results/clientnegative/authorization_public_create.q.out
@@ -1,3 +1,3 @@
 PREHOOK: query: create role public
 PREHOOK: type: CREATEROLE
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: MetaException(message:public role implicitly exists. It can't be created.)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: MetaException(message:public role implicitly exists. It can't be created.)
diff --git a/ql/src/test/results/clientnegative/authorization_public_drop.q.out b/ql/src/test/results/clientnegative/authorization_public_drop.q.out
index e1b538d..a218682 100644
--- a/ql/src/test/results/clientnegative/authorization_public_drop.q.out
+++ b/ql/src/test/results/clientnegative/authorization_public_drop.q.out
@@ -1,3 +1,3 @@
 PREHOOK: query: drop role public
 PREHOOK: type: DROPROLE
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: MetaException(message:public,admin roles can't be dropped.)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: MetaException(message:public,admin roles can't be dropped.)
diff --git a/ql/src/test/results/clientnegative/authorization_revoke_table_fail1.q.out b/ql/src/test/results/clientnegative/authorization_revoke_table_fail1.q.out
index d7454f7..590fb8f 100644
--- a/ql/src/test/results/clientnegative/authorization_revoke_table_fail1.q.out
+++ b/ql/src/test/results/clientnegative/authorization_revoke_table_fail1.q.out
@@ -15,5 +15,5 @@ POSTHOOK: Output: default@table_priv_rfail1
 PREHOOK: query: REVOKE INSERT ON TABLE table_priv_rfail1 FROM USER user2
 PREHOOK: type: REVOKE_PRIVILEGE
 PREHOOK: Output: default@table_priv_rfail1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Cannot find privilege Privilege [name=INSERT, columns=null] for Principal [name=user2, type=USER] on Object [type=TABLE_OR_VIEW, name=default.table_priv_rfail1] granted by user3
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Cannot find privilege Privilege [name=INSERT, columns=null] for Principal [name=user2, type=USER] on Object [type=TABLE_OR_VIEW, name=default.table_priv_rfail1] granted by user3
 
diff --git a/ql/src/test/results/clientnegative/authorization_revoke_table_fail2.q.out b/ql/src/test/results/clientnegative/authorization_revoke_table_fail2.q.out
index c7030ca..2c24fb6 100644
--- a/ql/src/test/results/clientnegative/authorization_revoke_table_fail2.q.out
+++ b/ql/src/test/results/clientnegative/authorization_revoke_table_fail2.q.out
@@ -27,5 +27,5 @@ POSTHOOK: Output: default@table_priv_rfai2
 PREHOOK: query: REVOKE INSERT ON TABLE table_priv_rfai2 FROM USER user2
 PREHOOK: type: REVOKE_PRIVILEGE
 PREHOOK: Output: default@table_priv_rfai2
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Cannot find privilege Privilege [name=INSERT, columns=null] for Principal [name=user2, type=USER] on Object [type=TABLE_OR_VIEW, name=default.table_priv_rfai2] granted by user3
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Cannot find privilege Privilege [name=INSERT, columns=null] for Principal [name=user2, type=USER] on Object [type=TABLE_OR_VIEW, name=default.table_priv_rfai2] granted by user3
 
diff --git a/ql/src/test/results/clientnegative/authorization_role_case.q.out b/ql/src/test/results/clientnegative/authorization_role_case.q.out
index adb6d3c..fb200cf9 100644
--- a/ql/src/test/results/clientnegative/authorization_role_case.q.out
+++ b/ql/src/test/results/clientnegative/authorization_role_case.q.out
@@ -31,4 +31,4 @@ POSTHOOK: Output: default@t1
 PREHOOK: query: grant UPDATE  on table t1 to role mixcaserole2
 PREHOOK: type: GRANT_PRIVILEGE
 PREHOOK: Output: default@t1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: MetaException(message:Got exception: org.apache.hadoop.hive.metastore.api.NoSuchObjectException Role mixcaserole2 does not exist)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: MetaException(message:Got exception: org.apache.hadoop.hive.metastore.api.NoSuchObjectException Role mixcaserole2 does not exist)
diff --git a/ql/src/test/results/clientnegative/authorization_role_cycles1.q.out b/ql/src/test/results/clientnegative/authorization_role_cycles1.q.out
index e7f3a31..5d7fe3e 100644
--- a/ql/src/test/results/clientnegative/authorization_role_cycles1.q.out
+++ b/ql/src/test/results/clientnegative/authorization_role_cycles1.q.out
@@ -16,4 +16,4 @@ POSTHOOK: query: grant role role1 to role role2
 POSTHOOK: type: GRANT_ROLE
 PREHOOK: query: grant role role2 to role role1
 PREHOOK: type: GRANT_ROLE
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error granting role: Cannot grant role role1 to role2 as role2 already belongs to the role role1. (no cycles allowed)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error granting role: Cannot grant role role1 to role2 as role2 already belongs to the role role1. (no cycles allowed)
diff --git a/ql/src/test/results/clientnegative/authorization_role_cycles2.q.out b/ql/src/test/results/clientnegative/authorization_role_cycles2.q.out
index 4f20b84..eedabe8 100644
--- a/ql/src/test/results/clientnegative/authorization_role_cycles2.q.out
+++ b/ql/src/test/results/clientnegative/authorization_role_cycles2.q.out
@@ -40,4 +40,4 @@ POSTHOOK: query: grant role role5 to role role4
 POSTHOOK: type: GRANT_ROLE
 PREHOOK: query: grant role role2 to role role4
 PREHOOK: type: GRANT_ROLE
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error granting role: Cannot grant role role4 to role2 as role2 already belongs to the role role4. (no cycles allowed)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error granting role: Cannot grant role role4 to role2 as role2 already belongs to the role role4. (no cycles allowed)
diff --git a/ql/src/test/results/clientnegative/authorization_role_grant.q.out b/ql/src/test/results/clientnegative/authorization_role_grant.q.out
index daef930..0cabd84 100644
--- a/ql/src/test/results/clientnegative/authorization_role_grant.q.out
+++ b/ql/src/test/results/clientnegative/authorization_role_grant.q.out
@@ -31,4 +31,4 @@ POSTHOOK: query: set role role_noadmin
 POSTHOOK: type: SHOW_ROLES
 PREHOOK: query: grant  src_role_wadmin to user user3
 PREHOOK: type: GRANT_ROLE
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current user : user2 is not allowed to grant role. User has to belong to ADMIN role and have it as current role, for this action. Otherwise, grantor need to have ADMIN OPTION on role being granted and have it as a current role for this action.
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current user : user2 is not allowed to grant role. User has to belong to ADMIN role and have it as current role, for this action. Otherwise, grantor need to have ADMIN OPTION on role being granted and have it as a current role for this action.
diff --git a/ql/src/test/results/clientnegative/authorization_role_grant2.q.out b/ql/src/test/results/clientnegative/authorization_role_grant2.q.out
index e549580..896dbe9 100644
--- a/ql/src/test/results/clientnegative/authorization_role_grant2.q.out
+++ b/ql/src/test/results/clientnegative/authorization_role_grant2.q.out
@@ -48,4 +48,4 @@ POSTHOOK: query: set role src_role_wadmin
 POSTHOOK: type: SHOW_ROLES
 PREHOOK: query: grant  src_role_wadmin to user user3
 PREHOOK: type: GRANT_ROLE
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current user : user2 is not allowed to grant role. User has to belong to ADMIN role and have it as current role, for this action. Otherwise, grantor need to have ADMIN OPTION on role being granted and have it as a current role for this action.
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current user : user2 is not allowed to grant role. User has to belong to ADMIN role and have it as current role, for this action. Otherwise, grantor need to have ADMIN OPTION on role being granted and have it as a current role for this action.
diff --git a/ql/src/test/results/clientnegative/authorization_role_grant_nosuchrole.q.out b/ql/src/test/results/clientnegative/authorization_role_grant_nosuchrole.q.out
index fcb2ec9..277bff5 100644
--- a/ql/src/test/results/clientnegative/authorization_role_grant_nosuchrole.q.out
+++ b/ql/src/test/results/clientnegative/authorization_role_grant_nosuchrole.q.out
@@ -8,4 +8,4 @@ POSTHOOK: query: create role role1
 POSTHOOK: type: CREATEROLE
 PREHOOK: query: grant role1 to role nosuchrole
 PREHOOK: type: GRANT_ROLE
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error granting role: Got exception: org.apache.hadoop.hive.metastore.api.NoSuchObjectException Role nosuchrole does not exist
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error granting role: Got exception: org.apache.hadoop.hive.metastore.api.NoSuchObjectException Role nosuchrole does not exist
diff --git a/ql/src/test/results/clientnegative/authorization_role_grant_otherrole.q.out b/ql/src/test/results/clientnegative/authorization_role_grant_otherrole.q.out
index bb4e23e..79c0c57 100644
--- a/ql/src/test/results/clientnegative/authorization_role_grant_otherrole.q.out
+++ b/ql/src/test/results/clientnegative/authorization_role_grant_otherrole.q.out
@@ -8,4 +8,4 @@ POSTHOOK: query: create role accounting
 POSTHOOK: type: CREATEROLE
 PREHOOK: query: show role grant role accounting
 PREHOOK: type: SHOW_ROLE_GRANT
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error getting role grant information for user accounting: User : user1 is not allowed check privileges of a role it does not belong to : accounting. User has to belong to ADMIN role and have it as current role, for this action.
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error getting role grant information for user accounting: User : user1 is not allowed check privileges of a role it does not belong to : accounting. User has to belong to ADMIN role and have it as current role, for this action.
diff --git a/ql/src/test/results/clientnegative/authorization_role_grant_otheruser.q.out b/ql/src/test/results/clientnegative/authorization_role_grant_otheruser.q.out
index 5422b19..72cd42b 100644
--- a/ql/src/test/results/clientnegative/authorization_role_grant_otheruser.q.out
+++ b/ql/src/test/results/clientnegative/authorization_role_grant_otheruser.q.out
@@ -19,4 +19,4 @@ POSTHOOK: type: SHOW_ROLE_GRANT
 public	false	-1	
 PREHOOK: query: show role grant user ruser2
 PREHOOK: type: SHOW_ROLE_GRANT
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error getting role grant information for user ruser2: User : ruser1 is not allowed check privileges of another user : ruser2. User has to belong to ADMIN role and have it as current role, for this action.
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error getting role grant information for user ruser2: User : ruser1 is not allowed check privileges of another user : ruser2. User has to belong to ADMIN role and have it as current role, for this action.
diff --git a/ql/src/test/results/clientnegative/authorization_set_role_neg1.q.out b/ql/src/test/results/clientnegative/authorization_set_role_neg1.q.out
index fb46d43..fd60dca 100644
--- a/ql/src/test/results/clientnegative/authorization_set_role_neg1.q.out
+++ b/ql/src/test/results/clientnegative/authorization_set_role_neg1.q.out
@@ -1,3 +1,3 @@
 PREHOOK: query: set role nosuchroleexists
 PREHOOK: type: SHOW_ROLES
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. hive_test_user doesn't belong to role nosuchroleexists
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. hive_test_user doesn't belong to role nosuchroleexists
diff --git a/ql/src/test/results/clientnegative/authorization_set_role_neg2.q.out b/ql/src/test/results/clientnegative/authorization_set_role_neg2.q.out
index dad672d..1dbcc35 100644
--- a/ql/src/test/results/clientnegative/authorization_set_role_neg2.q.out
+++ b/ql/src/test/results/clientnegative/authorization_set_role_neg2.q.out
@@ -20,4 +20,4 @@ POSTHOOK: query: set role public
 POSTHOOK: type: SHOW_ROLES
 PREHOOK: query: set role nosuchroleexists
 PREHOOK: type: SHOW_ROLES
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. user2 doesn't belong to role nosuchroleexists
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. user2 doesn't belong to role nosuchroleexists
diff --git a/ql/src/test/results/clientnegative/authorization_show_grant_otherrole.q.out b/ql/src/test/results/clientnegative/authorization_show_grant_otherrole.q.out
index 198986a..47d24de 100644
--- a/ql/src/test/results/clientnegative/authorization_show_grant_otherrole.q.out
+++ b/ql/src/test/results/clientnegative/authorization_show_grant_otherrole.q.out
@@ -8,4 +8,4 @@ POSTHOOK: query: create role role1
 POSTHOOK: type: CREATEROLE
 PREHOOK: query: show grant role role1
 PREHOOK: type: SHOW_GRANT
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error showing privileges: User : user1 is not allowed check privileges of a role it does not belong to : role1. User has to belong to ADMIN role and have it as current role, for this action.
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error showing privileges: User : user1 is not allowed check privileges of a role it does not belong to : role1. User has to belong to ADMIN role and have it as current role, for this action.
diff --git a/ql/src/test/results/clientnegative/authorization_show_grant_otheruser_all.q.out b/ql/src/test/results/clientnegative/authorization_show_grant_otheruser_all.q.out
index bd510aa..8ecf33e 100644
--- a/ql/src/test/results/clientnegative/authorization_show_grant_otheruser_all.q.out
+++ b/ql/src/test/results/clientnegative/authorization_show_grant_otheruser_all.q.out
@@ -1,3 +1,3 @@
 PREHOOK: query: show grant
 PREHOOK: type: SHOW_GRANT
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error showing privileges: User : user1 has to specify a user name or role in the show grant. User has to belong to ADMIN role and have it as current role, for this action.
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error showing privileges: User : user1 has to specify a user name or role in the show grant. User has to belong to ADMIN role and have it as current role, for this action.
diff --git a/ql/src/test/results/clientnegative/authorization_show_grant_otheruser_alltabs.q.out b/ql/src/test/results/clientnegative/authorization_show_grant_otheruser_alltabs.q.out
index e5479fd..1135a10 100644
--- a/ql/src/test/results/clientnegative/authorization_show_grant_otheruser_alltabs.q.out
+++ b/ql/src/test/results/clientnegative/authorization_show_grant_otheruser_alltabs.q.out
@@ -1,3 +1,3 @@
 PREHOOK: query: show grant user user2
 PREHOOK: type: SHOW_GRANT
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error showing privileges: User : user1 is not allowed check privileges of another user : user2. User has to belong to ADMIN role and have it as current role, for this action.
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error showing privileges: User : user1 is not allowed check privileges of another user : user2. User has to belong to ADMIN role and have it as current role, for this action.
diff --git a/ql/src/test/results/clientnegative/authorization_show_grant_otheruser_wtab.q.out b/ql/src/test/results/clientnegative/authorization_show_grant_otheruser_wtab.q.out
index 1b137f1..dd7eee0 100644
--- a/ql/src/test/results/clientnegative/authorization_show_grant_otheruser_wtab.q.out
+++ b/ql/src/test/results/clientnegative/authorization_show_grant_otheruser_wtab.q.out
@@ -8,4 +8,4 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t1
 PREHOOK: query: show grant user user2 on table t1
 PREHOOK: type: SHOW_GRANT
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error showing privileges: User : user1 is not allowed check privileges of another user : user2. User has to belong to ADMIN role and have it as current role, for this action.
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error showing privileges: User : user1 is not allowed check privileges of another user : user2. User has to belong to ADMIN role and have it as current role, for this action.
diff --git a/ql/src/test/results/clientnegative/authorization_show_role_principals_no_admin.q.out b/ql/src/test/results/clientnegative/authorization_show_role_principals_no_admin.q.out
index f5ce765..9100689 100644
--- a/ql/src/test/results/clientnegative/authorization_show_role_principals_no_admin.q.out
+++ b/ql/src/test/results/clientnegative/authorization_show_role_principals_no_admin.q.out
@@ -1,3 +1,3 @@
 PREHOOK: query: show principals role1
 PREHOOK: type: SHOW_ROLE_PRINCIPALS
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current user : hive_test_user is not allowed get principals in a role. User has to belong to ADMIN role and have it as current role, for this action. Otherwise, grantor need to have ADMIN OPTION on role being granted and have it as a current role for this action.
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current user : hive_test_user is not allowed get principals in a role. User has to belong to ADMIN role and have it as current role, for this action. Otherwise, grantor need to have ADMIN OPTION on role being granted and have it as a current role for this action.
diff --git a/ql/src/test/results/clientnegative/authorization_show_roles_no_admin.q.out b/ql/src/test/results/clientnegative/authorization_show_roles_no_admin.q.out
index ea46d10..1600a2e 100644
--- a/ql/src/test/results/clientnegative/authorization_show_roles_no_admin.q.out
+++ b/ql/src/test/results/clientnegative/authorization_show_roles_no_admin.q.out
@@ -1,3 +1,3 @@
 PREHOOK: query: show roles
 PREHOOK: type: SHOW_ROLES
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current user : hive_test_user is not allowed to list roles. User has to belong to ADMIN role and have it as current role, for this action.
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current user : hive_test_user is not allowed to list roles. User has to belong to ADMIN role and have it as current role, for this action.
diff --git a/ql/src/test/results/clientnegative/authorization_table_grant_nosuchrole.q.out b/ql/src/test/results/clientnegative/authorization_table_grant_nosuchrole.q.out
index 21af3b1..2c341bd 100644
--- a/ql/src/test/results/clientnegative/authorization_table_grant_nosuchrole.q.out
+++ b/ql/src/test/results/clientnegative/authorization_table_grant_nosuchrole.q.out
@@ -9,4 +9,4 @@ POSTHOOK: Output: default@t1
 PREHOOK: query: grant ALL on t1 to role nosuchrole
 PREHOOK: type: GRANT_PRIVILEGE
 PREHOOK: Output: default@t1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error granting privileges: Got exception: org.apache.hadoop.hive.metastore.api.NoSuchObjectException Role nosuchrole does not exist
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error granting privileges: Got exception: org.apache.hadoop.hive.metastore.api.NoSuchObjectException Role nosuchrole does not exist
diff --git a/ql/src/test/results/clientnegative/authorize_grant_public.q.out b/ql/src/test/results/clientnegative/authorize_grant_public.q.out
index a2e8083..c1fe195 100644
--- a/ql/src/test/results/clientnegative/authorize_grant_public.q.out
+++ b/ql/src/test/results/clientnegative/authorize_grant_public.q.out
@@ -1,3 +1,3 @@
 PREHOOK: query: grant role public to user hive_test_user
 PREHOOK: type: GRANT_ROLE
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: MetaException(message:No user can be added to public. Since all users implicitly belong to public role.)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: MetaException(message:No user can be added to public. Since all users implicitly belong to public role.)
diff --git a/ql/src/test/results/clientnegative/authorize_revoke_public.q.out b/ql/src/test/results/clientnegative/authorize_revoke_public.q.out
index 8579c54..fb3448a 100644
--- a/ql/src/test/results/clientnegative/authorize_revoke_public.q.out
+++ b/ql/src/test/results/clientnegative/authorize_revoke_public.q.out
@@ -1,3 +1,3 @@
 PREHOOK: query: revoke role public from user hive_test_user
 PREHOOK: type: REVOKE_ROLE
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: MetaException(message:public role can't be revoked.)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: MetaException(message:public role can't be revoked.)
diff --git a/ql/src/test/results/clientnegative/avro_add_column_extschema.q.out b/ql/src/test/results/clientnegative/avro_add_column_extschema.q.out
index 8040fe3..9493f97 100644
--- a/ql/src/test/results/clientnegative/avro_add_column_extschema.q.out
+++ b/ql/src/test/results/clientnegative/avro_add_column_extschema.q.out
@@ -40,4 +40,4 @@ CHANGE COLUMN number number bigint
 PREHOOK: type: ALTERTABLE_RENAMECOL
 PREHOOK: Input: default@avro_extschema
 PREHOOK: Output: default@avro_extschema
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Not allowed to alter schema of Avro stored table having external schema. Consider removing avro.schema.literal or avro.schema.url from table properties.
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Not allowed to alter schema of Avro stored table having external schema. Consider removing avro.schema.literal or avro.schema.url from table properties.
diff --git a/ql/src/test/results/clientnegative/avro_decimal.q.out b/ql/src/test/results/clientnegative/avro_decimal.q.out
index c2bc4f4..3b46c7b 100644
--- a/ql/src/test/results/clientnegative/avro_decimal.q.out
+++ b/ql/src/test/results/clientnegative/avro_decimal.q.out
@@ -19,4 +19,4 @@ TBLPROPERTIES (
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@avro_dec
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.avro.AvroSerdeException Invalid precision or scale for decimal type)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.avro.AvroSerdeException Invalid precision or scale for decimal type)
diff --git a/ql/src/test/results/clientnegative/column_rename1.q.out b/ql/src/test/results/clientnegative/column_rename1.q.out
index a280774..1f0c5cf 100644
--- a/ql/src/test/results/clientnegative/column_rename1.q.out
+++ b/ql/src/test/results/clientnegative/column_rename1.q.out
@@ -26,4 +26,4 @@ PREHOOK: query: alter table tstsrc change src_not_exist key_value string
 PREHOOK: type: ALTERTABLE_RENAMECOL
 PREHOOK: Input: default@tstsrc
 PREHOOK: Output: default@tstsrc
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Invalid column reference src_not_exist
+FAILED: Execution Error, return code 10002 from org.apache.hadoop.hive.ql.ddl.DDLTask. Invalid column reference src_not_exist
diff --git a/ql/src/test/results/clientnegative/column_rename2.q.out b/ql/src/test/results/clientnegative/column_rename2.q.out
index 3eeda3a..2fef073 100644
--- a/ql/src/test/results/clientnegative/column_rename2.q.out
+++ b/ql/src/test/results/clientnegative/column_rename2.q.out
@@ -26,4 +26,4 @@ PREHOOK: query: alter table tstsrc change key value string
 PREHOOK: type: ALTERTABLE_RENAMECOL
 PREHOOK: Input: default@tstsrc
 PREHOOK: Output: default@tstsrc
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Duplicate column name: value
+FAILED: Execution Error, return code 10036 from org.apache.hadoop.hive.ql.ddl.DDLTask. Duplicate column name: value
diff --git a/ql/src/test/results/clientnegative/column_rename4.q.out b/ql/src/test/results/clientnegative/column_rename4.q.out
index 92c886c..afed214 100644
--- a/ql/src/test/results/clientnegative/column_rename4.q.out
+++ b/ql/src/test/results/clientnegative/column_rename4.q.out
@@ -26,4 +26,4 @@ PREHOOK: query: alter table tstsrc change key key2 string after key_value
 PREHOOK: type: ALTERTABLE_RENAMECOL
 PREHOOK: Input: default@tstsrc
 PREHOOK: Output: default@tstsrc
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Invalid column reference key_value
+FAILED: Execution Error, return code 10002 from org.apache.hadoop.hive.ql.ddl.DDLTask. Invalid column reference key_value
diff --git a/ql/src/test/results/clientnegative/compact_non_acid_table.q.out b/ql/src/test/results/clientnegative/compact_non_acid_table.q.out
index 34b9e91..dc8b01e 100644
--- a/ql/src/test/results/clientnegative/compact_non_acid_table.q.out
+++ b/ql/src/test/results/clientnegative/compact_non_acid_table.q.out
@@ -8,4 +8,4 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@not_an_acid_table
 PREHOOK: query: alter table not_an_acid_table compact 'major'
 PREHOOK: type: ALTERTABLE_COMPACT
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Compaction is not allowed on non-ACID table default.not_an_acid_table
+FAILED: Execution Error, return code 10286 from org.apache.hadoop.hive.ql.ddl.DDLTask. Compaction is not allowed on non-ACID table default.not_an_acid_table
diff --git a/ql/src/test/results/clientnegative/create_external_acid.q.out b/ql/src/test/results/clientnegative/create_external_acid.q.out
index 11fa05d..84c4d7b 100644
--- a/ql/src/test/results/clientnegative/create_external_acid.q.out
+++ b/ql/src/test/results/clientnegative/create_external_acid.q.out
@@ -2,4 +2,4 @@ PREHOOK: query: create external table acid_external (a int, b varchar(128)) clus
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_external
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:default.acid_external cannot be declared transactional because it's an external table)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:default.acid_external cannot be declared transactional because it's an external table)
diff --git a/ql/src/test/results/clientnegative/create_not_acid.q.out b/ql/src/test/results/clientnegative/create_not_acid.q.out
index e8a45e5..74973ac 100644
--- a/ql/src/test/results/clientnegative/create_not_acid.q.out
+++ b/ql/src/test/results/clientnegative/create_not_acid.q.out
@@ -2,4 +2,4 @@ PREHOOK: query: create table acid_notbucketed(a int, b varchar(128)) TBLPROPERTI
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_notbucketed
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:The table must be stored using an ACID compliant format (such as ORC): default.acid_notbucketed)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:The table must be stored using an ACID compliant format (such as ORC): default.acid_notbucketed)
diff --git a/ql/src/test/results/clientnegative/create_table_wrong_regex.q.out b/ql/src/test/results/clientnegative/create_table_wrong_regex.q.out
index b6517ff..7de34c5 100644
--- a/ql/src/test/results/clientnegative/create_table_wrong_regex.q.out
+++ b/ql/src/test/results/clientnegative/create_table_wrong_regex.q.out
@@ -8,6 +8,6 @@ PREHOOK: query: create table aa ( test STRING )
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@aa
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.util.regex.PatternSyntaxException: Unclosed character class near index 7
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.util.regex.PatternSyntaxException: Unclosed character class near index 7
 [^\](.*)
        ^
diff --git a/ql/src/test/results/clientnegative/create_view_failure1.q.out b/ql/src/test/results/clientnegative/create_view_failure1.q.out
index b960a5f..03ec5ae 100644
--- a/ql/src/test/results/clientnegative/create_view_failure1.q.out
+++ b/ql/src/test/results/clientnegative/create_view_failure1.q.out
@@ -15,4 +15,4 @@ PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@src
 PREHOOK: Output: database:default
 PREHOOK: Output: default@xxx12
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Table already exists: default.xxx12
+FAILED: Execution Error, return code 10073 from org.apache.hadoop.hive.ql.ddl.DDLTask. Table already exists: default.xxx12
diff --git a/ql/src/test/results/clientnegative/create_view_failure2.q.out b/ql/src/test/results/clientnegative/create_view_failure2.q.out
index 52d2273..9688687 100644
--- a/ql/src/test/results/clientnegative/create_view_failure2.q.out
+++ b/ql/src/test/results/clientnegative/create_view_failure2.q.out
@@ -17,4 +17,4 @@ PREHOOK: query: CREATE TABLE xxx4(key int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@xxx4
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. AlreadyExistsException(message:Table hive.default.xxx4 already exists)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. AlreadyExistsException(message:Table hive.default.xxx4 already exists)
diff --git a/ql/src/test/results/clientnegative/create_view_failure4.q.out b/ql/src/test/results/clientnegative/create_view_failure4.q.out
index 19cf005..0d5d6c5 100644
--- a/ql/src/test/results/clientnegative/create_view_failure4.q.out
+++ b/ql/src/test/results/clientnegative/create_view_failure4.q.out
@@ -8,4 +8,4 @@ PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@src
 PREHOOK: Output: database:default
 PREHOOK: Output: default@xxx5
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: Duplicate column name x in the table definition.
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: Duplicate column name x in the table definition.
diff --git a/ql/src/test/results/clientnegative/create_with_constraints_duplicate_name.q.out b/ql/src/test/results/clientnegative/create_with_constraints_duplicate_name.q.out
index 41ed714..38fbbc2 100644
--- a/ql/src/test/results/clientnegative/create_with_constraints_duplicate_name.q.out
+++ b/ql/src/test/results/clientnegative/create_with_constraints_duplicate_name.q.out
@@ -48,4 +48,4 @@ POSTHOOK: Output: database:db2
 POSTHOOK: Output: db2@t2
 PREHOOK: query: alter table t1 add constraint constraint_name foreign key (x) references t2(x) disable novalidate rely
 PREHOOK: type: ALTERTABLE_ADDCONSTRAINT
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:Constraint name already exists: db2.t2.constraint_name)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:Constraint name already exists: db2.t2.constraint_name)
diff --git a/ql/src/test/results/clientnegative/create_with_fk_constraint.q.out b/ql/src/test/results/clientnegative/create_with_fk_constraint.q.out
index d2a45bd..a8e0153 100644
--- a/ql/src/test/results/clientnegative/create_with_fk_constraint.q.out
+++ b/ql/src/test/results/clientnegative/create_with_fk_constraint.q.out
@@ -10,4 +10,4 @@ PREHOOK: query: CREATE TABLE table1 (a STRING, b STRING, CONSTRAINT fk1 FOREIGN
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@table1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:Child column not found: x)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:Child column not found: x)
diff --git a/ql/src/test/results/clientnegative/create_with_fk_pk_same_tab.q.out b/ql/src/test/results/clientnegative/create_with_fk_pk_same_tab.q.out
index 4cfcb8b..43831a6 100644
--- a/ql/src/test/results/clientnegative/create_with_fk_pk_same_tab.q.out
+++ b/ql/src/test/results/clientnegative/create_with_fk_pk_same_tab.q.out
@@ -2,4 +2,4 @@ PREHOOK: query: CREATE TABLE table1 (a STRING PRIMARY KEY DISABLE, b STRING, CON
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@table1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Cannot be both foreign key and primary/unique key on same table: a:string;)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Cannot be both foreign key and primary/unique key on same table: a:string;)
diff --git a/ql/src/test/results/clientnegative/create_with_fk_uk_same_tab.q.out b/ql/src/test/results/clientnegative/create_with_fk_uk_same_tab.q.out
index 5cf4dd9..31fe0f0 100644
--- a/ql/src/test/results/clientnegative/create_with_fk_uk_same_tab.q.out
+++ b/ql/src/test/results/clientnegative/create_with_fk_uk_same_tab.q.out
@@ -2,4 +2,4 @@ PREHOOK: query: CREATE TABLE table1 (a STRING UNIQUE DISABLE, b STRING, CONSTRAI
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@table1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Cannot be both foreign key and primary/unique key on same table: a:string;)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Cannot be both foreign key and primary/unique key on same table: a:string;)
diff --git a/ql/src/test/results/clientnegative/create_with_fk_wrong_ref.q.out b/ql/src/test/results/clientnegative/create_with_fk_wrong_ref.q.out
index 1477a96..8d51055 100644
--- a/ql/src/test/results/clientnegative/create_with_fk_wrong_ref.q.out
+++ b/ql/src/test/results/clientnegative/create_with_fk_wrong_ref.q.out
@@ -10,4 +10,4 @@ PREHOOK: query: CREATE TABLE table1 (a STRING, b STRING, CONSTRAINT fk1 FOREIGN
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@table1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Foreign key references a:string; but no corresponding primary key or unique key exists. Possible keys: [a:int;])
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Foreign key references a:string; but no corresponding primary key or unique key exists. Possible keys: [a:int;])
diff --git a/ql/src/test/results/clientnegative/create_with_fk_wrong_ref2.q.out b/ql/src/test/results/clientnegative/create_with_fk_wrong_ref2.q.out
index 843a434..cb4f333 100644
--- a/ql/src/test/results/clientnegative/create_with_fk_wrong_ref2.q.out
+++ b/ql/src/test/results/clientnegative/create_with_fk_wrong_ref2.q.out
@@ -10,4 +10,4 @@ PREHOOK: query: CREATE TABLE table1 (a STRING, b STRING, CONSTRAINT fk1 FOREIGN
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@table1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Foreign key references a:string;b:string; but no corresponding primary key or unique key exists. Possible keys: [b:int;a:string;, a:string;b:int;])
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Foreign key references a:string;b:string; but no corresponding primary key or unique key exists. Possible keys: [b:int;a:string;, a:string;b:int;])
diff --git a/ql/src/test/results/clientnegative/database_create_already_exists.q.out b/ql/src/test/results/clientnegative/database_create_already_exists.q.out
index 14746a6..a486507 100644
--- a/ql/src/test/results/clientnegative/database_create_already_exists.q.out
+++ b/ql/src/test/results/clientnegative/database_create_already_exists.q.out
@@ -12,4 +12,4 @@ POSTHOOK: Output: database:test_db
 PREHOOK: query: CREATE DATABASE test_db
 PREHOOK: type: CREATEDATABASE
 PREHOOK: Output: database:test_db
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Database test_db already exists
+FAILED: Execution Error, return code 10242 from org.apache.hadoop.hive.ql.ddl.DDLTask. Database test_db already exists
diff --git a/ql/src/test/results/clientnegative/database_create_invalid_name.q.out b/ql/src/test/results/clientnegative/database_create_invalid_name.q.out
index 9dba025..3d7d4d8 100644
--- a/ql/src/test/results/clientnegative/database_create_invalid_name.q.out
+++ b/ql/src/test/results/clientnegative/database_create_invalid_name.q.out
@@ -6,4 +6,4 @@ default
 PREHOOK: query: CREATE DATABASE `test§db`
 PREHOOK: type: CREATEDATABASE
 PREHOOK: Output: database:test§db
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:test§db is not a valid database name)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:test§db is not a valid database name)
diff --git a/ql/src/test/results/clientnegative/database_drop_not_empty.q.out b/ql/src/test/results/clientnegative/database_drop_not_empty.q.out
index d160464..20463ce 100644
--- a/ql/src/test/results/clientnegative/database_drop_not_empty.q.out
+++ b/ql/src/test/results/clientnegative/database_drop_not_empty.q.out
@@ -33,4 +33,4 @@ PREHOOK: query: DROP DATABASE test_db
 PREHOOK: type: DROPDATABASE
 PREHOOK: Input: database:test_db
 PREHOOK: Output: database:test_db
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Database test_db is not empty. One or more tables exist.)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Database test_db is not empty. One or more tables exist.)
diff --git a/ql/src/test/results/clientnegative/database_drop_not_empty_restrict.q.out b/ql/src/test/results/clientnegative/database_drop_not_empty_restrict.q.out
index a8c009e..a01e472 100644
--- a/ql/src/test/results/clientnegative/database_drop_not_empty_restrict.q.out
+++ b/ql/src/test/results/clientnegative/database_drop_not_empty_restrict.q.out
@@ -33,4 +33,4 @@ PREHOOK: query: DROP DATABASE db_drop_non_empty_restrict
 PREHOOK: type: DROPDATABASE
 PREHOOK: Input: database:db_drop_non_empty_restrict
 PREHOOK: Output: database:db_drop_non_empty_restrict
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Database db_drop_non_empty_restrict is not empty. One or more tables exist.)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Database db_drop_non_empty_restrict is not empty. One or more tables exist.)
diff --git a/ql/src/test/results/clientnegative/database_location_conflict.q.out b/ql/src/test/results/clientnegative/database_location_conflict.q.out
index 8034185..f0d2a4e 100644
--- a/ql/src/test/results/clientnegative/database_location_conflict.q.out
+++ b/ql/src/test/results/clientnegative/database_location_conflict.q.out
@@ -3,4 +3,4 @@ PREHOOK: query: CREATE DATABASE db
 PREHOOK: type: CREATEDATABASE
 PREHOOK: Output: database:db
 #### A masked pattern was here ####
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Managed and external locations for database cannot be the same
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Managed and external locations for database cannot be the same
diff --git a/ql/src/test/results/clientnegative/database_location_conflict2.q.out b/ql/src/test/results/clientnegative/database_location_conflict2.q.out
index 80786bf..082bee2 100644
--- a/ql/src/test/results/clientnegative/database_location_conflict2.q.out
+++ b/ql/src/test/results/clientnegative/database_location_conflict2.q.out
@@ -11,4 +11,4 @@ POSTHOOK: Output: database:db
 PREHOOK: type: ALTERDATABASE_LOCATION
 PREHOOK: Output: database:db
 #### A masked pattern was here ####
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Managed and external locations for database cannot be the same
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Managed and external locations for database cannot be the same
diff --git a/ql/src/test/results/clientnegative/database_location_conflict3.q.out b/ql/src/test/results/clientnegative/database_location_conflict3.q.out
index 80786bf..082bee2 100644
--- a/ql/src/test/results/clientnegative/database_location_conflict3.q.out
+++ b/ql/src/test/results/clientnegative/database_location_conflict3.q.out
@@ -11,4 +11,4 @@ POSTHOOK: Output: database:db
 PREHOOK: type: ALTERDATABASE_LOCATION
 PREHOOK: Output: database:db
 #### A masked pattern was here ####
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Managed and external locations for database cannot be the same
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Managed and external locations for database cannot be the same
diff --git a/ql/src/test/results/clientnegative/dbtxnmgr_nodblock.q.out b/ql/src/test/results/clientnegative/dbtxnmgr_nodblock.q.out
index b2b19ce..de0332a 100644
--- a/ql/src/test/results/clientnegative/dbtxnmgr_nodblock.q.out
+++ b/ql/src/test/results/clientnegative/dbtxnmgr_nodblock.q.out
@@ -12,4 +12,4 @@ PREHOOK: query: lock database drop_nodblock shared
 PREHOOK: type: LOCKDATABASE
 PREHOOK: Input: database:drop_nodblock
 PREHOOK: Output: database:drop_nodblock
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current transaction manager does not support explicit lock requests.  Transaction manager:   org.apache.hadoop.hive.ql.lockmgr.DbTxnManager
+FAILED: Execution Error, return code 10271 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current transaction manager does not support explicit lock requests.  Transaction manager:   org.apache.hadoop.hive.ql.lockmgr.DbTxnManager
diff --git a/ql/src/test/results/clientnegative/dbtxnmgr_nodbunlock.q.out b/ql/src/test/results/clientnegative/dbtxnmgr_nodbunlock.q.out
index 4455e0c..1ed0dcf 100644
--- a/ql/src/test/results/clientnegative/dbtxnmgr_nodbunlock.q.out
+++ b/ql/src/test/results/clientnegative/dbtxnmgr_nodbunlock.q.out
@@ -12,4 +12,4 @@ PREHOOK: query: unlock database drop_nodbunlock
 PREHOOK: type: UNLOCKDATABASE
 PREHOOK: Input: database:drop_nodbunlock
 PREHOOK: Output: database:drop_nodbunlock
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current transaction manager does not support explicit lock requests.  Transaction manager:   org.apache.hadoop.hive.ql.lockmgr.DbTxnManager
+FAILED: Execution Error, return code 10271 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current transaction manager does not support explicit lock requests.  Transaction manager:   org.apache.hadoop.hive.ql.lockmgr.DbTxnManager
diff --git a/ql/src/test/results/clientnegative/dbtxnmgr_notablelock.q.out b/ql/src/test/results/clientnegative/dbtxnmgr_notablelock.q.out
index c91d3e0..098aae6 100644
--- a/ql/src/test/results/clientnegative/dbtxnmgr_notablelock.q.out
+++ b/ql/src/test/results/clientnegative/dbtxnmgr_notablelock.q.out
@@ -12,4 +12,4 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@drop_notablelock
 PREHOOK: query: lock table drop_notablelock shared
 PREHOOK: type: LOCKTABLE
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current transaction manager does not support explicit lock requests.  Transaction manager:   org.apache.hadoop.hive.ql.lockmgr.DbTxnManager
+FAILED: Execution Error, return code 10271 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current transaction manager does not support explicit lock requests.  Transaction manager:   org.apache.hadoop.hive.ql.lockmgr.DbTxnManager
diff --git a/ql/src/test/results/clientnegative/dbtxnmgr_notableunlock.q.out b/ql/src/test/results/clientnegative/dbtxnmgr_notableunlock.q.out
index 80066d7..8211dd4 100644
--- a/ql/src/test/results/clientnegative/dbtxnmgr_notableunlock.q.out
+++ b/ql/src/test/results/clientnegative/dbtxnmgr_notableunlock.q.out
@@ -12,4 +12,4 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@drop_notableunlock
 PREHOOK: query: unlock table drop_notableunlock
 PREHOOK: type: UNLOCKTABLE
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current transaction manager does not support explicit lock requests.  Transaction manager:   org.apache.hadoop.hive.ql.lockmgr.DbTxnManager
+FAILED: Execution Error, return code 10271 from org.apache.hadoop.hive.ql.ddl.DDLTask. Current transaction manager does not support explicit lock requests.  Transaction manager:   org.apache.hadoop.hive.ql.lockmgr.DbTxnManager
diff --git a/ql/src/test/results/clientnegative/deletejar.q.out b/ql/src/test/results/clientnegative/deletejar.q.out
index 2827196..65dbc71 100644
--- a/ql/src/test/results/clientnegative/deletejar.q.out
+++ b/ql/src/test/results/clientnegative/deletejar.q.out
@@ -2,4 +2,4 @@ PREHOOK: query: CREATE TABLE DELETEJAR(KEY STRING, VALUE STRING) ROW FORMAT SERD
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@DELETEJAR
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Cannot validate serde: org.apache.hadoop.hive.serde2.TestSerDe
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Cannot validate serde: org.apache.hadoop.hive.serde2.TestSerDe
diff --git a/ql/src/test/results/clientnegative/describe_xpath1.q.out b/ql/src/test/results/clientnegative/describe_xpath1.q.out
index 61cb539..af51e38 100644
--- a/ql/src/test/results/clientnegative/describe_xpath1.q.out
+++ b/ql/src/test/results/clientnegative/describe_xpath1.q.out
@@ -1,4 +1,4 @@
 PREHOOK: query: describe src_thrift $elem$
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@src_thrift
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. cannot find field $elem$ from [private int org.apache.hadoop.hive.serde2.thrift.test.Complex.aint, private java.lang.String org.apache.hadoop.hive.serde2.thrift.test.Complex.aString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lint, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test. [...]
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. cannot find field $elem$ from [private int org.apache.hadoop.hive.serde2.thrift.test.Complex.aint, private java.lang.String org.apache.hadoop.hive.serde2.thrift.test.Complex.aString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lint, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lString, private java.util.List org.apache.hadoop.hive.serde2.thrift.t [...]
diff --git a/ql/src/test/results/clientnegative/describe_xpath2.q.out b/ql/src/test/results/clientnegative/describe_xpath2.q.out
index 5f3f849..3defc6f 100644
--- a/ql/src/test/results/clientnegative/describe_xpath2.q.out
+++ b/ql/src/test/results/clientnegative/describe_xpath2.q.out
@@ -1,4 +1,4 @@
 PREHOOK: query: describe src_thrift $key$
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@src_thrift
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. cannot find field $key$ from [private int org.apache.hadoop.hive.serde2.thrift.test.Complex.aint, private java.lang.String org.apache.hadoop.hive.serde2.thrift.test.Complex.aString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lint, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.C [...]
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. cannot find field $key$ from [private int org.apache.hadoop.hive.serde2.thrift.test.Complex.aint, private java.lang.String org.apache.hadoop.hive.serde2.thrift.test.Complex.aString, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lint, private java.util.List org.apache.hadoop.hive.serde2.thrift.test.Complex.lString, private java.util.List org.apache.hadoop.hive.serde2.thrift.te [...]
diff --git a/ql/src/test/results/clientnegative/describe_xpath3.q.out b/ql/src/test/results/clientnegative/describe_xpath3.q.out
index c73abb3..f243d11 100644
--- a/ql/src/test/results/clientnegative/describe_xpath3.q.out
+++ b/ql/src/test/results/clientnegative/describe_xpath3.q.out
@@ -1,4 +1,4 @@
 PREHOOK: query: describe src_thrift lint.abc
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@src_thrift
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error in getting fields from serde.Unknown type for abc
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error in getting fields from serde.Unknown type for abc
diff --git a/ql/src/test/results/clientnegative/describe_xpath4.q.out b/ql/src/test/results/clientnegative/describe_xpath4.q.out
index d81b629..8282039 100644
--- a/ql/src/test/results/clientnegative/describe_xpath4.q.out
+++ b/ql/src/test/results/clientnegative/describe_xpath4.q.out
@@ -1,4 +1,4 @@
 PREHOOK: query: describe src_thrift mStringString.abc
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@src_thrift
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error in getting fields from serde.Unknown type for abc
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Error in getting fields from serde.Unknown type for abc
diff --git a/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out b/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out
index 2eeef04..c93a8f9 100644
--- a/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out
+++ b/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out
@@ -104,5 +104,5 @@ PREHOOK: query: ALTER TABLE test_table123 REPLACE COLUMNS (a INT, b STRING)
 PREHOOK: type: ALTERTABLE_REPLACECOLS
 PREHOOK: Input: default@test_table123
 PREHOOK: Output: default@test_table123
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
+FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
 a,b
diff --git a/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on2.q.out b/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on2.q.out
index 16c40ed..b98e689 100644
--- a/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on2.q.out
+++ b/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on2.q.out
@@ -40,5 +40,5 @@ PREHOOK: query: ALTER TABLE test_table123 CHANGE COLUMN b b MAP<STRING, STRING>
 PREHOOK: type: ALTERTABLE_RENAMECOL
 PREHOOK: Input: default@test_table123
 PREHOOK: Output: default@test_table123
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
+FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
 b
diff --git a/ql/src/test/results/clientnegative/drop_invalid_constraint1.q.out b/ql/src/test/results/clientnegative/drop_invalid_constraint1.q.out
index 5bec46e..c43be94 100644
--- a/ql/src/test/results/clientnegative/drop_invalid_constraint1.q.out
+++ b/ql/src/test/results/clientnegative/drop_invalid_constraint1.q.out
@@ -12,4 +12,4 @@ POSTHOOK: query: ALTER TABLE table1 DROP CONSTRAINT pk1
 POSTHOOK: type: ALTERTABLE_DROPCONSTRAINT
 PREHOOK: query: ALTER TABLE table1 DROP CONSTRAINT pk1
 PREHOOK: type: ALTERTABLE_DROPCONSTRAINT
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:The constraint: pk1 does not exist for the associated table: default.table1)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:The constraint: pk1 does not exist for the associated table: default.table1)
diff --git a/ql/src/test/results/clientnegative/drop_invalid_constraint2.q.out b/ql/src/test/results/clientnegative/drop_invalid_constraint2.q.out
index 7f42c4c..8c1be49 100644
--- a/ql/src/test/results/clientnegative/drop_invalid_constraint2.q.out
+++ b/ql/src/test/results/clientnegative/drop_invalid_constraint2.q.out
@@ -8,4 +8,4 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@table2
 PREHOOK: query: ALTER TABLE table1 DROP CONSTRAINT pk1
 PREHOOK: type: ALTERTABLE_DROPCONSTRAINT
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:The constraint: pk1 does not exist for the associated table: default.table1)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:The constraint: pk1 does not exist for the associated table: default.table1)
diff --git a/ql/src/test/results/clientnegative/drop_invalid_constraint3.q.out b/ql/src/test/results/clientnegative/drop_invalid_constraint3.q.out
index e8081d6..181153f 100644
--- a/ql/src/test/results/clientnegative/drop_invalid_constraint3.q.out
+++ b/ql/src/test/results/clientnegative/drop_invalid_constraint3.q.out
@@ -8,4 +8,4 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@table2
 PREHOOK: query: ALTER TABLE table2 DROP CONSTRAINT pk2
 PREHOOK: type: ALTERTABLE_DROPCONSTRAINT
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:The constraint: pk2 does not exist for the associated table: default.table2)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:The constraint: pk2 does not exist for the associated table: default.table2)
diff --git a/ql/src/test/results/clientnegative/drop_invalid_constraint4.q.out b/ql/src/test/results/clientnegative/drop_invalid_constraint4.q.out
index a930844..ed39a44 100644
--- a/ql/src/test/results/clientnegative/drop_invalid_constraint4.q.out
+++ b/ql/src/test/results/clientnegative/drop_invalid_constraint4.q.out
@@ -16,4 +16,4 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@table2
 PREHOOK: query: ALTER TABLE table1 DROP CONSTRAINT pk2
 PREHOOK: type: ALTERTABLE_DROPCONSTRAINT
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:The constraint: pk2 does not exist for the associated table: default.table1)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidObjectException(message:The constraint: pk2 does not exist for the associated table: default.table1)
diff --git a/ql/src/test/results/clientnegative/drop_table_failure2.q.out b/ql/src/test/results/clientnegative/drop_table_failure2.q.out
index b31c18b..6abfc4b 100644
--- a/ql/src/test/results/clientnegative/drop_table_failure2.q.out
+++ b/ql/src/test/results/clientnegative/drop_table_failure2.q.out
@@ -13,4 +13,4 @@ PREHOOK: query: DROP TABLE xxx6
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@xxx6
 PREHOOK: Output: default@xxx6
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Cannot drop a view with DROP TABLE
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Cannot drop a view with DROP TABLE
diff --git a/ql/src/test/results/clientnegative/drop_table_used_by_mv.q.out b/ql/src/test/results/clientnegative/drop_table_used_by_mv.q.out
index 5d980c1..77ec4cf 100644
--- a/ql/src/test/results/clientnegative/drop_table_used_by_mv.q.out
+++ b/ql/src/test/results/clientnegative/drop_table_used_by_mv.q.out
@@ -32,5 +32,5 @@ PREHOOK: query: drop table mytable
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@mytable
 PREHOOK: Output: default@mytable
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Cannot drop table as it is used in the following materialized views [default.mv1]
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Cannot drop table as it is used in the following materialized views [default.mv1]
 )
diff --git a/ql/src/test/results/clientnegative/drop_table_used_by_mv2.q.out b/ql/src/test/results/clientnegative/drop_table_used_by_mv2.q.out
index 53ae0f8..54da2ea 100644
--- a/ql/src/test/results/clientnegative/drop_table_used_by_mv2.q.out
+++ b/ql/src/test/results/clientnegative/drop_table_used_by_mv2.q.out
@@ -68,5 +68,5 @@ PREHOOK: query: drop table mytable
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@mytable
 PREHOOK: Output: default@mytable
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Cannot drop table as it is used in the following materialized views [default.mv1, default.mv2]
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Cannot drop table as it is used in the following materialized views [default.mv1, default.mv2]
 )
diff --git a/ql/src/test/results/clientnegative/drop_view_failure1.q.out b/ql/src/test/results/clientnegative/drop_view_failure1.q.out
index 55ca792..e0a55bd 100644
--- a/ql/src/test/results/clientnegative/drop_view_failure1.q.out
+++ b/ql/src/test/results/clientnegative/drop_view_failure1.q.out
@@ -10,4 +10,4 @@ PREHOOK: query: DROP VIEW xxx1
 PREHOOK: type: DROPVIEW
 PREHOOK: Input: default@xxx1
 PREHOOK: Output: default@xxx1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Cannot drop a base table with DROP VIEW
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Cannot drop a base table with DROP VIEW
diff --git a/ql/src/test/results/clientnegative/druid_address.q.out b/ql/src/test/results/clientnegative/druid_address.q.out
index dcd7290..cb93180 100644
--- a/ql/src/test/results/clientnegative/druid_address.q.out
+++ b/ql/src/test/results/clientnegative/druid_address.q.out
@@ -4,4 +4,4 @@ TBLPROPERTIES ("druid.datasource" = "wikipedia")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@druid_table_1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException Druid broker address not specified in configuration)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException Druid broker address not specified in configuration)
diff --git a/ql/src/test/results/clientnegative/druid_buckets.q.out b/ql/src/test/results/clientnegative/druid_buckets.q.out
index a229f5f..0f926ae 100644
--- a/ql/src/test/results/clientnegative/druid_buckets.q.out
+++ b/ql/src/test/results/clientnegative/druid_buckets.q.out
@@ -5,4 +5,4 @@ TBLPROPERTIES ("druid.datasource" = "wikipedia")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@druid_table_1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:CLUSTERED BY may not be specified for Druid)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:CLUSTERED BY may not be specified for Druid)
diff --git a/ql/src/test/results/clientnegative/druid_case.q.out b/ql/src/test/results/clientnegative/druid_case.q.out
index b9bf9eb..78ff418 100644
--- a/ql/src/test/results/clientnegative/druid_case.q.out
+++ b/ql/src/test/results/clientnegative/druid_case.q.out
@@ -4,4 +4,4 @@ TBLPROPERTIES ("druid.datasource" = "wikipedia")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@druid_table_1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: Duplicate column name anonymous in the table definition.
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: Duplicate column name anonymous in the table definition.
diff --git a/ql/src/test/results/clientnegative/druid_datasource.q.out b/ql/src/test/results/clientnegative/druid_datasource.q.out
index 40c8e9c..426bb18 100644
--- a/ql/src/test/results/clientnegative/druid_datasource.q.out
+++ b/ql/src/test/results/clientnegative/druid_datasource.q.out
@@ -4,4 +4,4 @@ TBLPROPERTIES ("property" = "localhost")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@druid_table_1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException Druid data source not specified; use druid.datasource in table properties)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException Druid data source not specified; use druid.datasource in table properties)
diff --git a/ql/src/test/results/clientnegative/druid_datasource2.q.out b/ql/src/test/results/clientnegative/druid_datasource2.q.out
index 7781884..4bbba4f 100644
--- a/ql/src/test/results/clientnegative/druid_datasource2.q.out
+++ b/ql/src/test/results/clientnegative/druid_datasource2.q.out
@@ -4,4 +4,4 @@ TBLPROPERTIES ("property" = "localhost", "druid.datasource" = "mydatasource")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@druid_table_1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Table default.druid_table_1 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Table default.druid_table_1 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.)
diff --git a/ql/src/test/results/clientnegative/druid_location.q.out b/ql/src/test/results/clientnegative/druid_location.q.out
index 176ac76..dc47a18 100644
--- a/ql/src/test/results/clientnegative/druid_location.q.out
+++ b/ql/src/test/results/clientnegative/druid_location.q.out
@@ -6,4 +6,4 @@ PREHOOK: type: CREATETABLE
 #### A masked pattern was here ####
 PREHOOK: Output: database:default
 PREHOOK: Output: default@druid_table_1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:LOCATION may not be specified for Druid)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:LOCATION may not be specified for Druid)
diff --git a/ql/src/test/results/clientnegative/druid_partitions.q.out b/ql/src/test/results/clientnegative/druid_partitions.q.out
index 52627cc..f977d6c 100644
--- a/ql/src/test/results/clientnegative/druid_partitions.q.out
+++ b/ql/src/test/results/clientnegative/druid_partitions.q.out
@@ -5,4 +5,4 @@ TBLPROPERTIES ("druid.datasource" = "wikipedia")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@druid_table_1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:PARTITIONED BY may not be specified for Druid)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:PARTITIONED BY may not be specified for Druid)
diff --git a/ql/src/test/results/clientnegative/dyn_part_max.q.out b/ql/src/test/results/clientnegative/dyn_part_max.q.out
index 7de4998..2ae09ea 100644
--- a/ql/src/test/results/clientnegative/dyn_part_max.q.out
+++ b/ql/src/test/results/clientnegative/dyn_part_max.q.out
@@ -19,4 +19,4 @@ LIMIT 50
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@max_parts
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.MoveTask. Number of dynamic partitions created is 49, which is more than 10. To solve this try to set hive.exec.max.dynamic.partitions to at least 49.
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.exec.MoveTask. Number of dynamic partitions created is 49, which is more than 10. To solve this try to set hive.exec.max.dynamic.partitions to at least 49.
diff --git a/ql/src/test/results/clientnegative/exchange_partition.q.out b/ql/src/test/results/clientnegative/exchange_partition.q.out
index 76d6260..925a02d 100644
--- a/ql/src/test/results/clientnegative/exchange_partition.q.out
+++ b/ql/src/test/results/clientnegative/exchange_partition.q.out
@@ -53,4 +53,4 @@ PREHOOK: query: ALTER TABLE ex_table1 EXCHANGE PARTITION (part='part1') WITH TAB
 PREHOOK: type: ALTERTABLE_EXCHANGEPARTITION
 PREHOOK: Input: default@ex_table2
 PREHOOK: Output: default@ex_table1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Got exception: java.io.IOException Cannot rename the source path. The destination path already exists.)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Got exception: java.io.IOException Cannot rename the source path. The destination path already exists.)
diff --git a/ql/src/test/results/clientnegative/external1.q.out b/ql/src/test/results/clientnegative/external1.q.out
index c556ca2..258f593 100644
--- a/ql/src/test/results/clientnegative/external1.q.out
+++ b/ql/src/test/results/clientnegative/external1.q.out
@@ -3,4 +3,4 @@ PREHOOK: type: CREATETABLE
 #### A masked pattern was here ####
 PREHOOK: Output: database:default
 PREHOOK: Output: default@external1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.fs.UnsupportedFileSystemException: No FileSystem for scheme "invalidscheme"
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.fs.UnsupportedFileSystemException: No FileSystem for scheme "invalidscheme"
diff --git a/ql/src/test/results/clientnegative/external2.q.out b/ql/src/test/results/clientnegative/external2.q.out
index 8fb7924..5ca73cb 100644
--- a/ql/src/test/results/clientnegative/external2.q.out
+++ b/ql/src/test/results/clientnegative/external2.q.out
@@ -10,4 +10,4 @@ POSTHOOK: Output: default@external2
 PREHOOK: type: ALTERTABLE_ADDPARTS
 #### A masked pattern was here ####
 PREHOOK: Output: default@external2
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.fs.UnsupportedFileSystemException: No FileSystem for scheme "invalidscheme"
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.fs.UnsupportedFileSystemException: No FileSystem for scheme "invalidscheme"
diff --git a/ql/src/test/results/clientnegative/hms_using_serde_alter_table_update_columns.q.out b/ql/src/test/results/clientnegative/hms_using_serde_alter_table_update_columns.q.out
index 22b9170..bad06d6 100644
--- a/ql/src/test/results/clientnegative/hms_using_serde_alter_table_update_columns.q.out
+++ b/ql/src/test/results/clientnegative/hms_using_serde_alter_table_update_columns.q.out
@@ -15,4 +15,4 @@ POSTHOOK: Input: default@hmsserdetable
 name                	string              	                    
 PREHOOK: query: ALTER TABLE hmsserdetable UPDATE COLUMNS
 PREHOOK: type: ALTERTABLE_UPDATECOLUMNS
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. hmsserdetable has serde org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe for which schema is already handled by HMS.
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. hmsserdetable has serde org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe for which schema is already handled by HMS.
diff --git a/ql/src/test/results/clientnegative/insert_sorted.q.out b/ql/src/test/results/clientnegative/insert_sorted.q.out
index a28fa1e..762d586 100644
--- a/ql/src/test/results/clientnegative/insert_sorted.q.out
+++ b/ql/src/test/results/clientnegative/insert_sorted.q.out
@@ -20,4 +20,4 @@ PREHOOK: query: create table acid_insertsort(a int, b varchar(128)) clustered by
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_insertsort
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Table default.acid_insertsort cannot support full ACID functionality since it is sorted.)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Table default.acid_insertsort cannot support full ACID functionality since it is sorted.)
diff --git a/ql/src/test/results/clientnegative/lockneg2.q.out b/ql/src/test/results/clientnegative/lockneg2.q.out
index 2443341..d5cff03 100644
--- a/ql/src/test/results/clientnegative/lockneg2.q.out
+++ b/ql/src/test/results/clientnegative/lockneg2.q.out
@@ -22,4 +22,4 @@ POSTHOOK: Lineage: tstsrc.key SIMPLE [(src)src.FieldSchema(name:key, type:string
 POSTHOOK: Lineage: tstsrc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: UNLOCK TABLE tstsrc
 PREHOOK: type: UNLOCKTABLE
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Table tstsrc is not locked 
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Table tstsrc is not locked 
diff --git a/ql/src/test/results/clientnegative/lockneg3.q.out b/ql/src/test/results/clientnegative/lockneg3.q.out
index 7c1983d..ad548cb 100644
--- a/ql/src/test/results/clientnegative/lockneg3.q.out
+++ b/ql/src/test/results/clientnegative/lockneg3.q.out
@@ -26,4 +26,4 @@ POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpar
 POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: UNLOCK TABLE tstsrcpart PARTITION(ds='2008-04-08', hr='11')
 PREHOOK: type: UNLOCKTABLE
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Table tstsrcpart is not locked 
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Table tstsrcpart is not locked 
diff --git a/ql/src/test/results/clientnegative/lockneg4.q.out b/ql/src/test/results/clientnegative/lockneg4.q.out
index 5655415..59741db 100644
--- a/ql/src/test/results/clientnegative/lockneg4.q.out
+++ b/ql/src/test/results/clientnegative/lockneg4.q.out
@@ -30,4 +30,4 @@ POSTHOOK: query: LOCK TABLE tstsrcpart PARTITION(ds='2008-04-08', hr='11') EXCLU
 POSTHOOK: type: LOCKTABLE
 PREHOOK: query: SHOW LOCKS tstsrcpart PARTITION(ds='2008-04-08', hr='12')
 PREHOOK: type: SHOWLOCKS
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: Partition {ds=2008-04-08, hr=12} for table tstsrcpart does not exist
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: Partition {ds=2008-04-08, hr=12} for table tstsrcpart does not exist
diff --git a/ql/src/test/results/clientnegative/lockneg5.q.out b/ql/src/test/results/clientnegative/lockneg5.q.out
index c02312b..08487ef 100644
--- a/ql/src/test/results/clientnegative/lockneg5.q.out
+++ b/ql/src/test/results/clientnegative/lockneg5.q.out
@@ -4,4 +4,4 @@ POSTHOOK: query: drop table tstsrcpart
 POSTHOOK: type: DROPTABLE
 PREHOOK: query: show locks tstsrcpart extended
 PREHOOK: type: SHOWLOCKS
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.InvalidTableException: Table not found tstsrcpart
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.InvalidTableException: Table not found tstsrcpart
diff --git a/ql/src/test/results/clientnegative/lockneg_query_tbl_in_locked_db.q.out b/ql/src/test/results/clientnegative/lockneg_query_tbl_in_locked_db.q.out
index 5df0453..3e62801 100644
--- a/ql/src/test/results/clientnegative/lockneg_query_tbl_in_locked_db.q.out
+++ b/ql/src/test/results/clientnegative/lockneg_query_tbl_in_locked_db.q.out
@@ -59,4 +59,4 @@ PREHOOK: query: unlock database lockneg1
 PREHOOK: type: UNLOCKDATABASE
 PREHOOK: Input: database:lockneg1
 PREHOOK: Output: database:lockneg1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Database lockneg1 is not locked 
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Database lockneg1 is not locked 
diff --git a/ql/src/test/results/clientnegative/materialized_view_drop.q.out b/ql/src/test/results/clientnegative/materialized_view_drop.q.out
index e860283..35d3c5b 100644
--- a/ql/src/test/results/clientnegative/materialized_view_drop.q.out
+++ b/ql/src/test/results/clientnegative/materialized_view_drop.q.out
@@ -39,4 +39,4 @@ PREHOOK: query: drop materialized view cmv_basetable
 PREHOOK: type: DROP_MATERIALIZED_VIEW
 PREHOOK: Input: default@cmv_basetable
 PREHOOK: Output: default@cmv_basetable
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Cannot drop a base table with DROP MATERIALIZED VIEW
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Cannot drop a base table with DROP MATERIALIZED VIEW
diff --git a/ql/src/test/results/clientnegative/materialized_view_drop2.q.out b/ql/src/test/results/clientnegative/materialized_view_drop2.q.out
index 4671de1..803b5b4 100644
--- a/ql/src/test/results/clientnegative/materialized_view_drop2.q.out
+++ b/ql/src/test/results/clientnegative/materialized_view_drop2.q.out
@@ -31,4 +31,4 @@ PREHOOK: query: drop view cmv_mat_view
 PREHOOK: type: DROPVIEW
 PREHOOK: Input: default@cmv_mat_view
 PREHOOK: Output: default@cmv_mat_view
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Cannot drop a materialized view with DROP VIEW
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Cannot drop a materialized view with DROP VIEW
diff --git a/ql/src/test/results/clientnegative/materialized_view_no_cbo_rewrite_2.q.out b/ql/src/test/results/clientnegative/materialized_view_no_cbo_rewrite_2.q.out
index 7e22225..b2cb1db 100644
--- a/ql/src/test/results/clientnegative/materialized_view_no_cbo_rewrite_2.q.out
+++ b/ql/src/test/results/clientnegative/materialized_view_no_cbo_rewrite_2.q.out
@@ -33,4 +33,4 @@ PREHOOK: query: alter materialized view cmv_mat_view enable rewrite
 PREHOOK: type: ALTER_MATERIALIZED_VIEW_REWRITE
 PREHOOK: Input: default@cmv_mat_view
 PREHOOK: Output: default@cmv_mat_view
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: Cannot enable rewriting for materialized view. Statement has unsupported clause: sort by.
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: Cannot enable rewriting for materialized view. Statement has unsupported clause: sort by.
diff --git a/ql/src/test/results/clientnegative/materialized_view_no_supported_op_rewrite_2.q.out b/ql/src/test/results/clientnegative/materialized_view_no_supported_op_rewrite_2.q.out
index cd8f5fa..135d89b 100644
--- a/ql/src/test/results/clientnegative/materialized_view_no_supported_op_rewrite_2.q.out
+++ b/ql/src/test/results/clientnegative/materialized_view_no_supported_op_rewrite_2.q.out
@@ -34,4 +34,4 @@ PREHOOK: query: alter materialized view cmv_mat_view enable rewrite
 PREHOOK: type: ALTER_MATERIALIZED_VIEW_REWRITE
 PREHOOK: Input: default@cmv_mat_view
 PREHOOK: Output: default@cmv_mat_view
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: Cannot enable rewriting for materialized view. LEFT join type is not supported by rewriting algorithm.
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: Cannot enable rewriting for materialized view. LEFT join type is not supported by rewriting algorithm.
diff --git a/ql/src/test/results/clientnegative/mm_convert.q.out b/ql/src/test/results/clientnegative/mm_convert.q.out
index ee52c15..e0e49e4 100644
--- a/ql/src/test/results/clientnegative/mm_convert.q.out
+++ b/ql/src/test/results/clientnegative/mm_convert.q.out
@@ -14,4 +14,4 @@ PREHOOK: query: alter table convert_mm unset tblproperties('transactional_proper
 PREHOOK: type: ALTERTABLE_PROPERTIES
 PREHOOK: Input: default@convert_mm
 PREHOOK: Output: default@convert_mm
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Cannot convert an ACID table to non-ACID
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Cannot convert an ACID table to non-ACID
diff --git a/ql/src/test/results/clientnegative/nested_complex_neg.q.out b/ql/src/test/results/clientnegative/nested_complex_neg.q.out
index d3ecca1..39b4461 100644
--- a/ql/src/test/results/clientnegative/nested_complex_neg.q.out
+++ b/ql/src/test/results/clientnegative/nested_complex_neg.q.out
@@ -7,4 +7,4 @@ simple_string string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@nestedcomplex
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException Number of levels of nesting supported for LazySimpleSerde is 7 Unable to work with level 23. Use hive.serialization.extend.nesting.levels serde property for tables using LazySimpleSerde.)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException Number of levels of nesting supported for LazySimpleSerde is 7 Unable to work with level 23. Use hive.serialization.extend.nesting.levels serde property for tables using LazySimpleSerde.)
diff --git a/ql/src/test/results/clientnegative/orc_change_fileformat.q.out b/ql/src/test/results/clientnegative/orc_change_fileformat.q.out
index e542961..9652c0d 100644
--- a/ql/src/test/results/clientnegative/orc_change_fileformat.q.out
+++ b/ql/src/test/results/clientnegative/orc_change_fileformat.q.out
@@ -10,4 +10,4 @@ PREHOOK: query: alter table src_orc set fileformat textfile
 PREHOOK: type: ALTERTABLE_FILEFORMAT
 PREHOOK: Input: default@src_orc
 PREHOOK: Output: default@src_orc
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Changing file format (from ORC) is not supported for table default.src_orc
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Changing file format (from ORC) is not supported for table default.src_orc
diff --git a/ql/src/test/results/clientnegative/orc_change_fileformat_acid.q.out b/ql/src/test/results/clientnegative/orc_change_fileformat_acid.q.out
index 375795a..ec094df 100644
--- a/ql/src/test/results/clientnegative/orc_change_fileformat_acid.q.out
+++ b/ql/src/test/results/clientnegative/orc_change_fileformat_acid.q.out
@@ -10,4 +10,4 @@ PREHOOK: query: alter table src_orc set fileformat textfile
 PREHOOK: type: ALTERTABLE_FILEFORMAT
 PREHOOK: Input: default@src_orc
 PREHOOK: Output: default@src_orc
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Changing file format (from ORC) is not supported for table default.src_orc
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Changing file format (from ORC) is not supported for table default.src_orc
diff --git a/ql/src/test/results/clientnegative/orc_change_serde.q.out b/ql/src/test/results/clientnegative/orc_change_serde.q.out
index aea4713..be73373 100644
--- a/ql/src/test/results/clientnegative/orc_change_serde.q.out
+++ b/ql/src/test/results/clientnegative/orc_change_serde.q.out
@@ -10,4 +10,4 @@ PREHOOK: query: alter table src_orc set serde 'org.apache.hadoop.hive.serde2.col
 PREHOOK: type: ALTERTABLE_SERIALIZER
 PREHOOK: Input: default@src_orc
 PREHOOK: Output: default@src_orc
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Changing SerDe (from OrcSerde) is not supported for table default.src_orc. File format may be incompatible
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Changing SerDe (from OrcSerde) is not supported for table default.src_orc. File format may be incompatible
diff --git a/ql/src/test/results/clientnegative/orc_change_serde_acid.q.out b/ql/src/test/results/clientnegative/orc_change_serde_acid.q.out
index 60938e4..746f9bf 100644
--- a/ql/src/test/results/clientnegative/orc_change_serde_acid.q.out
+++ b/ql/src/test/results/clientnegative/orc_change_serde_acid.q.out
@@ -10,4 +10,4 @@ PREHOOK: query: alter table src_orc set serde 'org.apache.hadoop.hive.serde2.col
 PREHOOK: type: ALTERTABLE_SERIALIZER
 PREHOOK: Input: default@src_orc
 PREHOOK: Output: default@src_orc
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Changing SerDe (from OrcSerde) is not supported for table default.src_orc. File format may be incompatible
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Changing SerDe (from OrcSerde) is not supported for table default.src_orc. File format may be incompatible
diff --git a/ql/src/test/results/clientnegative/orc_reorder_columns1.q.out b/ql/src/test/results/clientnegative/orc_reorder_columns1.q.out
index f415350..f64329b 100644
--- a/ql/src/test/results/clientnegative/orc_reorder_columns1.q.out
+++ b/ql/src/test/results/clientnegative/orc_reorder_columns1.q.out
@@ -10,4 +10,4 @@ PREHOOK: query: alter table src_orc change key k tinyint first
 PREHOOK: type: ALTERTABLE_RENAMECOL
 PREHOOK: Input: default@src_orc
 PREHOOK: Output: default@src_orc
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Reordering columns is not supported for table default.src_orc. SerDe may be incompatible
+FAILED: Execution Error, return code 10311 from org.apache.hadoop.hive.ql.ddl.DDLTask. Reordering columns is not supported for table default.src_orc. SerDe may be incompatible
diff --git a/ql/src/test/results/clientnegative/orc_reorder_columns1_acid.q.out b/ql/src/test/results/clientnegative/orc_reorder_columns1_acid.q.out
index 611fbb0..a1a790e 100644
--- a/ql/src/test/results/clientnegative/orc_reorder_columns1_acid.q.out
+++ b/ql/src/test/results/clientnegative/orc_reorder_columns1_acid.q.out
@@ -10,4 +10,4 @@ PREHOOK: query: alter table src_orc change key k tinyint first
 PREHOOK: type: ALTERTABLE_RENAMECOL
 PREHOOK: Input: default@src_orc
 PREHOOK: Output: default@src_orc
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Reordering columns is not supported for table default.src_orc. SerDe may be incompatible
+FAILED: Execution Error, return code 10311 from org.apache.hadoop.hive.ql.ddl.DDLTask. Reordering columns is not supported for table default.src_orc. SerDe may be incompatible
diff --git a/ql/src/test/results/clientnegative/orc_reorder_columns2.q.out b/ql/src/test/results/clientnegative/orc_reorder_columns2.q.out
index 2fb288c..63a1833 100644
--- a/ql/src/test/results/clientnegative/orc_reorder_columns2.q.out
+++ b/ql/src/test/results/clientnegative/orc_reorder_columns2.q.out
@@ -10,4 +10,4 @@ PREHOOK: query: alter table src_orc change key k tinyint after val
 PREHOOK: type: ALTERTABLE_RENAMECOL
 PREHOOK: Input: default@src_orc
 PREHOOK: Output: default@src_orc
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Reordering columns is not supported for table default.src_orc. SerDe may be incompatible
+FAILED: Execution Error, return code 10311 from org.apache.hadoop.hive.ql.ddl.DDLTask. Reordering columns is not supported for table default.src_orc. SerDe may be incompatible
diff --git a/ql/src/test/results/clientnegative/orc_reorder_columns2_acid.q.out b/ql/src/test/results/clientnegative/orc_reorder_columns2_acid.q.out
index 2deb291..37f53c6 100644
--- a/ql/src/test/results/clientnegative/orc_reorder_columns2_acid.q.out
+++ b/ql/src/test/results/clientnegative/orc_reorder_columns2_acid.q.out
@@ -10,4 +10,4 @@ PREHOOK: query: alter table src_orc change key k tinyint after val
 PREHOOK: type: ALTERTABLE_RENAMECOL
 PREHOOK: Input: default@src_orc
 PREHOOK: Output: default@src_orc
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Reordering columns is not supported for table default.src_orc. SerDe may be incompatible
+FAILED: Execution Error, return code 10311 from org.apache.hadoop.hive.ql.ddl.DDLTask. Reordering columns is not supported for table default.src_orc. SerDe may be incompatible
diff --git a/ql/src/test/results/clientnegative/orc_replace_columns1.q.out b/ql/src/test/results/clientnegative/orc_replace_columns1.q.out
index f3fcae5..716d3c2 100644
--- a/ql/src/test/results/clientnegative/orc_replace_columns1.q.out
+++ b/ql/src/test/results/clientnegative/orc_replace_columns1.q.out
@@ -10,4 +10,4 @@ PREHOOK: query: alter table src_orc replace columns (k int)
 PREHOOK: type: ALTERTABLE_REPLACECOLS
 PREHOOK: Input: default@src_orc
 PREHOOK: Output: default@src_orc
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Replacing columns cannot drop columns for table default.src_orc. SerDe may be incompatible
+FAILED: Execution Error, return code 10313 from org.apache.hadoop.hive.ql.ddl.DDLTask. Replacing columns cannot drop columns for table default.src_orc. SerDe may be incompatible
diff --git a/ql/src/test/results/clientnegative/orc_replace_columns1_acid.q.out b/ql/src/test/results/clientnegative/orc_replace_columns1_acid.q.out
index 6c66155..d14551e 100644
--- a/ql/src/test/results/clientnegative/orc_replace_columns1_acid.q.out
+++ b/ql/src/test/results/clientnegative/orc_replace_columns1_acid.q.out
@@ -10,4 +10,4 @@ PREHOOK: query: alter table src_orc replace columns (k int)
 PREHOOK: type: ALTERTABLE_REPLACECOLS
 PREHOOK: Input: default@src_orc
 PREHOOK: Output: default@src_orc
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Replacing columns cannot drop columns for table default.src_orc. SerDe may be incompatible
+FAILED: Execution Error, return code 10313 from org.apache.hadoop.hive.ql.ddl.DDLTask. Replacing columns cannot drop columns for table default.src_orc. SerDe may be incompatible
diff --git a/ql/src/test/results/clientnegative/orc_replace_columns2.q.out b/ql/src/test/results/clientnegative/orc_replace_columns2.q.out
index 5c0b45a..8e19445 100644
--- a/ql/src/test/results/clientnegative/orc_replace_columns2.q.out
+++ b/ql/src/test/results/clientnegative/orc_replace_columns2.q.out
@@ -10,5 +10,5 @@ PREHOOK: query: alter table src_orc replace columns (k smallint, val int)
 PREHOOK: type: ALTERTABLE_REPLACECOLS
 PREHOOK: Input: default@src_orc
 PREHOOK: Output: default@src_orc
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
+FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
 val
diff --git a/ql/src/test/results/clientnegative/orc_replace_columns2_acid.q.out b/ql/src/test/results/clientnegative/orc_replace_columns2_acid.q.out
index 8ebb960..13f3356 100644
--- a/ql/src/test/results/clientnegative/orc_replace_columns2_acid.q.out
+++ b/ql/src/test/results/clientnegative/orc_replace_columns2_acid.q.out
@@ -10,5 +10,5 @@ PREHOOK: query: alter table src_orc replace columns (k smallint, val int)
 PREHOOK: type: ALTERTABLE_REPLACECOLS
 PREHOOK: Input: default@src_orc
 PREHOOK: Output: default@src_orc
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
+FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
 val
diff --git a/ql/src/test/results/clientnegative/orc_replace_columns3.q.out b/ql/src/test/results/clientnegative/orc_replace_columns3.q.out
index 3af387f..ca3d679 100644
--- a/ql/src/test/results/clientnegative/orc_replace_columns3.q.out
+++ b/ql/src/test/results/clientnegative/orc_replace_columns3.q.out
@@ -18,5 +18,5 @@ PREHOOK: query: alter table src_orc replace columns (k int, val string, z tinyin
 PREHOOK: type: ALTERTABLE_REPLACECOLS
 PREHOOK: Input: default@src_orc
 PREHOOK: Output: default@src_orc
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
+FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
 z
diff --git a/ql/src/test/results/clientnegative/orc_replace_columns3_acid.q.out b/ql/src/test/results/clientnegative/orc_replace_columns3_acid.q.out
index 6c0fd97..99b5a3c 100644
--- a/ql/src/test/results/clientnegative/orc_replace_columns3_acid.q.out
+++ b/ql/src/test/results/clientnegative/orc_replace_columns3_acid.q.out
@@ -18,5 +18,5 @@ PREHOOK: query: alter table src_orc replace columns (k int, val string, z tinyin
 PREHOOK: type: ALTERTABLE_REPLACECOLS
 PREHOOK: Input: default@src_orc
 PREHOOK: Output: default@src_orc
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
+FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
 z
diff --git a/ql/src/test/results/clientnegative/orc_type_promotion1.q.out b/ql/src/test/results/clientnegative/orc_type_promotion1.q.out
index 2152df8..7c3b35b 100644
--- a/ql/src/test/results/clientnegative/orc_type_promotion1.q.out
+++ b/ql/src/test/results/clientnegative/orc_type_promotion1.q.out
@@ -10,5 +10,5 @@ PREHOOK: query: alter table src_orc change key key int
 PREHOOK: type: ALTERTABLE_RENAMECOL
 PREHOOK: Input: default@src_orc
 PREHOOK: Output: default@src_orc
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
+FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
 key
diff --git a/ql/src/test/results/clientnegative/orc_type_promotion1_acid.q.out b/ql/src/test/results/clientnegative/orc_type_promotion1_acid.q.out
index 71119d0..d82cd81 100644
--- a/ql/src/test/results/clientnegative/orc_type_promotion1_acid.q.out
+++ b/ql/src/test/results/clientnegative/orc_type_promotion1_acid.q.out
@@ -10,5 +10,5 @@ PREHOOK: query: alter table src_orc change key key int
 PREHOOK: type: ALTERTABLE_RENAMECOL
 PREHOOK: Input: default@src_orc
 PREHOOK: Output: default@src_orc
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
+FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
 key
diff --git a/ql/src/test/results/clientnegative/orc_type_promotion2.q.out b/ql/src/test/results/clientnegative/orc_type_promotion2.q.out
index 0b60b97..46c0229 100644
--- a/ql/src/test/results/clientnegative/orc_type_promotion2.q.out
+++ b/ql/src/test/results/clientnegative/orc_type_promotion2.q.out
@@ -66,5 +66,5 @@ PREHOOK: query: alter table src_orc change val val int
 PREHOOK: type: ALTERTABLE_RENAMECOL
 PREHOOK: Input: default@src_orc
 PREHOOK: Output: default@src_orc
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
+FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
 val
diff --git a/ql/src/test/results/clientnegative/orc_type_promotion2_acid.q.out b/ql/src/test/results/clientnegative/orc_type_promotion2_acid.q.out
index 3bcf7ae..b0c04a4 100644
--- a/ql/src/test/results/clientnegative/orc_type_promotion2_acid.q.out
+++ b/ql/src/test/results/clientnegative/orc_type_promotion2_acid.q.out
@@ -66,5 +66,5 @@ PREHOOK: query: alter table src_orc change val val int
 PREHOOK: type: ALTERTABLE_RENAMECOL
 PREHOOK: Input: default@src_orc
 PREHOOK: Output: default@src_orc
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
+FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
 val
diff --git a/ql/src/test/results/clientnegative/orc_type_promotion3.q.out b/ql/src/test/results/clientnegative/orc_type_promotion3.q.out
index 05dbcb6..b7f4c3d 100644
--- a/ql/src/test/results/clientnegative/orc_type_promotion3.q.out
+++ b/ql/src/test/results/clientnegative/orc_type_promotion3.q.out
@@ -10,5 +10,5 @@ PREHOOK: query: alter table src_orc change key key smallint
 PREHOOK: type: ALTERTABLE_RENAMECOL
 PREHOOK: Input: default@src_orc
 PREHOOK: Output: default@src_orc
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
+FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
 key
diff --git a/ql/src/test/results/clientnegative/orc_type_promotion3_acid.q.out b/ql/src/test/results/clientnegative/orc_type_promotion3_acid.q.out
index bfcb6ee..e05f5b6 100644
--- a/ql/src/test/results/clientnegative/orc_type_promotion3_acid.q.out
+++ b/ql/src/test/results/clientnegative/orc_type_promotion3_acid.q.out
@@ -10,5 +10,5 @@ PREHOOK: query: alter table src_orc change key key smallint
 PREHOOK: type: ALTERTABLE_RENAMECOL
 PREHOOK: Input: default@src_orc
 PREHOOK: Output: default@src_orc
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
+FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions :
 key
diff --git a/ql/src/test/results/clientnegative/parquet_alter_part_table_drop_columns.q.out b/ql/src/test/results/clientnegative/parquet_alter_part_table_drop_columns.q.out
index 5b3936d..7556aef 100644
--- a/ql/src/test/results/clientnegative/parquet_alter_part_table_drop_columns.q.out
+++ b/ql/src/test/results/clientnegative/parquet_alter_part_table_drop_columns.q.out
@@ -50,4 +50,4 @@ favnumber int
 PREHOOK: type: ALTERTABLE_REPLACECOLS
 PREHOOK: Input: default@myparquettable_parted
 PREHOOK: Output: default@myparquettable_parted
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Replacing columns cannot drop columns for table default.myparquettable_parted. SerDe may be incompatible
+FAILED: Execution Error, return code 10313 from org.apache.hadoop.hive.ql.ddl.DDLTask. Replacing columns cannot drop columns for table default.myparquettable_parted. SerDe may be incompatible
diff --git a/ql/src/test/results/clientnegative/serde_regex.q.out b/ql/src/test/results/clientnegative/serde_regex.q.out
index 231bc57..b316bc7 100644
--- a/ql/src/test/results/clientnegative/serde_regex.q.out
+++ b/ql/src/test/results/clientnegative/serde_regex.q.out
@@ -22,4 +22,4 @@ STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@serde_regex
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException org.apache.hadoop.hive.serde2.RegexSerDe doesn't allow column [9] named strct with type struct<a:int,b:string>)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException org.apache.hadoop.hive.serde2.RegexSerDe doesn't allow column [9] named strct with type struct<a:int,b:string>)
diff --git a/ql/src/test/results/clientnegative/serde_regex3.q.out b/ql/src/test/results/clientnegative/serde_regex3.q.out
index 5348afd..65cdcb3 100644
--- a/ql/src/test/results/clientnegative/serde_regex3.q.out
+++ b/ql/src/test/results/clientnegative/serde_regex3.q.out
@@ -19,4 +19,4 @@ STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@serde_regex
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException This table does not have serde property "input.regex"!)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException This table does not have serde property "input.regex"!)
diff --git a/ql/src/test/results/clientnegative/special_character_in_tabnames_1.q.out b/ql/src/test/results/clientnegative/special_character_in_tabnames_1.q.out
index 7793afc..0d69543 100644
--- a/ql/src/test/results/clientnegative/special_character_in_tabnames_1.q.out
+++ b/ql/src/test/results/clientnegative/special_character_in_tabnames_1.q.out
@@ -2,4 +2,4 @@ PREHOOK: query: create table `c/b/o_t1`(key string, value string, c_int int, c_f
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@c/b/o_t1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: [c/b/o_t1]: is not a valid table name
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: [c/b/o_t1]: is not a valid table name
diff --git a/ql/src/test/results/clientnegative/strict_managed_tables1.q.out b/ql/src/test/results/clientnegative/strict_managed_tables1.q.out
index e11460e..1ece989 100644
--- a/ql/src/test/results/clientnegative/strict_managed_tables1.q.out
+++ b/ql/src/test/results/clientnegative/strict_managed_tables1.q.out
@@ -26,4 +26,4 @@ PREHOOK: query: create table strict_managed_tables1_tab4 (c1 string, c2 string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@strict_managed_tables1_tab4
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Table default.strict_managed_tables1_tab4 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Table default.strict_managed_tables1_tab4 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.)
diff --git a/ql/src/test/results/clientnegative/strict_managed_tables2.q.out b/ql/src/test/results/clientnegative/strict_managed_tables2.q.out
index 04b878d..edc1788 100644
--- a/ql/src/test/results/clientnegative/strict_managed_tables2.q.out
+++ b/ql/src/test/results/clientnegative/strict_managed_tables2.q.out
@@ -10,4 +10,4 @@ PREHOOK: query: alter table strict_managed_tables2_tab1 set tblproperties ('EXTE
 PREHOOK: type: ALTERTABLE_PROPERTIES
 PREHOOK: Input: default@strict_managed_tables2_tab1
 PREHOOK: Output: default@strict_managed_tables2_tab1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. Table default.strict_managed_tables2_tab1 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.
+FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. Table default.strict_managed_tables2_tab1 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.
diff --git a/ql/src/test/results/clientnegative/strict_managed_tables3.q.out b/ql/src/test/results/clientnegative/strict_managed_tables3.q.out
index ed92c03..e938c8c 100644
--- a/ql/src/test/results/clientnegative/strict_managed_tables3.q.out
+++ b/ql/src/test/results/clientnegative/strict_managed_tables3.q.out
@@ -10,4 +10,4 @@ PREHOOK: query: alter table strict_managed_tables3_tab1 unset tblproperties ('EX
 PREHOOK: type: ALTERTABLE_PROPERTIES
 PREHOOK: Input: default@strict_managed_tables3_tab1
 PREHOOK: Output: default@strict_managed_tables3_tab1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. Table default.strict_managed_tables3_tab1 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.
+FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. Table default.strict_managed_tables3_tab1 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.
diff --git a/ql/src/test/results/clientnegative/strict_managed_tables4.q.out b/ql/src/test/results/clientnegative/strict_managed_tables4.q.out
index 924f03b..2a120e0 100644
--- a/ql/src/test/results/clientnegative/strict_managed_tables4.q.out
+++ b/ql/src/test/results/clientnegative/strict_managed_tables4.q.out
@@ -28,4 +28,4 @@ STORED AS AVRO
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@strict_managed_tables6_tab2
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Table default.strict_managed_tables6_tab2 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Table default.strict_managed_tables6_tab2 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.)
diff --git a/ql/src/test/results/clientnegative/strict_managed_tables5.q.out b/ql/src/test/results/clientnegative/strict_managed_tables5.q.out
index a233b16..f1294fd 100644
--- a/ql/src/test/results/clientnegative/strict_managed_tables5.q.out
+++ b/ql/src/test/results/clientnegative/strict_managed_tables5.q.out
@@ -16,4 +16,4 @@ STORED BY 'org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler'
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@strict_managed_tables5_tab2
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Table default.strict_managed_tables5_tab2 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Table default.strict_managed_tables5_tab2 failed strict managed table checks due to the following reason: Table is marked as a managed table but is not transactional.)
diff --git a/ql/src/test/results/clientnegative/strict_managed_tables6.q.out b/ql/src/test/results/clientnegative/strict_managed_tables6.q.out
index 09154e2..077bd7d 100644
--- a/ql/src/test/results/clientnegative/strict_managed_tables6.q.out
+++ b/ql/src/test/results/clientnegative/strict_managed_tables6.q.out
@@ -27,4 +27,4 @@ PREHOOK: type: ALTERTABLE_LOCATION
 PREHOOK: Input: smt6@strict_managed_tables1_tab1
 #### A masked pattern was here ####
 PREHOOK: Output: smt6@strict_managed_tables1_tab1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. Cannot change location of a managed table hive.smt6.strict_managed_tables1_tab1 as it is enabled for replication.
+FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. Cannot change location of a managed table hive.smt6.strict_managed_tables1_tab1 as it is enabled for replication.
diff --git a/ql/src/test/results/clientnegative/temp_table_addpart1.q.out b/ql/src/test/results/clientnegative/temp_table_addpart1.q.out
index 524a8e6..daa2436 100644
--- a/ql/src/test/results/clientnegative/temp_table_addpart1.q.out
+++ b/ql/src/test/results/clientnegative/temp_table_addpart1.q.out
@@ -23,4 +23,4 @@ b=f/c=s
 PREHOOK: query: alter table addpart1_temp add partition (b='f', c='')
 PREHOOK: type: ALTERTABLE_ADDPARTS
 PREHOOK: Output: default@addpart1_temp
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. partition spec is invalid; field c does not exist or is empty
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. partition spec is invalid; field c does not exist or is empty
diff --git a/ql/src/test/results/clientnegative/temp_table_alter_rename_partition_failure.q.out b/ql/src/test/results/clientnegative/temp_table_alter_rename_partition_failure.q.out
index 66e4abb..c3bbc2b 100644
--- a/ql/src/test/results/clientnegative/temp_table_alter_rename_partition_failure.q.out
+++ b/ql/src/test/results/clientnegative/temp_table_alter_rename_partition_failure.q.out
@@ -34,4 +34,4 @@ POSTHOOK: Lineage: alter_rename_partition_temp PARTITION(pcol1=old_part1,pcol2=o
 PREHOOK: query: alter table alter_rename_partition_temp partition (pCol1='nonexist_part1', pcol2='nonexist_part2') rename to partition (pCol1='new_part1', pcol2='new_part2')
 PREHOOK: type: ALTERTABLE_RENAMEPART
 PREHOOK: Input: default@alter_rename_partition_temp
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Rename partition: source partition [pcol1=nonexist_part1/pcol2=nonexist_part2] does not exist.
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Rename partition: source partition [pcol1=nonexist_part1/pcol2=nonexist_part2] does not exist.
diff --git a/ql/src/test/results/clientnegative/temp_table_alter_rename_partition_failure2.q.out b/ql/src/test/results/clientnegative/temp_table_alter_rename_partition_failure2.q.out
index 2e3b2c1..a8c7bb3 100644
--- a/ql/src/test/results/clientnegative/temp_table_alter_rename_partition_failure2.q.out
+++ b/ql/src/test/results/clientnegative/temp_table_alter_rename_partition_failure2.q.out
@@ -43,4 +43,4 @@ PREHOOK: type: ALTERTABLE_RENAMEPART
 PREHOOK: Input: default@alter_rename_partition_temp
 PREHOOK: Output: default@alter_rename_partition_temp@pcol1=new_part1%3A/pcol2=new_part2%3A
 PREHOOK: Output: default@alter_rename_partition_temp@pcol1=old_part1%3A/pcol2=old_part2%3A
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to rename partition. Partition pcol1=new_part1%3A/pcol2=new_part2%3A already exists
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to rename partition. Partition pcol1=new_part1%3A/pcol2=new_part2%3A already exists
diff --git a/ql/src/test/results/clientnegative/temp_table_exchange_partitions.q.out b/ql/src/test/results/clientnegative/temp_table_exchange_partitions.q.out
index 5d94a7b..7785660 100644
--- a/ql/src/test/results/clientnegative/temp_table_exchange_partitions.q.out
+++ b/ql/src/test/results/clientnegative/temp_table_exchange_partitions.q.out
@@ -62,4 +62,4 @@ PREHOOK: query: ALTER TABLE ex1.exchange_part_test1 EXCHANGE PARTITION (ds='2013
 PREHOOK: type: ALTERTABLE_EXCHANGEPARTITION
 PREHOOK: Input: ex2@exchange_part_test2
 PREHOOK: Output: ex1@exchange_part_test1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Exchanging partitions between temporary and non-temporary tables is not supported.)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Exchanging partitions between temporary and non-temporary tables is not supported.)
diff --git a/ql/src/test/results/clientnegative/temp_table_rename.q.out b/ql/src/test/results/clientnegative/temp_table_rename.q.out
index e868e95..8191849 100644
--- a/ql/src/test/results/clientnegative/temp_table_rename.q.out
+++ b/ql/src/test/results/clientnegative/temp_table_rename.q.out
@@ -18,4 +18,4 @@ PREHOOK: query: alter table tmp2 rename to tmp1
 PREHOOK: type: ALTERTABLE_RENAME
 PREHOOK: Input: default@tmp2
 PREHOOK: Output: default@tmp2
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. Cannot rename temporary table to tmp1 - temporary table already exists with the same name
+FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. Cannot rename temporary table to tmp1 - temporary table already exists with the same name
diff --git a/ql/src/test/results/clientnegative/touch1.q.out b/ql/src/test/results/clientnegative/touch1.q.out
index 9a62339..fc22130 100644
--- a/ql/src/test/results/clientnegative/touch1.q.out
+++ b/ql/src/test/results/clientnegative/touch1.q.out
@@ -1,4 +1,4 @@
 PREHOOK: query: ALTER TABLE srcpart TOUCH PARTITION (ds='2008-04-08', hr='13')
 PREHOOK: type: ALTERTABLE_TOUCH
 PREHOOK: Input: default@srcpart
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Specified partition does not exist
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Specified partition does not exist
diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out
index 354d048..8e651f1 100644
--- a/ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out
+++ b/ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out
@@ -202,7 +202,7 @@ PREHOOK: query: ALTER TABLE encrypted_db_outloc.renamed_encrypted_table_n1 RENAM
 PREHOOK: type: ALTERTABLE_RENAME
 PREHOOK: Input: encrypted_db_outloc@renamed_encrypted_table_n1
 PREHOOK: Output: encrypted_db_outloc@renamed_encrypted_table_n1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. Alter Table operation for encrypted_db_outloc.renamed_encrypted_table_n1 failed to move data due to: 'Got exception: org.apache.hadoop.ipc.RemoteException /build/ql/test/data/specified_db_location/renamed_encrypted_table_n1 can't be moved from an encryption zone.' See hive log file for details.
+FAILED: Execution Error, return code 40013 from org.apache.hadoop.hive.ql.ddl.DDLTask. Unable to alter table. Alter Table operation for encrypted_db_outloc.renamed_encrypted_table_n1 failed to move data due to: 'Got exception: org.apache.hadoop.ipc.RemoteException /build/ql/test/data/specified_db_location/renamed_encrypted_table_n1 can't be moved from an encryption zone.' See hive log file for details.
 PREHOOK: query: SHOW TABLES
 PREHOOK: type: SHOWTABLES
 PREHOOK: Input: database:encrypted_db_outloc
@@ -230,7 +230,7 @@ PREHOOK: query: DROP DATABASE encrypted_db
 PREHOOK: type: DROPDATABASE
 PREHOOK: Input: database:encrypted_db
 PREHOOK: Output: database:encrypted_db
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Database encrypted_db is not empty. One or more tables exist.)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Database encrypted_db is not empty. One or more tables exist.)
 PREHOOK: query: DROP TABLE encrypted_db_outloc.renamed_encrypted_table_n1 PURGE
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: encrypted_db_outloc@renamed_encrypted_table_n1
diff --git a/ql/src/test/results/clientpositive/llap/resourceplan.q.out b/ql/src/test/results/clientpositive/llap/resourceplan.q.out
index dddf147..b4b48d7 100644
--- a/ql/src/test/results/clientpositive/llap/resourceplan.q.out
+++ b/ql/src/test/results/clientpositive/llap/resourceplan.q.out
@@ -485,7 +485,7 @@ plan_2	default	DISABLED	10	default
 PREHOOK: query: CREATE RESOURCE PLAN plan_2
 PREHOOK: type: CREATE RESOURCEPLAN
 PREHOOK: Output: dummyHostnameForTest
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Resource plan plan_2 already exists
+FAILED: Execution Error, return code 10417 from org.apache.hadoop.hive.ql.ddl.DDLTask. Resource plan plan_2 already exists
 PREHOOK: query: CREATE RESOURCE PLAN IF NOT EXISTS plan_2
 PREHOOK: type: CREATE RESOURCEPLAN
 PREHOOK: Output: dummyHostnameForTest
@@ -495,7 +495,7 @@ FAILED: SemanticException Invalid create arguments (tok_create_rp plan_3 (tok_qu
 PREHOOK: query: ALTER RESOURCE PLAN plan_1 RENAME TO plan_2
 PREHOOK: type: ALTER RESOURCEPLAN
 PREHOOK: Output: dummyHostnameForTest
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. AlreadyExistsException(message:Resource plan name should be unique: )
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. AlreadyExistsException(message:Resource plan name should be unique: )
 PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
 PREHOOK: type: QUERY
 PREHOOK: Input: sys@wm_resourceplans
@@ -569,7 +569,7 @@ STAGE PLANS:
 PREHOOK: query: ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 30, DEFAULT POOL = default1
 PREHOOK: type: ALTER RESOURCEPLAN
 PREHOOK: Output: dummyHostnameForTest
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. NoSuchObjectException(message:Cannot find pool: default1)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. NoSuchObjectException(message:Cannot find pool: default1)
 PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
 PREHOOK: type: QUERY
 PREHOOK: Input: sys@wm_resourceplans
@@ -616,11 +616,11 @@ STAGE PLANS:
 PREHOOK: query: ALTER RESOURCE PLAN plan_3 RENAME TO plan_4
 PREHOOK: type: ALTER RESOURCEPLAN
 PREHOOK: Output: dummyHostnameForTest
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.)
 PREHOOK: query: ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 30
 PREHOOK: type: ALTER RESOURCEPLAN
 PREHOOK: Output: dummyHostnameForTest
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.)
 PREHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE
 PREHOOK: type: ALTER RESOURCEPLAN
 PREHOOK: Output: dummyHostnameForTest
@@ -639,7 +639,7 @@ plan_3	default	DISABLED	NULL	default
 PREHOOK: query: ALTER RESOURCE PLAN plan_3 ACTIVATE
 PREHOOK: type: ALTER RESOURCEPLAN
 PREHOOK: Output: dummyHostnameForTest
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan plan_3 is disabled and should be enabled before activation (or in the same command))
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan plan_3 is disabled and should be enabled before activation (or in the same command))
 PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
 PREHOOK: type: QUERY
 PREHOOK: Input: sys@wm_resourceplans
@@ -713,7 +713,7 @@ plan_3	default	ACTIVE	NULL	default
 PREHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE
 PREHOOK: type: ALTER RESOURCEPLAN
 PREHOOK: Output: dummyHostnameForTest
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan plan_3 is active; activate another plan first, or disable workload management.)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan plan_3 is active; activate another plan first, or disable workload management.)
 PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
 PREHOOK: type: QUERY
 PREHOOK: Input: sys@wm_resourceplans
@@ -727,7 +727,7 @@ plan_3	default	ACTIVE	NULL	default
 PREHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE
 PREHOOK: type: ALTER RESOURCEPLAN
 PREHOOK: Output: dummyHostnameForTest
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan plan_3 is active; activate another plan first, or disable workload management.)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan plan_3 is active; activate another plan first, or disable workload management.)
 PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
 PREHOOK: type: QUERY
 PREHOOK: Input: sys@wm_resourceplans
@@ -861,7 +861,7 @@ STAGE PLANS:
 PREHOOK: query: DROP RESOURCE PLAN plan_2
 PREHOOK: type: DROP RESOURCEPLAN
 PREHOOK: Output: dummyHostnameForTest
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Cannot drop an active resource plan)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. MetaException(message:Cannot drop an active resource plan)
 PREHOOK: query: DROP RESOURCE PLAN plan_3
 PREHOOK: type: DROP RESOURCEPLAN
 PREHOOK: Output: dummyHostnameForTest
@@ -879,7 +879,7 @@ plan_2	default	ACTIVE	10	default
 PREHOOK: query: DROP RESOURCE PLAN plan_99999
 PREHOOK: type: DROP RESOURCEPLAN
 PREHOOK: Output: dummyHostnameForTest
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Resource plan plan_99999 does not exist
+FAILED: Execution Error, return code 10418 from org.apache.hadoop.hive.ql.ddl.DDLTask. Resource plan plan_99999 does not exist
 PREHOOK: query: DROP RESOURCE PLAN IF EXISTS plan_99999
 PREHOOK: type: DROP RESOURCEPLAN
 PREHOOK: Output: dummyHostnameForTest
@@ -975,7 +975,7 @@ plan_1	default	trigger_1	BYTES_READ > '10kb'	KILL
 PREHOOK: query: CREATE TRIGGER plan_1.trigger_1 WHEN ELAPSED_TIME > 300 DO KILL
 PREHOOK: type: CREATE TRIGGER
 PREHOOK: Output: dummyHostnameForTest
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. AlreadyExistsException(message:Trigger already exists, use alter: )
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. AlreadyExistsException(message:Trigger already exists, use alter: )
 FAILED: ParseException line 4:60 mismatched input 'AND' expecting DO near ''30sec'' in create trigger statement
 FAILED: ParseException line 2:63 mismatched input 'OR' expecting DO near ''30second'' in create trigger statement
 FAILED: ParseException line 2:50 mismatched input '>=' expecting > near 'ELAPSED_TIME' in comparisionOperator
@@ -985,15 +985,15 @@ FAILED: ParseException line 2:50 mismatched input '=' expecting > near 'ELAPSED_
 PREHOOK: query: CREATE TRIGGER plan_1.trigger_2 WHEN BYTES_READ > '10k' DO KILL
 PREHOOK: type: CREATE TRIGGER
 PREHOOK: Output: dummyHostnameForTest
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.IllegalArgumentException: Invalid size unit k
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.IllegalArgumentException: Invalid size unit k
 PREHOOK: query: CREATE TRIGGER plan_1.trigger_2 WHEN ELAPSED_TIME > '10 millis' DO KILL
 PREHOOK: type: CREATE TRIGGER
 PREHOOK: Output: dummyHostnameForTest
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.IllegalArgumentException: Invalid time unit millis
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.IllegalArgumentException: Invalid time unit millis
 PREHOOK: query: CREATE TRIGGER plan_1.trigger_2 WHEN BYTES_READ > '-1000' DO KILL
 PREHOOK: type: CREATE TRIGGER
 PREHOOK: Output: dummyHostnameForTest
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.IllegalArgumentException: Illegal value for counter limit. Expected a positive long value.
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.IllegalArgumentException: Illegal value for counter limit. Expected a positive long value.
 PREHOOK: query: CREATE TRIGGER plan_1.trigger_2 WHEN ELAPSED_TIME > '30hour' DO MOVE TO slow_pool
 PREHOOK: type: CREATE TRIGGER
 PREHOOK: Output: dummyHostnameForTest
@@ -1071,7 +1071,7 @@ plan_1	default	trigger_2	ELAPSED_TIME > '30hour'	MOVE TO slow_pool
 PREHOOK: query: CREATE TRIGGER plan_2.trigger_1 WHEN BYTES_READ > '100mb' DO MOVE TO null_pool
 PREHOOK: type: CREATE TRIGGER
 PREHOOK: Output: dummyHostnameForTest
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.)
 PREHOOK: query: CREATE TRIGGER `table`.`table` WHEN BYTES_WRITTEN > '100KB' DO MOVE TO `default`
 PREHOOK: type: CREATE TRIGGER
 PREHOOK: Output: dummyHostnameForTest
@@ -1148,11 +1148,11 @@ table	default	DISABLED	1	default
 PREHOOK: query: DROP TRIGGER plan_1.trigger_2
 PREHOOK: type: DROP TRIGGER
 PREHOOK: Output: dummyHostnameForTest
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.)
 PREHOOK: query: ALTER TRIGGER plan_1.trigger_2 WHEN BYTES_READ > "1000gb" DO KILL
 PREHOOK: type: ALTER TRIGGER
 PREHOOK: Output: dummyHostnameForTest
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.)
 PREHOOK: query: ALTER RESOURCE PLAN plan_1 ACTIVATE
 PREHOOK: type: ALTER RESOURCEPLAN
 PREHOOK: Output: dummyHostnameForTest
@@ -1172,11 +1172,11 @@ table	default	DISABLED	1	default
 PREHOOK: query: DROP TRIGGER plan_1.trigger_2
 PREHOOK: type: DROP TRIGGER
 PREHOOK: Output: dummyHostnameForTest
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.)
 PREHOOK: query: ALTER TRIGGER plan_1.trigger_2 WHEN BYTES_READ > "1000KB" DO KILL
 PREHOOK: type: ALTER TRIGGER
 PREHOOK: Output: dummyHostnameForTest
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.)
 PREHOOK: query: ALTER RESOURCE PLAN plan_2 DISABLE
 PREHOOK: type: ALTER RESOURCEPLAN
 PREHOOK: Output: dummyHostnameForTest
@@ -1224,13 +1224,13 @@ PREHOOK: query: CREATE POOL plan_1.default WITH
    ALLOC_FRACTION=1.0, QUERY_PARALLELISM=5, SCHEDULING_POLICY='default'
 PREHOOK: type: CREATE POOL
 PREHOOK: Output: dummyHostnameForTest
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.)
 FAILED: SemanticException alloc_fraction should be specified for a pool
 FAILED: SemanticException query_parallelism should be specified for a pool
 PREHOOK: query: CREATE POOL plan_2.default WITH ALLOC_FRACTION=1.0, QUERY_PARALLELISM=5
 PREHOOK: type: CREATE POOL
 PREHOOK: Output: dummyHostnameForTest
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. AlreadyExistsException(message:Pool already exists: )
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. AlreadyExistsException(message:Pool already exists: )
 PREHOOK: query: SELECT * FROM SYS.WM_POOLS
 PREHOOK: type: QUERY
 PREHOOK: Input: sys@wm_pools
@@ -1246,7 +1246,7 @@ PREHOOK: query: CREATE POOL plan_2.default.c1 WITH
     ALLOC_FRACTION=0.3, QUERY_PARALLELISM=3, SCHEDULING_POLICY='invalid'
 PREHOOK: type: CREATE POOL
 PREHOOK: Output: dummyHostnameForTest
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. Invalid scheduling policy invalid
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Invalid scheduling policy invalid
 PREHOOK: query: CREATE POOL plan_2.default.c1 WITH
     ALLOC_FRACTION=0.3, QUERY_PARALLELISM=3, SCHEDULING_POLICY='fair'
 PREHOOK: type: CREATE POOL
@@ -1290,7 +1290,7 @@ Sum of children pools' alloc fraction should be less than 1 got: 1.05 for pool:
 PREHOOK: query: ALTER RESOURCE PLAN plan_2 ENABLE ACTIVATE
 PREHOOK: type: ALTER RESOURCEPLAN
 PREHOOK: Output: dummyHostnameForTest
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:ResourcePlan: plan_2 is invalid: [Sum of children pools' alloc fraction should be less than 1 got: 1.05 for pool: default])
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:ResourcePlan: plan_2 is invalid: [Sum of children pools' alloc fraction should be less than 1 got: 1.05 for pool: default])
 PREHOOK: query: EXPLAIN ALTER POOL plan_2.default.c2 SET ALLOC_FRACTION = 0.7, QUERY_PARALLELISM = 1
 PREHOOK: type: ALTER POOL
 PREHOOK: Output: dummyHostnameForTest
@@ -1404,7 +1404,7 @@ STAGE PLANS:
 PREHOOK: query: DROP POOL plan_2.default
 PREHOOK: type: DROP POOL
 PREHOOK: Output: dummyHostnameForTest
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. NoSuchObjectException(message:Cannot delete pool: default)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. NoSuchObjectException(message:Cannot delete pool: default)
 PREHOOK: query: SELECT * FROM SYS.WM_POOLS
 PREHOOK: type: QUERY
 PREHOOK: Input: sys@wm_pools
@@ -1422,7 +1422,7 @@ PREHOOK: query: CREATE POOL plan_2.child1.child2 WITH
     QUERY_PARALLELISM=2, SCHEDULING_POLICY='fifo', ALLOC_FRACTION=0.8
 PREHOOK: type: CREATE POOL
 PREHOOK: Output: dummyHostnameForTest
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. NoSuchObjectException(message:Pool path is invalid, the parent does not exist)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. NoSuchObjectException(message:Pool path is invalid, the parent does not exist)
 PREHOOK: query: CREATE POOL `table`.`table` WITH
   SCHEDULING_POLICY='fifo', ALLOC_FRACTION=0.5, QUERY_PARALLELISM=1
 PREHOOK: type: CREATE POOL
@@ -1498,7 +1498,7 @@ table	default	table.pool.child2	0.7	3	fair
 PREHOOK: query: DROP POOL `table`.`table`
 PREHOOK: type: DROP POOL
 PREHOOK: Output: dummyHostnameForTest
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Cannot drop a pool that has child pools)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Cannot drop a pool that has child pools)
 PREHOOK: query: SELECT * FROM SYS.WM_POOLS
 PREHOOK: type: QUERY
 PREHOOK: Input: sys@wm_pools
@@ -1519,7 +1519,7 @@ table	default	table.pool.child2	0.7	3	fair
 PREHOOK: query: DROP POOL `table`.default
 PREHOOK: type: DROP POOL
 PREHOOK: Output: dummyHostnameForTest
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Cannot drop default pool of a resource plan)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Cannot drop default pool of a resource plan)
 PREHOOK: query: SELECT * FROM SYS.WM_POOLS
 PREHOOK: type: QUERY
 PREHOOK: Input: sys@wm_pools
@@ -1769,11 +1769,11 @@ table	default	table.pool.child2	trigger2
 PREHOOK: query: ALTER POOL plan_2.default ADD TRIGGER trigger_1
 PREHOOK: type: ALTER POOL
 PREHOOK: Output: dummyHostnameForTest
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. NoSuchObjectException(message:Cannot find pool: default)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. NoSuchObjectException(message:Cannot find pool: default)
 PREHOOK: query: ALTER POOL plan_2.def ADD TRIGGER trigger_2
 PREHOOK: type: ALTER POOL
 PREHOOK: Output: dummyHostnameForTest
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. NoSuchObjectException(message:Cannot find trigger with name: trigger_2)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. NoSuchObjectException(message:Cannot find trigger with name: trigger_2)
 PREHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS
 PREHOOK: type: QUERY
 PREHOOK: Input: sys@wm_pools_to_triggers
@@ -1811,7 +1811,7 @@ POSTHOOK: type: ALTER POOL
 PREHOOK: query: ALTER POOL plan_2.def.c1 DROP TRIGGER trigger_2
 PREHOOK: type: ALTER POOL
 PREHOOK: Output: dummyHostnameForTest
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. NoSuchObjectException(message:Cannot find trigger with name: trigger_2)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. NoSuchObjectException(message:Cannot find trigger with name: trigger_2)
 PREHOOK: query: DROP POOL `table`.`table`.pool.child1
 PREHOOK: type: DROP POOL
 PREHOOK: Output: dummyHostnameForTest
@@ -1949,7 +1949,7 @@ plan_2	default	USER	user2	def	1
 PREHOOK: query: DROP POOL plan_2.def.c1
 PREHOOK: type: DROP POOL
 PREHOOK: Output: dummyHostnameForTest
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Please remove all mappings for this pool.)
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. InvalidOperationException(message:Please remove all mappings for this pool.)
 PREHOOK: query: EXPLAIN DROP USER MAPPING "user2" in plan_2
 PREHOOK: type: DROP MAPPING
 PREHOOK: Output: dummyHostnameForTest
diff --git a/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java b/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java
index c9a57c5..120de13 100644
--- a/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java
+++ b/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java
@@ -245,7 +245,7 @@ public abstract class CLIServiceTest {
     opStatus = runAsyncAndWait(sessionHandle, queryString, confOverlay, OperationState.ERROR, longPollingTimeout);
     // sqlState, errorCode should be set
     assertEquals(opStatus.getOperationException().getSQLState(), "08S01");
-    assertEquals(opStatus.getOperationException().getErrorCode(), 1);
+    assertEquals(opStatus.getOperationException().getErrorCode(), 40000);
     /**
      * Execute an async query with default config
      */
diff --git a/service/src/test/org/apache/hive/service/cli/thrift/ThriftCLIServiceTest.java b/service/src/test/org/apache/hive/service/cli/thrift/ThriftCLIServiceTest.java
index 18eff56..97ba39f 100644
--- a/service/src/test/org/apache/hive/service/cli/thrift/ThriftCLIServiceTest.java
+++ b/service/src/test/org/apache/hive/service/cli/thrift/ThriftCLIServiceTest.java
@@ -290,7 +290,7 @@ public abstract class ThriftCLIServiceTest {
         OperationState.ERROR, state);
     // sqlState, errorCode should be set to appropriate values
     assertEquals(opStatus.getOperationException().getSQLState(), "08S01");
-    assertEquals(opStatus.getOperationException().getErrorCode(), 1);
+    assertEquals(opStatus.getOperationException().getErrorCode(), 40000);
 
     // Cleanup
     queryString = "DROP TABLE TEST_EXEC_ASYNC_THRIFT";
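A minimal illustrative sketch (not part of this commit): assuming a HiveServer2 endpoint at jdbc:hive2://localhost:10000 and the src_orc table used by the negative tests above, a JDBC client would now observe the task-specific error code (for example 10313 or 40013) via SQLException.getErrorCode(), where it previously saw the generic value 1, matching the assertions updated in CLIServiceTest and ThriftCLIServiceTest.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class ErrorCodeProbe {
      public static void main(String[] args) throws Exception {
        // Connection URL, database and table name are assumptions for illustration only.
        try (Connection conn = DriverManager.getConnection("jdbc:hive2://localhost:10000/default");
             Statement stmt = conn.createStatement()) {
          // A DDL statement expected to fail: ORC tables cannot drop columns via REPLACE COLUMNS.
          stmt.execute("ALTER TABLE src_orc REPLACE COLUMNS (k INT)");
        } catch (SQLException e) {
          // With the updated DDLTask behaviour the error code carries the canonical
          // ErrorMsg code (e.g. 10313) instead of the generic return code 1.
          System.out.println("SQLState=" + e.getSQLState() + ", errorCode=" + e.getErrorCode());
        }
      }
    }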