You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ozone.apache.org by bh...@apache.org on 2020/07/14 15:57:53 UTC

[hadoop-ozone] branch ozone-0.6.0 updated: HDDS-3685. Remove replay logic from actual request logic. (#1082)

This is an automated email from the ASF dual-hosted git repository.

bharat pushed a commit to branch ozone-0.6.0
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git


The following commit(s) were added to refs/heads/ozone-0.6.0 by this push:
     new 2bd305b  HDDS-3685. Remove replay logic from actual request logic. (#1082)
2bd305b is described below

commit 2bd305b6da3aba90afca5e2b7f609c4bf54b420c
Author: Bharat Viswanadham <bh...@apache.org>
AuthorDate: Mon Jul 13 17:30:10 2020 -0700

    HDDS-3685. Remove replay logic from actual request logic. (#1082)
---
 .../hadoop/ozone/om/exceptions/OMException.java    |   2 -
 .../ozone/om/exceptions/OMReplayException.java     |  50 ---------
 .../src/main/proto/OmClientProtocol.proto          |   3 -
 .../interface-client/src/main/proto/proto.lock     |   4 -
 .../ozone/om/ratis/OzoneManagerStateMachine.java   |   8 --
 .../hadoop/ozone/om/request/OMClientRequest.java   |  39 +------
 .../om/request/bucket/OMBucketCreateRequest.java   |  26 ++---
 .../om/request/bucket/OMBucketDeleteRequest.java   |  26 ++---
 .../request/bucket/OMBucketSetPropertyRequest.java |  12 +--
 .../om/request/bucket/acl/OMBucketAclRequest.java  |  11 --
 .../om/request/file/OMDirectoryCreateRequest.java  |  47 ++------
 .../ozone/om/request/file/OMFileCreateRequest.java |  48 ++-------
 .../om/request/key/OMAllocateBlockRequest.java     |  50 ++-------
 .../ozone/om/request/key/OMKeyCommitRequest.java   |  74 ++-----------
 .../ozone/om/request/key/OMKeyCreateRequest.java   |  51 +++------
 .../ozone/om/request/key/OMKeyDeleteRequest.java   |  34 ++----
 .../ozone/om/request/key/OMKeyPurgeRequest.java    |  97 +----------------
 .../ozone/om/request/key/OMKeyRenameRequest.java   | 120 +++++----------------
 .../ozone/om/request/key/OMKeysDeleteRequest.java  |  37 ++-----
 .../ozone/om/request/key/acl/OMKeyAclRequest.java  |  24 +----
 .../om/request/key/acl/OMKeyAddAclRequest.java     |   6 --
 .../om/request/key/acl/OMKeyRemoveAclRequest.java  |   6 --
 .../om/request/key/acl/OMKeySetAclRequest.java     |   6 --
 .../request/key/acl/prefix/OMPrefixAclRequest.java |  30 +-----
 .../key/acl/prefix/OMPrefixAddAclRequest.java      |   6 --
 .../key/acl/prefix/OMPrefixRemoveAclRequest.java   |   6 --
 .../key/acl/prefix/OMPrefixSetAclRequest.java      |   6 --
 .../S3InitiateMultipartUploadRequest.java          |   5 -
 .../multipart/S3MultipartUploadAbortRequest.java   |  20 ----
 .../S3MultipartUploadCommitPartRequest.java        |  34 ++----
 .../S3MultipartUploadCompleteRequest.java          |  82 ++------------
 .../om/request/volume/OMVolumeCreateRequest.java   |  23 +---
 .../om/request/volume/OMVolumeDeleteRequest.java   |  10 --
 .../om/request/volume/OMVolumeSetOwnerRequest.java |  57 +++-------
 .../om/request/volume/OMVolumeSetQuotaRequest.java |  20 +---
 .../om/request/volume/acl/OMVolumeAclRequest.java  |  52 +++------
 .../request/volume/acl/OMVolumeAddAclRequest.java  |   6 --
 .../volume/acl/OMVolumeRemoveAclRequest.java       |   6 --
 .../request/volume/acl/OMVolumeSetAclRequest.java  |   6 --
 .../hadoop/ozone/om/response/OMClientResponse.java |   2 +-
 .../om/response/bucket/OMBucketCreateResponse.java |   2 +-
 .../om/response/bucket/OMBucketDeleteResponse.java |   2 +-
 .../bucket/OMBucketSetPropertyResponse.java        |   2 +-
 .../response/bucket/acl/OMBucketAclResponse.java   |   2 +-
 .../response/file/OMDirectoryCreateResponse.java   |  44 ++++----
 .../om/response/file/OMFileCreateResponse.java     |   4 +-
 .../om/response/key/OMAllocateBlockResponse.java   |   2 +-
 .../ozone/om/response/key/OMKeyCommitResponse.java |  24 +----
 .../ozone/om/response/key/OMKeyCreateResponse.java |   2 +-
 .../ozone/om/response/key/OMKeyDeleteResponse.java |  46 ++++----
 .../ozone/om/response/key/OMKeyPurgeResponse.java  |  11 +-
 .../ozone/om/response/key/OMKeyRenameResponse.java |  59 ++--------
 .../om/response/key/acl/OMKeyAclResponse.java      |   2 +-
 .../key/acl/prefix/OMPrefixAclResponse.java        |   2 +-
 .../S3InitiateMultipartUploadResponse.java         |   2 +-
 .../multipart/S3MultipartUploadAbortResponse.java  |   2 +-
 .../S3MultipartUploadCommitPartResponse.java       |  40 ++-----
 .../S3MultipartUploadCompleteResponse.java         |  48 +++------
 .../om/response/volume/OMVolumeAclOpResponse.java  |   2 +-
 .../om/response/volume/OMVolumeCreateResponse.java |   2 +-
 .../om/response/volume/OMVolumeDeleteResponse.java |   2 +-
 .../response/volume/OMVolumeSetOwnerResponse.java  |   4 +-
 .../response/volume/OMVolumeSetQuotaResponse.java  |   2 +-
 .../request/bucket/TestOMBucketCreateRequest.java  |  28 -----
 .../request/bucket/TestOMBucketDeleteRequest.java  |  42 --------
 .../bucket/TestOMBucketSetPropertyRequest.java     |  29 -----
 .../om/request/file/TestOMFileCreateRequest.java   |  29 -----
 .../ozone/om/request/key/TestOMKeyAclRequest.java  |   9 +-
 .../om/request/key/TestOMKeyCommitRequest.java     |  79 --------------
 .../om/request/key/TestOMKeyCreateRequest.java     |  42 --------
 .../om/request/key/TestOMKeyDeleteRequest.java     |  37 -------
 .../key/TestOMKeyPurgeRequestAndResponse.java      | 117 --------------------
 .../om/request/key/TestOMKeyRenameRequest.java     | 102 ------------------
 .../om/request/key/TestOMPrefixAclRequest.java     |  11 +-
 .../request/volume/TestOMVolumeCreateRequest.java  |  26 -----
 .../request/volume/TestOMVolumeDeleteRequest.java  |  38 -------
 .../volume/TestOMVolumeSetOwnerRequest.java        |  31 ------
 .../volume/TestOMVolumeSetQuotaRequest.java        |  30 ------
 .../volume/acl/TestOMVolumeAddAclRequest.java      |  33 ------
 .../volume/acl/TestOMVolumeRemoveAclRequest.java   |  44 --------
 .../volume/acl/TestOMVolumeSetAclRequest.java      |  35 ------
 .../file/TestOMDirectoryCreateResponse.java        |   5 +-
 .../om/response/key/TestOMKeyDeleteResponse.java   |   2 +-
 83 files changed, 282 insertions(+), 1947 deletions(-)

diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
index 58d5a02..1eed619 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
@@ -222,7 +222,5 @@ public class OMException extends IOException {
     DIRECTORY_ALREADY_EXISTS,
 
     INVALID_VOLUME_NAME,
-
-    REPLAY // When ratis logs are replayed.
   }
 }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMReplayException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMReplayException.java
deleted file mode 100644
index 0eeb873..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMReplayException.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.exceptions;
-
-import java.io.IOException;
-
-/**
- * Exception thrown by Ozone Manager when a transaction is replayed. This
- * exception should not be thrown to client. It is used in
- * OMClientRequest#validateAndUpdateCache to log error and continue in case
- * of replay transaction.
- */
-public class OMReplayException extends IOException {
-
-  private final boolean needsDBOperations;
-
-  public OMReplayException() {
-    this(false);
-  }
-
-  /**
-   * When the transaction is a replay but still needs some DB operations to
-   * be performed (such as cleanup of old keys).
-   * @param needsDBOperations
-   */
-  public OMReplayException(boolean needsDBOperations) {
-    // Dummy message. This exception is not thrown to client.
-    super("Replayed transaction");
-    this.needsDBOperations = needsDBOperations;
-  }
-
-  public boolean isDBOperationNeeded() {
-    return needsDBOperations;
-  }
-}
\ No newline at end of file
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index ba193c7..f4cf79a 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -302,9 +302,6 @@ enum Status {
     DIRECTORY_ALREADY_EXISTS = 60;
 
     INVALID_VOLUME_NAME = 61;
-
-    // When transactions are replayed
-    REPLAY = 100;
 }
 
 /**
diff --git a/hadoop-ozone/interface-client/src/main/proto/proto.lock b/hadoop-ozone/interface-client/src/main/proto/proto.lock
index 2d90e1c..f591ad1 100644
--- a/hadoop-ozone/interface-client/src/main/proto/proto.lock
+++ b/hadoop-ozone/interface-client/src/main/proto/proto.lock
@@ -415,10 +415,6 @@
               {
                 "name": "INVALID_VOLUME_NAME",
                 "integer": 61
-              },
-              {
-                "name": "REPLAY",
-                "integer": 100
               }
             ]
           },
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java
index 3aff87a..c042fcb 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.om.ratis;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import com.google.protobuf.ServiceException;
 import java.io.IOException;
@@ -64,7 +63,6 @@ import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.INTERNAL_ERROR;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.METADATA_ERROR;
-import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.REPLAY;
 
 /**
  * The OM StateMachine is the state machine for OM Ratis server. It is
@@ -258,12 +256,6 @@ public class OzoneManagerStateMachine extends BaseStateMachine {
             terminate(omResponse, OMException.ResultCodes.INTERNAL_ERROR);
           } else if (omResponse.getStatus() == METADATA_ERROR) {
             terminate(omResponse, OMException.ResultCodes.METADATA_ERROR);
-          } else if (omResponse.getStatus() == REPLAY) {
-            // For replay we do not add response to double buffer, so update
-            // LastAppliedIndex for the replay transactions here.
-            computeAndUpdateLastAppliedIndex(trxLogIndex,
-                trx.getLogEntry().getTerm(), Lists.newArrayList(trxLogIndex),
-                true);
           }
         }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
index 0353144..3ce059f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
@@ -37,7 +37,6 @@ import org.apache.hadoop.ozone.audit.AuditMessage;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.WithObjectID;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
@@ -54,7 +53,6 @@ import org.apache.hadoop.security.UserGroupInformation;
 
 import javax.annotation.Nonnull;
 
-import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.REPLAY;
 
 /**
  * OMClientRequest provides methods which every write OM request should
@@ -71,8 +69,6 @@ public abstract class OMClientRequest implements RequestAuditor {
   public enum Result {
     SUCCESS, // The request was executed successfully
 
-    REPLAY, // The request is a replay and was ignored
-
     FAILURE // The request failed and exception was thrown
   }
 
@@ -256,7 +252,6 @@ public abstract class OMClientRequest implements RequestAuditor {
 
   /**
    * Add the client response to double buffer and set the flush future.
-   * For responses which has status set to REPLAY it is a no-op.
    * @param trxIndex
    * @param omClientResponse
    * @param omDoubleBufferHelper
@@ -265,13 +260,8 @@ public abstract class OMClientRequest implements RequestAuditor {
       OMClientResponse omClientResponse,
       OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
     if (omClientResponse != null) {
-      // For replay transaction we do not need to add to double buffer, as
-      // for these transactions there is nothing needs to be done for
-      // addDBToBatch.
-      if (omClientResponse.getOMResponse().getStatus() != REPLAY) {
-        omClientResponse.setFlushFuture(
-            omDoubleBufferHelper.add(omClientResponse, trxIndex));
-      }
+      omClientResponse.setFlushFuture(
+          omDoubleBufferHelper.add(omClientResponse, trxIndex));
     }
   }
 
@@ -313,29 +303,4 @@ public abstract class OMClientRequest implements RequestAuditor {
     auditMap.put(OzoneConsts.VOLUME, volume);
     return auditMap;
   }
-
-  /**
-   * Check if the transaction is a replay.
-   * @param ozoneObj OMVolumeArgs or OMBucketInfo or OMKeyInfo object whose 
-   *                 updateID needs to be compared with
-   * @param transactionID the current transaction ID
-   * @return true if transactionID is less than or equal to updateID, false
-   * otherwise.
-   */
-  protected boolean isReplay(OzoneManager om, WithObjectID ozoneObj,
-      long transactionID) {
-    return om.isRatisEnabled() && ozoneObj.isUpdateIDset() &&
-        transactionID <= ozoneObj.getUpdateID();
-  }
-
-  /**
-   * Return a dummy OMClientResponse for when the transactions are replayed.
-   */
-  protected OMResponse createReplayOMResponse(
-      @Nonnull OMResponse.Builder omResponse) {
-
-    omResponse.setSuccess(false);
-    omResponse.setStatus(REPLAY);
-    return omResponse.build();
-  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
index 8181a64..9d7d133 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
@@ -69,6 +69,8 @@ import org.apache.hadoop.util.Time;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_ALREADY_EXISTS;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -167,27 +169,13 @@ public class OMBucketCreateRequest extends OMClientRequest {
       //Check if the volume exists
       if (omVolumeArgs == null) {
         LOG.debug("volume: {} not found ", volumeName);
-        throw new OMException("Volume doesn't exist",
-            OMException.ResultCodes.VOLUME_NOT_FOUND);
+        throw new OMException("Volume doesn't exist", VOLUME_NOT_FOUND);
       }
 
       //Check if bucket already exists
-      OmBucketInfo dbBucketInfo = metadataManager.getBucketTable()
-          .getReadCopy(bucketKey);
-      if (dbBucketInfo != null) {
-        // Check if this transaction is a replay of ratis logs.
-        if (isReplay(ozoneManager, dbBucketInfo, transactionLogIndex)) {
-          // Replay implies the response has already been returned to
-          // the client. So take no further action and return a dummy
-          // OMClientResponse.
-          LOG.debug("Replayed Transaction {} ignored. Request: {}",
-              transactionLogIndex, createBucketRequest);
-          return new OMBucketCreateResponse(createReplayOMResponse(omResponse));
-        } else {
-          LOG.debug("bucket: {} already exists ", bucketName);
-          throw new OMException("Bucket already exist",
-              OMException.ResultCodes.BUCKET_ALREADY_EXISTS);
-        }
+      if (metadataManager.getBucketTable().isExist(bucketKey)) {
+        LOG.debug("bucket: {} already exists ", bucketName);
+        throw new OMException("Bucket already exist", BUCKET_ALREADY_EXISTS);
       }
 
       // Add objectID and updateID
@@ -211,7 +199,7 @@ public class OMBucketCreateRequest extends OMClientRequest {
     } catch (IOException ex) {
       exception = ex;
       omClientResponse = new OMBucketCreateResponse(
-          createErrorOMResponse(omResponse, exception), omBucketInfo);
+          createErrorOMResponse(omResponse, exception));
     } finally {
       addResponseToDoubleBuffer(transactionLogIndex, omClientResponse,
           ozoneManagerDoubleBufferHelper);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java
index 18bf3ae..91aef6a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OMMetrics;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.response.bucket.OMBucketDeleteResponse;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -52,6 +51,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK;
 
@@ -102,7 +102,6 @@ public class OMBucketDeleteRequest extends OMClientRequest {
             volumeName, bucketName, null);
       }
 
-
       // acquire lock
       acquiredVolumeLock =
           omMetadataManager.getLock().acquireReadLock(VOLUME_LOCK, volumeName);
@@ -111,25 +110,12 @@ public class OMBucketDeleteRequest extends OMClientRequest {
           volumeName, bucketName);
 
       // No need to check volume exists here, as bucket cannot be created
-      // with out volume creation.
-      //Check if bucket exists
+      // without volume creation. Check if bucket exists
       String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
-      OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable()
-          .getReadCopy(bucketKey);
-      if (omBucketInfo == null) {
-        LOG.debug("bucket: {} not found ", bucketName);
-        throw new OMException("Bucket doesn't exist",
-            OMException.ResultCodes.BUCKET_NOT_FOUND);
-      }
 
-      // Check if this transaction is a replay of ratis logs.
-      // If this is a replay, then the response has already been returned to
-      // the client. So take no further action and return a dummy
-      // OMClientResponse.
-      if (isReplay(ozoneManager, omBucketInfo, transactionLogIndex)) {
-        LOG.debug("Replayed Transaction {} ignored. Request: {}",
-            transactionLogIndex, deleteBucketRequest);
-        return new OMBucketDeleteResponse(createReplayOMResponse(omResponse));
+      if (!omMetadataManager.getBucketTable().isExist(bucketKey)) {
+        LOG.debug("bucket: {} not found ", bucketName);
+        throw new OMException("Bucket doesn't exist", BUCKET_NOT_FOUND);
       }
 
       //Check if bucket is empty
@@ -155,7 +141,7 @@ public class OMBucketDeleteRequest extends OMClientRequest {
       success = false;
       exception = ex;
       omClientResponse = new OMBucketDeleteResponse(
-          createErrorOMResponse(omResponse, exception), volumeName, bucketName);
+          createErrorOMResponse(omResponse, exception));
     } finally {
       addResponseToDoubleBuffer(transactionLogIndex, omClientResponse,
           ozoneManagerDoubleBufferHelper);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java
index 2288de7..d90f08e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java
@@ -121,16 +121,6 @@ public class OMBucketSetPropertyRequest extends OMClientRequest {
             OMException.ResultCodes.BUCKET_NOT_FOUND);
       }
 
-      // Check if this transaction is a replay of ratis logs.
-      // If a replay, then the response has already been returned to the
-      // client. So take no further action and return a dummy OMClientResponse.
-      if (isReplay(ozoneManager, dbBucketInfo, transactionLogIndex)) {
-        LOG.debug("Replayed Transaction {} ignored. Request: {}",
-            transactionLogIndex, setBucketPropertyRequest);
-        return new OMBucketSetPropertyResponse(
-            createReplayOMResponse(omResponse));
-      }
-
       OmBucketInfo.Builder bucketInfoBuilder = OmBucketInfo.newBuilder();
       bucketInfoBuilder.setVolumeName(dbBucketInfo.getVolumeName())
           .setBucketName(dbBucketInfo.getBucketName())
@@ -190,7 +180,7 @@ public class OMBucketSetPropertyRequest extends OMClientRequest {
       success = false;
       exception = ex;
       omClientResponse = new OMBucketSetPropertyResponse(
-          createErrorOMResponse(omResponse, exception), omBucketInfo);
+          createErrorOMResponse(omResponse, exception));
     } finally {
       addResponseToDoubleBuffer(transactionLogIndex, omClientResponse,
           ozoneManagerDoubleBufferHelper);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java
index f162e88..a493f9f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
-import org.apache.hadoop.ozone.om.response.bucket.acl.OMBucketAclResponse;
 import org.apache.hadoop.ozone.util.BooleanBiFunction;
 import org.apache.hadoop.ozone.om.request.util.ObjectParser;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
@@ -106,16 +105,6 @@ public abstract class OMBucketAclRequest extends OMClientRequest {
         throw new OMException(OMException.ResultCodes.BUCKET_NOT_FOUND);
       }
 
-      // Check if this transaction is a replay of ratis logs.
-      // If this is a replay, then the response has already been returned to
-      // the client. So take no further action and return a dummy
-      // OMClientResponse.
-      if (isReplay(ozoneManager, omBucketInfo, transactionLogIndex)) {
-        LOG.debug("Replayed Transaction {} ignored. Request: {}",
-            transactionLogIndex, getOmRequest());
-        return new OMBucketAclResponse(createReplayOMResponse(omResponse));
-      }
-
       operationResult = omBucketAclOp.apply(ozoneAcls, omBucketInfo);
       omBucketInfo.setUpdateID(transactionLogIndex,
           ozoneManager.isRatisEnabled());
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
index 7f860fc..ec51333 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
@@ -29,7 +29,6 @@ import java.util.Map;
 import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.exceptions.OMReplayException;
 import org.apache.commons.lang3.tuple.ImmutablePair;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
@@ -91,8 +90,6 @@ public class OMDirectoryCreateRequest extends OMKeyRequest {
   public enum Result {
     SUCCESS, // The request was executed successfully
 
-    REPLAY, // The request is a replay and was ignored
-
     DIRECTORY_ALREADY_EXISTS, // Directory key already exists in DB
 
     FAILURE // The request failed and exception was thrown
@@ -197,34 +194,20 @@ public class OMDirectoryCreateRequest extends OMKeyRequest {
         OMFileRequest.addKeyTableCacheEntries(omMetadataManager, volumeName,
             bucketName, Optional.of(dirKeyInfo),
             Optional.of(missingParentInfos), trxnLogIndex);
-
-        omClientResponse = new OMDirectoryCreateResponse(omResponse.build(),
-            dirKeyInfo, missingParentInfos);
         result = Result.SUCCESS;
+        omClientResponse = new OMDirectoryCreateResponse(omResponse.build(),
+            dirKeyInfo, missingParentInfos, result);
       } else {
         // omDirectoryResult == DIRECTORY_EXITS
-        // Check if this is a replay of ratis logs
-        String dirKey = omMetadataManager.getOzoneDirKey(volumeName,
-            bucketName, keyName);
-        OmKeyInfo dbKeyInfo = omMetadataManager.getKeyTable().get(dirKey);
-        if (isReplay(ozoneManager, dbKeyInfo, trxnLogIndex)) {
-          throw new OMReplayException();
-        } else {
-          result = Result.DIRECTORY_ALREADY_EXISTS;
-          omResponse.setStatus(Status.DIRECTORY_ALREADY_EXISTS);
-          omClientResponse = new OMDirectoryCreateResponse(omResponse.build());
-        }
+        result = Result.DIRECTORY_ALREADY_EXISTS;
+        omResponse.setStatus(Status.DIRECTORY_ALREADY_EXISTS);
+        omClientResponse = new OMDirectoryCreateResponse(omResponse.build(),
+            result);
       }
     } catch (IOException ex) {
-      if (ex instanceof OMReplayException) {
-        result = Result.REPLAY;
-        omClientResponse = new OMDirectoryCreateResponse(
-            createReplayOMResponse(omResponse));
-      } else {
-        exception = ex;
-        omClientResponse = new OMDirectoryCreateResponse(
-            createErrorOMResponse(omResponse, exception));
-      }
+      exception = ex;
+      omClientResponse = new OMDirectoryCreateResponse(
+          createErrorOMResponse(omResponse, exception), result);
     } finally {
       addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
           omDoubleBufferHelper);
@@ -234,10 +217,8 @@ public class OMDirectoryCreateRequest extends OMKeyRequest {
       }
     }
 
-    if (result != Result.REPLAY) {
-      auditLog(auditLogger, buildAuditMessage(OMAction.CREATE_DIRECTORY,
-          auditMap, exception, userInfo));
-    }
+    auditLog(auditLogger, buildAuditMessage(OMAction.CREATE_DIRECTORY,
+        auditMap, exception, userInfo));
 
     logResult(createDirectoryRequest, keyArgs, omMetrics, result, trxnLogIndex,
         exception);
@@ -314,12 +295,6 @@ public class OMDirectoryCreateRequest extends OMKeyRequest {
             volumeName, bucketName, keyName);
       }
       break;
-    case REPLAY:
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex,
-            createDirectoryRequest);
-      }
-      break;
     case DIRECTORY_ALREADY_EXISTS:
       if (LOG.isDebugEnabled()) {
         LOG.debug("Directory already exists. Volume:{}, Bucket:{}, Key{}",
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
index 4db8f80..3b0b02b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.exceptions.OMReplayException;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponse;
 import org.slf4j.Logger;
@@ -216,27 +215,10 @@ public class OMFileCreateRequest extends OMKeyRequest {
             OMException.ResultCodes.NOT_A_FILE);
       }
 
-      // Check if Key already exists in KeyTable and this transaction is a
-      // replay.
       String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
           keyName);
       OmKeyInfo dbKeyInfo = omMetadataManager.getKeyTable()
           .getIfExist(ozoneKey);
-      if (dbKeyInfo != null) {
-        // Check if this transaction is a replay of ratis logs.
-        // We check only the KeyTable here and not the OpenKeyTable. In case
-        // this transaction is a replay but the transaction was not committed
-        // to the KeyTable, then we recreate the key in OpenKey table. This is
-        // okay as all the subsequent transactions would also be replayed and
-        // the openKey table would eventually reach the same state.
-        // The reason we do not check the OpenKey table is to avoid a DB read
-        // in regular non-replay scenario.
-        if (isReplay(ozoneManager, dbKeyInfo, trxnLogIndex)) {
-          // Replay implies the response has already been returned to
-          // the client. So take no further action and return a dummy response.
-          throw new OMReplayException();
-        }
-      }
 
       OMFileRequest.OMPathInfo pathInfo =
           OMFileRequest.verifyFilesInPath(omMetadataManager, volumeName,
@@ -312,18 +294,12 @@ public class OMFileCreateRequest extends OMKeyRequest {
 
       result = Result.SUCCESS;
     } catch (IOException ex) {
-      if (ex instanceof OMReplayException) {
-        result = Result.REPLAY;
-        omClientResponse = new OMFileCreateResponse(createReplayOMResponse(
-            omResponse));
-      } else {
-        result = Result.FAILURE;
-        exception = ex;
-        omMetrics.incNumCreateFileFails();
-        omResponse.setCmdType(Type.CreateFile);
-        omClientResponse = new OMFileCreateResponse(createErrorOMResponse(
+      result = Result.FAILURE;
+      exception = ex;
+      omMetrics.incNumCreateFileFails();
+      omResponse.setCmdType(Type.CreateFile);
+      omClientResponse = new OMFileCreateResponse(createErrorOMResponse(
             omResponse, exception));
-      }
     } finally {
       addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
           omDoubleBufferHelper);
@@ -334,18 +310,12 @@ public class OMFileCreateRequest extends OMKeyRequest {
     }
 
     // Audit Log outside the lock
-    if (result != Result.REPLAY) {
-      Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
-      auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(
-          OMAction.CREATE_FILE, auditMap, exception,
-          getOmRequest().getUserInfo()));
-    }
+    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
+    auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(
+        OMAction.CREATE_FILE, auditMap, exception,
+        getOmRequest().getUserInfo()));
 
     switch (result) {
-    case REPLAY:
-      LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex,
-          createFileRequest);
-      break;
     case SUCCESS:
       LOG.debug("File created. Volume:{}, Bucket:{}, Key:{}", volumeName,
           bucketName, keyName);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java
index 348c96a..1a39e0b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java
@@ -25,7 +25,6 @@ import java.util.Map;
 
 import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.om.exceptions.OMReplayException;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
@@ -169,9 +168,8 @@ public class OMAllocateBlockRequest extends OMKeyRequest {
         getOmRequest());
     OMClientResponse omClientResponse = null;
 
-    OmKeyInfo openKeyInfo = null;
+    OmKeyInfo openKeyInfo;
     IOException exception = null;
-    Result result = null;
 
     try {
       // check Acl
@@ -186,30 +184,10 @@ public class OMAllocateBlockRequest extends OMKeyRequest {
 
       openKeyInfo = omMetadataManager.getOpenKeyTable().get(openKeyName);
       if (openKeyInfo == null) {
-        // Check if this transaction is a replay of ratis logs.
-        // If the Key was already committed and this transaction is being
-        // replayed, we should ignore this transaction.
-        String ozoneKey = omMetadataManager.getOzoneKey(volumeName,
-            bucketName, keyName);
-        OmKeyInfo dbKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
-        if (dbKeyInfo != null) {
-          if (isReplay(ozoneManager, dbKeyInfo, trxnLogIndex)) {
-            // This transaction is a replay. Send replay response.
-            throw new OMReplayException();
-          }
-        }
         throw new OMException("Open Key not found " + openKeyName,
             KEY_NOT_FOUND);
       }
 
-      // Check if this transaction is a replay of ratis logs.
-      // Check the updateID of the openKey to verify that it is not greater
-      // than the current transactionLogIndex
-      if (isReplay(ozoneManager, openKeyInfo, trxnLogIndex)) {
-        // This transaction is a replay. Send replay response.
-        throw new OMReplayException();
-      }
-
       // Append new block
       openKeyInfo.appendNewBlocks(Collections.singletonList(
           OmKeyLocationInfo.getFromProtobuf(blockLocation)), false);
@@ -229,35 +207,23 @@ public class OMAllocateBlockRequest extends OMKeyRequest {
           .setKeyLocation(blockLocation).build());
       omClientResponse = new OMAllocateBlockResponse(omResponse.build(),
           openKeyInfo, clientID);
-      result = Result.SUCCESS;
 
       LOG.debug("Allocated block for Volume:{}, Bucket:{}, OpenKey:{}",
           volumeName, bucketName, openKeyName);
     } catch (IOException ex) {
-      if (ex instanceof OMReplayException) {
-        result = Result.REPLAY;
-        omClientResponse = new OMAllocateBlockResponse(createReplayOMResponse(
-            omResponse));
-        LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex,
-            allocateBlockRequest);
-      } else {
-        result = Result.FAILURE;
-        omMetrics.incNumBlockAllocateCallFails();
-        exception = ex;
-        omClientResponse = new OMAllocateBlockResponse(createErrorOMResponse(
-            omResponse, exception));
-        LOG.error("Allocate Block failed. Volume:{}, Bucket:{}, OpenKey:{}. " +
+      omMetrics.incNumBlockAllocateCallFails();
+      exception = ex;
+      omClientResponse = new OMAllocateBlockResponse(createErrorOMResponse(
+          omResponse, exception));
+      LOG.error("Allocate Block failed. Volume:{}, Bucket:{}, OpenKey:{}. " +
             "Exception:{}", volumeName, bucketName, openKeyName, exception);
-      }
     } finally {
       addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
           omDoubleBufferHelper);
     }
 
-    if (result != Result.REPLAY) {
-      auditLog(auditLogger, buildAuditMessage(OMAction.ALLOCATE_BLOCK, auditMap,
-          exception, getOmRequest().getUserInfo()));
-    }
+    auditLog(auditLogger, buildAuditMessage(OMAction.ALLOCATE_BLOCK, auditMap,
+        exception, getOmRequest().getUserInfo()));
 
 
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
index 7ee7db5..edeea3d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OMMetrics;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.exceptions.OMReplayException;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
@@ -71,13 +70,6 @@ public class OMKeyCommitRequest extends OMKeyRequest {
   private static final Logger LOG =
       LoggerFactory.getLogger(OMKeyCommitRequest.class);
 
-  private enum Result {
-    SUCCESS,
-    REPLAY,
-    DELETE_OPEN_KEY_ONLY,
-    FAILURE
-  }
-
   public OMKeyCommitRequest(OMRequest omRequest) {
     super(omRequest);
   }
@@ -152,44 +144,18 @@ public class OMKeyCommitRequest extends OMKeyRequest {
         locationInfoList.add(OmKeyLocationInfo.getFromProtobuf(keyLocation));
       }
 
-      bucketLockAcquired = omMetadataManager.getLock().acquireLock(BUCKET_LOCK,
-          volumeName, bucketName);
+      bucketLockAcquired = omMetadataManager.getLock().acquireWriteLock(
+          BUCKET_LOCK, volumeName, bucketName);
 
       validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
 
-      // Revisit this logic to see how we can skip this check when ratis is
-      // enabled.
-      if (ozoneManager.isRatisEnabled()) {
-        // Check if OzoneKey already exists in DB
-        OmKeyInfo dbKeyInfo = omMetadataManager.getKeyTable()
-            .getIfExist(dbOzoneKey);
-        if (dbKeyInfo != null) {
-          // Check if this transaction is a replay of ratis logs
-          if (isReplay(ozoneManager, dbKeyInfo, trxnLogIndex)) {
-            // During KeyCreate, we do not check the OpenKey Table for replay.
-            // This is so as to avoid an extra DB read during KeyCreate.
-            // If KeyCommit is a replay, the KeyCreate request could also have
-            // been replayed. And since we do not check for replay in KeyCreate,
-            // we should scrub the key from OpenKey table now, is it exists.
-
-            omKeyInfo = omMetadataManager.getOpenKeyTable().get(dbOpenKey);
-            if (omKeyInfo != null) {
-              omMetadataManager.getOpenKeyTable().addCacheEntry(
-                  new CacheKey<>(dbOpenKey),
-                  new CacheValue<>(Optional.absent(), trxnLogIndex));
-
-              throw new OMReplayException(true);
-            }
-            throw new OMReplayException();
-          }
-        }
-      }
-
       omKeyInfo = omMetadataManager.getOpenKeyTable().get(dbOpenKey);
+
       if (omKeyInfo == null) {
         throw new OMException("Failed to commit key, as " + dbOpenKey +
            " entry is not found in the OpenKey table", KEY_NOT_FOUND);
       }
+
       omKeyInfo.setDataSize(commitKeyArgs.getDataSize());
 
       omKeyInfo.setModificationTime(commitKeyArgs.getModificationTime());
@@ -214,22 +180,10 @@ public class OMKeyCommitRequest extends OMKeyRequest {
 
       result = Result.SUCCESS;
     } catch (IOException ex) {
-      if (ex instanceof OMReplayException) {
-        if (((OMReplayException) ex).isDBOperationNeeded()) {
-          result = Result.DELETE_OPEN_KEY_ONLY;
-          omClientResponse = new OMKeyCommitResponse(omResponse.build(),
-              dbOpenKey);
-        } else {
-          result = Result.REPLAY;
-          omClientResponse = new OMKeyCommitResponse(createReplayOMResponse(
-              omResponse));
-        }
-      } else {
-        result = Result.FAILURE;
-        exception = ex;
-        omClientResponse = new OMKeyCommitResponse(createErrorOMResponse(
+      result = Result.FAILURE;
+      exception = ex;
+      omClientResponse = new OMKeyCommitResponse(createErrorOMResponse(
             omResponse, exception));
-      }
     } finally {
       addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
           omDoubleBufferHelper);
@@ -240,11 +194,8 @@ public class OMKeyCommitRequest extends OMKeyRequest {
       }
     }
 
-    // Performing audit logging outside of the lock.
-    if (result != Result.REPLAY && result != Result.DELETE_OPEN_KEY_ONLY) {
-      auditLog(auditLogger, buildAuditMessage(OMAction.COMMIT_KEY, auditMap,
+    auditLog(auditLogger, buildAuditMessage(OMAction.COMMIT_KEY, auditMap,
           exception, getOmRequest().getUserInfo()));
-    }
 
     switch (result) {
     case SUCCESS:
@@ -253,21 +204,12 @@ public class OMKeyCommitRequest extends OMKeyRequest {
       // As key also can have multiple versions, we need to increment keys
       // only if version is 0. Currently we have not complete support of
       // versioning of keys. So, this can be revisited later.
-
       if (omKeyInfo.getKeyLocationVersions().size() == 1) {
         omMetrics.incNumKeys();
       }
       LOG.debug("Key committed. Volume:{}, Bucket:{}, Key:{}", volumeName,
           bucketName, keyName);
       break;
-    case REPLAY:
-      LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex,
-          commitKeyRequest);
-      break;
-    case DELETE_OPEN_KEY_ONLY:
-      LOG.debug("Replayed Transaction {}. Deleting old key {} from OpenKey " +
-          "table. Request: {}", trxnLogIndex, dbOpenKey, commitKeyRequest);
-      break;
     case FAILURE:
       LOG.error("Key commit failed. Volume:{}, Bucket:{}, Key:{}. Exception:{}",
           volumeName, bucketName, keyName, exception);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
index 3f4266f..c6a7e52 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
@@ -40,7 +40,6 @@ import org.apache.hadoop.ozone.audit.OMAction;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OMMetrics;
 import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.exceptions.OMReplayException;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
@@ -201,22 +200,6 @@ public class OMKeyCreateRequest extends OMKeyRequest {
           keyName);
       OmKeyInfo dbKeyInfo =
           omMetadataManager.getKeyTable().getIfExist(dbKeyName);
-      if (dbKeyInfo != null) {
-        // Check if this transaction is a replay of ratis logs.
-        // We check only the KeyTable here and not the OpenKeyTable. In case
-        // this transaction is a replay but the transaction was not committed
-        // to the KeyTable, then we recreate the key in OpenKey table. This is
-        // okay as all the subsequent transactions would also be replayed and
-        // the openKey table would eventually reach the same state.
-        // The reason we do not check the OpenKey table is to avoid a DB read
-        // in regular non-replay scenario.
-        if (isReplay(ozoneManager, dbKeyInfo, trxnLogIndex)) {
-          // Replay implies the response has already been returned to
-          // the client. So take no further action and return a dummy
-          // OMClientResponse.
-          throw new OMReplayException();
-        }
-      }
 
       OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().get(
           omMetadataManager.getBucketKey(volumeName, bucketName));
@@ -254,18 +237,12 @@ public class OMKeyCreateRequest extends OMKeyRequest {
 
       result = Result.SUCCESS;
     } catch (IOException ex) {
-      if (ex instanceof OMReplayException) {
-        result = Result.REPLAY;
-        omClientResponse = new OMKeyCreateResponse(createReplayOMResponse(
-            omResponse));
-      } else {
-        result = Result.FAILURE;
-        exception = ex;
-        omMetrics.incNumKeyAllocateFails();
-        omResponse.setCmdType(Type.CreateKey);
-        omClientResponse = new OMKeyCreateResponse(createErrorOMResponse(
-            omResponse, exception));
-      }
+      result = Result.FAILURE;
+      exception = ex;
+      omMetrics.incNumKeyAllocateFails();
+      omResponse.setCmdType(Type.CreateKey);
+      omClientResponse = new OMKeyCreateResponse(
+          createErrorOMResponse(omResponse, exception));
     } finally {
       addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
           omDoubleBufferHelper);
@@ -276,22 +253,18 @@ public class OMKeyCreateRequest extends OMKeyRequest {
     }
 
     // Audit Log outside the lock
-    if (result != Result.REPLAY) {
-      Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
-      auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(
-          OMAction.ALLOCATE_KEY, auditMap, exception,
-          getOmRequest().getUserInfo()));
-    }
+
+    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
+    auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(
+        OMAction.ALLOCATE_KEY, auditMap, exception,
+        getOmRequest().getUserInfo()));
+
 
     switch (result) {
     case SUCCESS:
       LOG.debug("Key created. Volume:{}, Bucket:{}, Key:{}", volumeName,
           bucketName, keyName);
       break;
-    case REPLAY:
-      LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex,
-          createKeyRequest);
-      break;
     case FAILURE:
       LOG.error("Key creation failed. Volume:{}, Bucket:{}, Key{}. " +
               "Exception:{}", volumeName, bucketName, keyName, exception);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
index 167330a..b0eb6fd 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OMMetrics;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.exceptions.OMReplayException;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.om.response.key.OMKeyDeleteResponse;
@@ -130,14 +129,6 @@ public class OMKeyDeleteRequest extends OMKeyRequest {
         throw new OMException("Key not found", KEY_NOT_FOUND);
       }
 
-      // Check if this transaction is a replay of ratis logs.
-      if (isReplay(ozoneManager, omKeyInfo, trxnLogIndex)) {
-        // Replay implies the response has already been returned to
-        // the client. So take no further action and return a dummy
-        // OMClientResponse.
-        throw new OMReplayException();
-      }
-
       // Set the UpdateID to current transactionLogIndex
       omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
 
@@ -158,16 +149,10 @@ public class OMKeyDeleteRequest extends OMKeyRequest {
 
       result = Result.SUCCESS;
     } catch (IOException ex) {
-      if (ex instanceof OMReplayException) {
-        result = Result.REPLAY;
-        omClientResponse = new OMKeyDeleteResponse(createReplayOMResponse(
-            omResponse));
-      } else {
-        result = Result.FAILURE;
-        exception = ex;
-        omClientResponse = new OMKeyDeleteResponse(createErrorOMResponse(
-            omResponse, exception));
-      }
+      result = Result.FAILURE;
+      exception = ex;
+      omClientResponse = new OMKeyDeleteResponse(
+          createErrorOMResponse(omResponse, exception));
     } finally {
       addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
             omDoubleBufferHelper);
@@ -178,10 +163,9 @@ public class OMKeyDeleteRequest extends OMKeyRequest {
     }
 
     // Performing audit logging outside of the lock.
-    if (result != Result.REPLAY) {
-      auditLog(auditLogger, buildAuditMessage(OMAction.DELETE_KEY, auditMap,
-          exception, userInfo));
-    }
+    auditLog(auditLogger, buildAuditMessage(OMAction.DELETE_KEY, auditMap,
+        exception, userInfo));
+
 
     switch (result) {
     case SUCCESS:
@@ -189,10 +173,6 @@ public class OMKeyDeleteRequest extends OMKeyRequest {
       LOG.debug("Key deleted. Volume:{}, Bucket:{}, Key:{}", volumeName,
           bucketName, keyName);
       break;
-    case REPLAY:
-      LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex,
-          deleteKeyRequest);
-      break;
     case FAILURE:
       omMetrics.incNumKeyDeleteFails();
       LOG.error("Key delete failed. Volume:{}, Bucket:{}, Key:{}.",
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java
index f7783db..ce7f1e9 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java
@@ -18,12 +18,8 @@
 
 package org.apache.hadoop.ozone.om.request.key;
 
-import java.io.IOException;
 import java.util.ArrayList;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
@@ -37,8 +33,6 @@ import org.slf4j.LoggerFactory;
 
 import java.util.List;
 
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
-
 /**
  * Handles purging of keys from OM DB.
  */
@@ -54,9 +48,6 @@ public class OMKeyPurgeRequest extends OMKeyRequest {
   @Override
   public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
       long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
-
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-
     PurgeKeysRequest purgeKeysRequest = getOmRequest().getPurgeKeysRequest();
     List<DeletedKeys> bucketDeletedKeysList = purgeKeysRequest
         .getDeletedKeysList();
@@ -65,97 +56,19 @@ public class OMKeyPurgeRequest extends OMKeyRequest {
     OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
         getOmRequest());
     OMClientResponse omClientResponse = null;
-    boolean success = true;
-    IOException exception = null;
 
-    // Filter the keys that have updateID > transactionLogIndex. This is done so
-    // that in case this transaction is a replay, we do not purge keys
-    // created after the original purge request.
-    // PurgeKeys request has keys belonging to same bucket grouped together.
-    // We get each bucket lock and check the above condition.
-    for (DeletedKeys bucketWithDeleteKeys : bucketDeletedKeysList) {
-      boolean acquiredLock = false;
-      String volumeName = bucketWithDeleteKeys.getVolumeName();
-      String bucketName = bucketWithDeleteKeys.getBucketName();
-      ArrayList<String> keysNotPurged = new ArrayList<>();
-      Result result = null;
-      try {
-        acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
-            volumeName, bucketName);
-        for (String deletedKey : bucketWithDeleteKeys.getKeysList()) {
-          RepeatedOmKeyInfo repeatedOmKeyInfo =
-              omMetadataManager.getDeletedTable().get(deletedKey);
-          boolean purgeKey = true;
-          if (repeatedOmKeyInfo != null) {
-            for (OmKeyInfo omKeyInfo : repeatedOmKeyInfo.getOmKeyInfoList()) {
-              // Discard those keys whose updateID is > transactionLogIndex.
-              // This could happen when the PurgeRequest is replayed.
-              if (isReplay(ozoneManager, omKeyInfo,
-                  trxnLogIndex)) {
-                purgeKey = false;
-                result = Result.REPLAY;
-                break;
-              }
-              // TODO: If a deletedKey has any one OmKeyInfo which was
-              //  deleted after the original PurgeRequest (updateID >
-              //  trxnLogIndex), we avoid purging that whole key in the
-              //  replay request. Instead of discarding the whole key, we can
-              //  identify the OmKeyInfo's which have updateID <
-              //  trxnLogIndex and purge only those OMKeyInfo's from the
-              //  deletedKey in DeletedTable.
-            }
-            if (purgeKey) {
-              keysToBePurgedList.add(deletedKey);
-            } else {
-              keysNotPurged.add(deletedKey);
-            }
-          }
-        }
-      } catch (IOException ex) {
-        success = false;
-        exception = ex;
-        break;
-      } finally {
-        if (acquiredLock) {
-          omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
-              bucketName);
-        }
-      }
 
-      if (result == Result.REPLAY) {
-        LOG.debug("Replayed Transaction {}. Request: {}", trxnLogIndex,
-            purgeKeysRequest);
-        if (!keysNotPurged.isEmpty()) {
-          StringBuilder notPurgeList = new StringBuilder();
-          for (String key : keysNotPurged) {
-            notPurgeList.append(", ").append(key);
-          }
-          LOG.debug("Following keys from Volume:{}, Bucket:{} will not be" +
-              " purged: {}", notPurgeList.toString().substring(2));
-        }
+    for (DeletedKeys bucketWithDeleteKeys : bucketDeletedKeysList) {
+      for (String deletedKey : bucketWithDeleteKeys.getKeysList()) {
+        keysToBePurgedList.add(deletedKey);
       }
     }
 
-    if (success) {
-      if (LOG.isDebugEnabled()) {
-        if (keysToBePurgedList.isEmpty()) {
-          LOG.debug("No keys will be purged as part of KeyPurgeRequest: {}",
-              purgeKeysRequest);
-        } else {
-          LOG.debug("Following keys will be purged as part of " +
-              "KeyPurgeRequest: {} - {}", purgeKeysRequest,
-              String.join(",", keysToBePurgedList));
-        }
-      }
-      omClientResponse = new OMKeyPurgeResponse(omResponse.build(),
+    omClientResponse = new OMKeyPurgeResponse(omResponse.build(),
           keysToBePurgedList);
-    } else {
-      omClientResponse = new OMKeyPurgeResponse(createErrorOMResponse(
-          omResponse, exception));
-    }
-
     addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
         omDoubleBufferHelper);
+
     return omClientResponse;
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java
index f0069a1..dc83ff6 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java
@@ -71,16 +71,6 @@ public class OMKeyRenameRequest extends OMKeyRequest {
     super(omRequest);
   }
 
-  /**
-   * Stores the result of request execution for Rename Requests.
-   */
-  private enum Result {
-    SUCCESS,
-    DELETE_FROM_KEY_ONLY,
-    REPLAY,
-    FAILURE,
-  }
-
   @Override
   public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
 
@@ -162,87 +152,40 @@ public class OMKeyRenameRequest extends OMKeyRequest {
       OmKeyInfo toKeyValue = omMetadataManager.getKeyTable().get(toKey);
 
       if (toKeyValue != null) {
-
-        // Check if this transaction is a replay of ratis logs.
-        if (isReplay(ozoneManager, toKeyValue, trxnLogIndex)) {
-
-          // Check if fromKey is still in the DB and created before this
-          // replay.
-          // For example, lets say we have the following sequence of
-          // transactions.
-          //     Trxn 1 : Create Key1
-          //     Trnx 2 : Rename Key1 to Key2 -> Deletes Key1 and Creates Key2
-          // Now if these transactions are replayed:
-          //     Replay Trxn 1 : Creates Key1 again as Key1 does not exist in DB
-          //     Replay Trxn 2 : Key2 is not created as it exists in DB and the
-          //                     request would be deemed a replay. But Key1
-          //                     is still in the DB and needs to be deleted.
-          fromKeyValue = omMetadataManager.getKeyTable().get(fromKey);
-          if (fromKeyValue != null) {
-            // Check if this replay transaction was after the fromKey was
-            // created. If so, we have to delete the fromKey.
-            if (ozoneManager.isRatisEnabled() &&
-                trxnLogIndex > fromKeyValue.getUpdateID()) {
-              // Add to cache. Only fromKey should be deleted. ToKey already
-              // exists in DB as this transaction is a replay.
-              result = Result.DELETE_FROM_KEY_ONLY;
-              Table<String, OmKeyInfo> keyTable = omMetadataManager
-                  .getKeyTable();
-              keyTable.addCacheEntry(new CacheKey<>(fromKey),
-                  new CacheValue<>(Optional.absent(), trxnLogIndex));
-
-              omClientResponse = new OMKeyRenameResponse(omResponse
-                  .setRenameKeyResponse(RenameKeyResponse.newBuilder()).build(),
-                  fromKeyName, fromKeyValue);
-            }
-          }
-
-          if (result == null) {
-            result = Result.REPLAY;
-            // If toKey exists and fromKey does not, then no further action is
-            // required. Return a dummy OMClientResponse.
-            omClientResponse = new OMKeyRenameResponse(createReplayOMResponse(
-                omResponse));
-          }
-        } else {
-          // This transaction is not a replay. toKeyName should not exist
-          throw new OMException("Key already exists " + toKeyName,
+        throw new OMException("Key already exists " + toKeyName,
               OMException.ResultCodes.KEY_ALREADY_EXISTS);
-        }
-      } else {
-
-        // This transaction is not a replay.
+      }
 
-        // fromKeyName should exist
-        fromKeyValue = omMetadataManager.getKeyTable().get(fromKey);
-        if (fromKeyValue == null) {
+      // fromKeyName should exist
+      fromKeyValue = omMetadataManager.getKeyTable().get(fromKey);
+      if (fromKeyValue == null) {
           // TODO: Add support for renaming open key
-          throw new OMException("Key not found " + fromKey, KEY_NOT_FOUND);
-        }
+        throw new OMException("Key not found " + fromKey, KEY_NOT_FOUND);
+      }
 
-        fromKeyValue.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
+      fromKeyValue.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
 
-        fromKeyValue.setKeyName(toKeyName);
-        //Set modification time
-        fromKeyValue.setModificationTime(renameKeyArgs.getModificationTime());
+      fromKeyValue.setKeyName(toKeyName);
 
-        // Add to cache.
-        // fromKey should be deleted, toKey should be added with newly updated
-        // omKeyInfo.
-        Table<String, OmKeyInfo> keyTable = omMetadataManager.getKeyTable();
+      //Set modification time
+      fromKeyValue.setModificationTime(renameKeyArgs.getModificationTime());
 
-        keyTable.addCacheEntry(new CacheKey<>(fromKey),
-            new CacheValue<>(Optional.absent(), trxnLogIndex));
+      // Add to cache.
+      // fromKey should be deleted, toKey should be added with newly updated
+      // omKeyInfo.
+      Table<String, OmKeyInfo> keyTable = omMetadataManager.getKeyTable();
 
-        keyTable.addCacheEntry(new CacheKey<>(toKey),
-            new CacheValue<>(Optional.of(fromKeyValue), trxnLogIndex));
+      keyTable.addCacheEntry(new CacheKey<>(fromKey),
+          new CacheValue<>(Optional.absent(), trxnLogIndex));
 
-        omClientResponse = new OMKeyRenameResponse(omResponse
-            .setRenameKeyResponse(RenameKeyResponse.newBuilder()).build(),
-            fromKeyName, toKeyName, fromKeyValue);
+      keyTable.addCacheEntry(new CacheKey<>(toKey),
+          new CacheValue<>(Optional.of(fromKeyValue), trxnLogIndex));
 
-        result = Result.SUCCESS;
-      }
+      omClientResponse = new OMKeyRenameResponse(omResponse
+          .setRenameKeyResponse(RenameKeyResponse.newBuilder()).build(),
+          fromKeyName, toKeyName, fromKeyValue);
+
+      result = Result.SUCCESS;
     } catch (IOException ex) {
       result = Result.FAILURE;
       exception = ex;
@@ -257,10 +200,8 @@ public class OMKeyRenameRequest extends OMKeyRequest {
       }
     }
 
-    if (result == Result.SUCCESS || result == Result.FAILURE) {
-      auditLog(auditLogger, buildAuditMessage(OMAction.RENAME_KEY, auditMap,
-          exception, getOmRequest().getUserInfo()));
-    }
+    auditLog(auditLogger, buildAuditMessage(OMAction.RENAME_KEY, auditMap,
+        exception, getOmRequest().getUserInfo()));
 
     switch (result) {
     case SUCCESS:
@@ -268,15 +209,6 @@ public class OMKeyRenameRequest extends OMKeyRequest {
               " fromKey:{} toKey:{}. ", volumeName, bucketName, fromKeyName,
           toKeyName);
       break;
-    case DELETE_FROM_KEY_ONLY:
-      LOG.debug("Replayed transaction {}: {}. Renamed Key {} already exists. " +
-              "Deleting old key {}.", trxnLogIndex, renameKeyRequest, toKey,
-          fromKey);
-      break;
-    case REPLAY:
-      LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex,
-          renameKeyRequest);
-      break;
     case FAILURE:
       ozoneManager.getMetrics().incNumKeyRenameFails();
       LOG.error("Rename key failed for volume:{} bucket:{} fromKey:{} " +
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java
index b5e8dc8..9a7d993 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OMMetrics;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.exceptions.OMReplayException;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
@@ -151,13 +150,6 @@ public class OMKeysDeleteRequest extends OMKeyRequest {
           throw new OMException("Key not found: " + keyName, KEY_NOT_FOUND);
         }
 
-        // Check if this transaction is a replay of ratis logs.
-        if (isReplay(ozoneManager, omKeyInfo, trxnLogIndex)) {
-          // Replay implies the response has already been returned to
-          // the client. So take no further action and return a dummy
-          // OMClientResponse.
-          throw new OMReplayException();
-        }
       }
 
       omClientResponse = new OMKeysDeleteResponse(omResponse
@@ -165,29 +157,20 @@ public class OMKeysDeleteRequest extends OMKeyRequest {
           omKeyInfoList, trxnLogIndex, ozoneManager.isRatisEnabled());
       result = Result.SUCCESS;
     } catch (IOException ex) {
-      if (ex instanceof OMReplayException) {
-        result = Result.REPLAY;
-        omClientResponse = new OMKeyDeleteResponse(createReplayOMResponse(
-            omResponse));
-      } else {
-        result = Result.FAILURE;
-        exception = ex;
-
-        omClientResponse = new OMKeyDeleteResponse(
-            createOperationKeysErrorOMResponse(omResponse, exception,
-                unDeletedKeys));
-      }
+      result = Result.FAILURE;
+      exception = ex;
+
+      omClientResponse = new OMKeyDeleteResponse(
+          createOperationKeysErrorOMResponse(omResponse, exception,
+              unDeletedKeys));
 
     } finally {
       addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
           omDoubleBufferHelper);
     }
 
-    // Performing audit logging outside of the lock.
-    if (result != Result.REPLAY) {
-      auditLog(auditLogger, buildAuditMessage(
-          OMAction.DELETE_KEY, auditMap, exception, userInfo));
-    }
+    auditLog(auditLogger, buildAuditMessage(
+        OMAction.DELETE_KEY, auditMap, exception, userInfo));
 
     switch (result) {
     case SUCCESS:
@@ -195,10 +178,6 @@ public class OMKeysDeleteRequest extends OMKeyRequest {
       LOG.debug("Key deleted. Volume:{}, Bucket:{}, Key:{}", volumeName,
           bucketName, keyName);
       break;
-    case REPLAY:
-      LOG.debug("Replayed Transaction {} ignored. Request: {}",
-          trxnLogIndex, deleteKeyRequest);
-      break;
     case FAILURE:
       omMetrics.incNumKeyDeleteFails();
       LOG.error("Key delete failed. Volume:{}, Bucket:{}, Key{}." +
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java
index 025c258..9fae498 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java
@@ -24,7 +24,6 @@ import com.google.common.base.Optional;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.exceptions.OMReplayException;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
@@ -93,14 +92,6 @@ public abstract class OMKeyAclRequest extends OMClientRequest {
         throw new OMException(OMException.ResultCodes.KEY_NOT_FOUND);
       }
 
-      // Check if this transaction is a replay of ratis logs.
-      // If this is a replay, then the response has already been returned to
-      // the client. So take no further action and return a dummy
-      // OMClientResponse.
-      if (isReplay(ozoneManager, omKeyInfo, trxnLogIndex)) {
-        throw new OMReplayException();
-      }
-
       operationResult = apply(omKeyInfo, trxnLogIndex);
       omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
 
@@ -112,14 +103,9 @@ public abstract class OMKeyAclRequest extends OMClientRequest {
       omClientResponse = onSuccess(omResponse, omKeyInfo, operationResult);
       result = Result.SUCCESS;
     } catch (IOException ex) {
-      if (ex instanceof OMReplayException) {
-        result = Result.REPLAY;
-        omClientResponse = onReplay(omResponse);
-      } else {
-        result = Result.FAILURE;
-        exception = ex;
-        omClientResponse = onFailure(omResponse, ex);
-      }
+      result = Result.FAILURE;
+      exception = ex;
+      omClientResponse = onFailure(omResponse, ex);
     } finally {
       addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
           omDoubleBufferHelper);
@@ -170,10 +156,6 @@ public abstract class OMKeyAclRequest extends OMClientRequest {
     return new OMKeyAclResponse(createErrorOMResponse(omResponse, exception));
   }
 
-  OMClientResponse onReplay(OMResponse.Builder omResponse) {
-    return new OMKeyAclResponse(createReplayOMResponse(omResponse));
-  }
-
   /**
    * Completion hook for final processing before return without lock.
    * Usually used for logging without lock and metric update.
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java
index 444c0df..3697cb8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java
@@ -87,12 +87,6 @@ public class OMKeyAddAclRequest extends OMKeyAclRequest {
         }
       }
       break;
-    case REPLAY:
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex,
-            getOmRequest());
-      }
-      break;
     case FAILURE:
       LOG.error("Add acl {} to path {} failed!", ozoneAcls, path, exception);
       break;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java
index 18e999d..67b891a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java
@@ -88,12 +88,6 @@ public class OMKeyRemoveAclRequest extends OMKeyAclRequest {
         }
       }
       break;
-    case REPLAY:
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex,
-            getOmRequest());
-      }
-      break;
     case FAILURE:
       LOG.error("Remove acl {} to path {} failed!", ozoneAcls, path, exception);
       break;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java
index d8dbe77..70f7b28 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java
@@ -84,12 +84,6 @@ public class OMKeySetAclRequest extends OMKeyAclRequest {
         LOG.debug("Set acl: {} to path: {} success!", ozoneAcls, path);
       }
       break;
-    case REPLAY:
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex,
-            getOmRequest());
-      }
-      break;
     case FAILURE:
       LOG.error("Set acl {} to path {} failed!", ozoneAcls, path, exception);
       break;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java
index 7cde2c2..e928402 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java
@@ -26,12 +26,10 @@ import org.apache.hadoop.ozone.om.OMMetrics;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.PrefixManagerImpl;
 import org.apache.hadoop.ozone.om.PrefixManagerImpl.OMPrefixAclOpResult;
-import org.apache.hadoop.ozone.om.exceptions.OMReplayException;
 import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.key.acl.prefix.OMPrefixAclResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
@@ -86,14 +84,6 @@ public abstract class OMPrefixAclRequest extends OMClientRequest {
 
       omPrefixInfo = omMetadataManager.getPrefixTable().get(prefixPath);
 
-      // Check if this transaction is a replay of ratis logs.
-      if (omPrefixInfo != null) {
-        if (isReplay(ozoneManager, omPrefixInfo, trxnLogIndex)) {
-          // This is a replayed transaction. Return dummy response.
-          throw new OMReplayException();
-        }
-      }
-
       try {
         operationResult = apply(prefixManager, omPrefixInfo, trxnLogIndex);
       } catch (IOException ex) {
@@ -129,14 +119,9 @@ public abstract class OMPrefixAclRequest extends OMClientRequest {
       result = Result.SUCCESS;
 
     } catch (IOException ex) {
-      if (ex instanceof OMReplayException) {
-        result = Result.REPLAY;
-        omClientResponse = onReplay(omResponse);
-      } else {
-        result = Result.FAILURE;
-        exception = ex;
-        omClientResponse = onFailure(omResponse, ex);
-      }
+      result = Result.FAILURE;
+      exception = ex;
+      omClientResponse = onFailure(omResponse, ex);
     } finally {
       addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
           omDoubleBufferHelper);
@@ -187,15 +172,6 @@ public abstract class OMPrefixAclRequest extends OMClientRequest {
       IOException exception);
 
   /**
-   * Get the OM Client Response on replayed transactions.
-   * @param omResonse
-   * @return OMClientResponse
-   */
-  OMClientResponse onReplay(OMResponse.Builder omResonse) {
-    return new OMPrefixAclResponse(createReplayOMResponse(omResonse));
-  }
-
-  /**
    * Completion hook for final processing before return without lock.
    * Usually used for logging without lock and metric update.
    * @param operationResult
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java
index bd25e07..7160042 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java
@@ -106,12 +106,6 @@ public class OMPrefixAddAclRequest extends OMPrefixAclRequest {
         }
       }
       break;
-    case REPLAY:
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex,
-            getOmRequest());
-      }
-      break;
     case FAILURE:
       omMetrics.incNumBucketUpdateFails();
       LOG.error("Add acl {} to path {} failed!", ozoneAcls,
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java
index 72c199c..3731ad1 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java
@@ -103,12 +103,6 @@ public class OMPrefixRemoveAclRequest extends OMPrefixAclRequest {
         }
       }
       break;
-    case REPLAY:
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex,
-            getOmRequest());
-      }
-      break;
     case FAILURE:
       omMetrics.incNumBucketUpdateFails();
       LOG.error("Remove acl {} to path {} failed!", ozoneAcls,
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java
index 122ada1..44bc43b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java
@@ -99,12 +99,6 @@ public class OMPrefixSetAclRequest extends OMPrefixAclRequest {
             ozoneObj.getPath());
       }
       break;
-    case REPLAY:
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex,
-            getOmRequest());
-      }
-      break;
     case FAILURE:
       omMetrics.incNumBucketUpdateFails();
       LOG.error("Set acl {} to path {} failed!", ozoneAcls,
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
index f51cba8..4f95fe4 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
@@ -121,11 +121,6 @@ public class S3InitiateMultipartUploadRequest extends OMKeyRequest {
 
       validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
 
-      // We do not check if this transaction is a replay here to avoid extra
-      // DB reads. Even if this transaction is replayed, in
-      // S3MultipartUploadComplete request, we would delete this entry from
-      // the openKeyTable. Hence, it is safe to replay this transaction here.
-
       // We are adding uploadId to key, because if multiple users try to
       // perform multipart upload on the same key, each will try to upload, who
       // ever finally commit the key, we see that key in ozone. Suppose if we
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
index 8c8e010..4518a3b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
@@ -122,26 +122,6 @@ public class S3MultipartUploadAbortRequest extends OMKeyRequest {
             OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
       }
 
-      // We do not check if this transaction is a replay. If OmKeyInfo
-      // exists, then we should delete it from OpenKeyTable irrespective of
-      // whether this transaction is a replay. There are 3 scenarios:
-      //   Trxn 1 : Initiate Multipart Upload request for key1
-      //            (openKey = openKey1)
-      //   Trxn 2 : Abort Multipart Upload request for opneKey1
-      //
-      // Scenario 1 : This is not a replay transaction.
-      //      omKeyInfo is not null and we proceed with the abort request to
-      //      deleted openKey1 from openKeyTable.
-      // Scenario 2 : Trxn 1 and 2 are replayed.
-      //      Replay of Trxn 1 would create openKey1 in openKeyTable as we do
-      //      not check for replay in S3InitiateMultipartUploadRequest.
-      //      Hence, we should replay Trxn 2 also to maintain consistency.
-      // Scenario 3 : Trxn 2 is replayed and not Trxn 1.
-      //      This will result in omKeyInfo == null as openKey1 would already
-      //      have been deleted from openKeyTable.
-      // So in both scenarios 1 and 2 (omKeyInfo not null), we should go
-      // ahead with this request irrespective of whether it is a replay or not.
-
       multipartKeyInfo = omMetadataManager.getMultipartInfoTable()
           .get(multipartKey);
       multipartKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
index d9004c0..346ff87 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
@@ -24,7 +24,6 @@ import org.apache.hadoop.ozone.audit.OMAction;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.exceptions.OMReplayException;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
@@ -133,15 +132,6 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
       omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
 
       if (omKeyInfo == null) {
-        // Check the KeyTable if this transaction is a replay of ratis logs.
-        String ozoneKey = omMetadataManager.getOzoneKey(volumeName,
-            bucketName, keyName);
-        OmKeyInfo dbKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
-        if (dbKeyInfo != null) {
-          if (isReplay(ozoneManager, dbKeyInfo, trxnLogIndex)) {
-            throw new OMReplayException();
-          }
-        }
         throw new OMException("Failed to commit Multipart Upload key, as " +
             openKey + "entry is not found in the openKey table",
             KEY_NOT_FOUND);
@@ -212,21 +202,17 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
               .setPartName(partName));
       omClientResponse = new S3MultipartUploadCommitPartResponse(
           omResponse.build(), multipartKey, openKey,
-          multipartKeyInfo, oldPartKeyInfo, ozoneManager.isRatisEnabled());
+          multipartKeyInfo, oldPartKeyInfo, omKeyInfo,
+          ozoneManager.isRatisEnabled());
 
       result = Result.SUCCESS;
     } catch (IOException ex) {
-      if (ex instanceof OMReplayException) {
-        result = Result.REPLAY;
-        omClientResponse = new S3MultipartUploadCommitPartResponse(
-            createReplayOMResponse(omResponse));
-      } else {
-        result = Result.FAILURE;
-        exception = ex;
-        omClientResponse = new S3MultipartUploadCommitPartResponse(
-            createErrorOMResponse(omResponse, exception), openKey, omKeyInfo,
-            ozoneManager.isRatisEnabled());
-      }
+      result = Result.FAILURE;
+      exception = ex;
+      omClientResponse = new S3MultipartUploadCommitPartResponse(
+          createErrorOMResponse(omResponse, exception), multipartKey, openKey,
+          multipartKeyInfo, oldPartKeyInfo, omKeyInfo,
+          ozoneManager.isRatisEnabled());
     } finally {
       addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
           omDoubleBufferHelper);
@@ -243,10 +229,6 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
         getOmRequest().getUserInfo()));
 
     switch (result) {
-    case REPLAY:
-      LOG.debug("Replayed Transaction {} ignored. Request: {}",
-          trxnLogIndex, multipartCommitUploadPartRequest);
-      break;
     case SUCCESS:
       LOG.debug("MultipartUpload Commit is successfully for Key:{} in " +
           "Volume/Bucket {}/{}", keyName, volumeName, bucketName);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
index 17a8c61..c4e315c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
@@ -31,7 +31,6 @@ import org.apache.hadoop.ozone.audit.OMAction;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.exceptions.OMReplayException;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
@@ -68,13 +67,6 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
   private static final Logger LOG =
       LoggerFactory.getLogger(S3MultipartUploadCompleteRequest.class);
 
-  private enum Result {
-    SUCCESS,
-    REPLAY,
-    DELETE_OPEN_KEY_ONLY,
-    FAILURE
-  }
-
   public S3MultipartUploadCompleteRequest(OMRequest omRequest) {
     super(omRequest);
   }
@@ -132,36 +124,6 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
 
       validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
 
-      OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
-
-      if (omKeyInfo != null) {
-        // Check if this transaction is a replay of ratis logs.
-        if (isReplay(ozoneManager, omKeyInfo, trxnLogIndex)) {
-          // During S3InitiateMultipartUpload or KeyCreate, we do not check
-          // the OpenKey Table for replay. This is so as to avoid an extra
-          // DB read during KeyCreate.
-          // If this transaction is a replay, the S3InitiateMultipartUpload
-          // and part key KeyCreate request could also have been replayed.
-          // And since we do not check for replay there, we should scrub
-          // the key from OpenKey table and MultipartInfo table now, if it
-          // exists.
-
-          OmKeyInfo openMultipartKeyInfo = omMetadataManager
-              .getOpenKeyTable().get(multipartKey);
-          if (openMultipartKeyInfo != null) {
-            omMetadataManager.getOpenKeyTable().addCacheEntry(
-                new CacheKey<>(multipartKey),
-                new CacheValue<>(Optional.absent(), trxnLogIndex));
-            omMetadataManager.getMultipartInfoTable().addCacheEntry(
-                new CacheKey<>(multipartKey),
-                new CacheValue<>(Optional.absent(), trxnLogIndex));
-
-            throw new OMReplayException(true);
-          }
-          throw new OMReplayException(false);
-        }
-      }
-
       OmMultipartKeyInfo multipartKeyInfo = omMetadataManager
           .getMultipartInfoTable().get(multipartKey);
 
@@ -259,6 +221,7 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
         HddsProtos.ReplicationFactor factor =
             partKeyInfoMap.lastEntry().getValue().getPartKeyInfo().getFactor();
 
+        OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
         if (omKeyInfo == null) {
           // This is a newly added key, it does not have any versions.
           OmKeyLocationInfoGroup keyLocationInfoGroup = new
@@ -329,22 +292,10 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
       }
 
     } catch (IOException ex) {
-      if (ex instanceof OMReplayException) {
-        if (((OMReplayException) ex).isDBOperationNeeded()) {
-          result = Result.DELETE_OPEN_KEY_ONLY;
-          omClientResponse = new S3MultipartUploadCompleteResponse(
-              omResponse.build(), multipartKey);
-        } else {
-          result = Result.REPLAY;
-          omClientResponse = new S3MultipartUploadCompleteResponse(
-              createReplayOMResponse(omResponse));
-        }
-      } else {
-        result = Result.FAILURE;
-        exception = ex;
-        omClientResponse = new S3MultipartUploadCompleteResponse(
-            createErrorOMResponse(omResponse, exception));
-      }
+      result = Result.FAILURE;
+      exception = ex;
+      omClientResponse = new S3MultipartUploadCompleteResponse(
+          createErrorOMResponse(omResponse, exception));
     } finally {
       addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
           omDoubleBufferHelper);
@@ -354,30 +305,19 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
       }
     }
 
-    if (result != Result.REPLAY && result != Result.DELETE_OPEN_KEY_ONLY) {
-      Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
-      auditMap.put(OzoneConsts.MULTIPART_LIST, partsList.toString());
+    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
+    auditMap.put(OzoneConsts.MULTIPART_LIST, partsList.toString());
 
-      // audit log
-      auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(
-          OMAction.COMPLETE_MULTIPART_UPLOAD, auditMap, exception,
-          getOmRequest().getUserInfo()));
-    }
+    // audit log
+    auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(
+        OMAction.COMPLETE_MULTIPART_UPLOAD, auditMap, exception,
+        getOmRequest().getUserInfo()));
 
     switch (result) {
     case SUCCESS:
       LOG.debug("MultipartUpload Complete request is successfull for Key: {} " +
           "in Volume/Bucket {}/{}", keyName, volumeName, bucketName);
       break;
-    case REPLAY:
-      LOG.debug("Replayed Transaction {} ignored. Request: {}",
-          trxnLogIndex, multipartUploadCompleteRequest);
-      break;
-    case DELETE_OPEN_KEY_ONLY:
-      LOG.debug("Replayed Transaction {}. Deleting old key {} from OpenKey " +
-          "table and MultipartInfo table. Request: {}", trxnLogIndex,
-          multipartKey, multipartUploadCompleteRequest);
-      break;
     case FAILURE:
       ozoneManager.getMetrics().incNumCompleteMultipartUploadFails();
       LOG.error("MultipartUpload Complete request failed for Key: {} " +
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java
index 765a20c..7e2ccd9 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java
@@ -147,11 +147,12 @@ public class OMVolumeCreateRequest extends OMVolumeRequest {
 
       String dbVolumeKey = omMetadataManager.getVolumeKey(volume);
 
-      OmVolumeArgs dbVolumeArgs =
-          omMetadataManager.getVolumeTable().get(dbVolumeKey);
-
       UserVolumeInfo volumeList = null;
-      if (dbVolumeArgs == null) {
+      if (omMetadataManager.getVolumeTable().isExist(dbVolumeKey)) {
+        LOG.debug("volume:{} already exists", omVolumeArgs.getVolume());
+        throw new OMException("Volume already exists",
+            OMException.ResultCodes.VOLUME_ALREADY_EXISTS);
+      } else {
         String dbUserKey = omMetadataManager.getUserKey(owner);
         volumeList = omMetadataManager.getUserTable().get(dbUserKey);
         volumeList = addVolumeToOwnerList(volumeList, volume, owner,
@@ -164,20 +165,6 @@ public class OMVolumeCreateRequest extends OMVolumeRequest {
         omClientResponse = new OMVolumeCreateResponse(omResponse.build(),
             omVolumeArgs, volumeList);
         LOG.debug("volume:{} successfully created", omVolumeArgs.getVolume());
-      } else {
-        // Check if this transaction is a replay of ratis logs.
-        if (isReplay(ozoneManager, dbVolumeArgs, transactionLogIndex)) {
-          // Replay implies the response has already been returned to
-          // the client. So take no further action and return a dummy
-          // OMClientResponse.
-          LOG.debug("Replayed Transaction {} ignored. Request: {}",
-              transactionLogIndex, createVolumeRequest);
-          return new OMVolumeCreateResponse(createReplayOMResponse(omResponse));
-        } else {
-          LOG.debug("volume:{} already exists", omVolumeArgs.getVolume());
-          throw new OMException("Volume already exists",
-              OMException.ResultCodes.VOLUME_ALREADY_EXISTS);
-        }
       }
 
     } catch (IOException ex) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java
index 4d2f055..ce93e26 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java
@@ -99,16 +99,6 @@ public class OMVolumeDeleteRequest extends OMVolumeRequest {
 
       OmVolumeArgs omVolumeArgs = getVolumeInfo(omMetadataManager, volume);
 
-      // Check if this transaction is a replay of ratis logs.
-      // If this is a replay, then the response has already been returned to
-      // the client. So take no further action and return a dummy
-      // OMClientResponse.
-      if (isReplay(ozoneManager, omVolumeArgs, transactionLogIndex)) {
-        LOG.debug("Replayed Transaction {} ignored. Request: {}",
-            transactionLogIndex, deleteVolumeRequest);
-        return new OMVolumeDeleteResponse(createReplayOMResponse(omResponse));
-      }
-
       owner = omVolumeArgs.getOwnerName();
       acquiredUserLock = omMetadataManager.getLock().acquireWriteLock(USER_LOCK,
           owner);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java
index 1eea419..6873086 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java
@@ -18,40 +18,34 @@
 
 package org.apache.hadoop.ozone.om.request.volume;
 
-import java.io.IOException;
-import java.util.Map;
-
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
-import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
-
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.audit.AuditLogger;
 import org.apache.hadoop.ozone.audit.OMAction;
-import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OMMetrics;
 import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.om.response.volume.OMVolumeSetOwnerResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .SetVolumePropertyRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .SetVolumePropertyResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetVolumePropertyRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetVolumePropertyResponse;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Map;
 
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK;
 
@@ -124,30 +118,13 @@ public class OMVolumeSetOwnerRequest extends OMVolumeRequest {
       }
 
       long maxUserVolumeCount = ozoneManager.getMaxUserVolumeCount();
-      String dbVolumeKey = omMetadataManager.getVolumeKey(volume);
       OzoneManagerProtocolProtos.UserVolumeInfo oldOwnerVolumeList = null;
       OzoneManagerProtocolProtos.UserVolumeInfo newOwnerVolumeList = null;
       OmVolumeArgs omVolumeArgs = null;
 
       acquiredVolumeLock = omMetadataManager.getLock().acquireWriteLock(
           VOLUME_LOCK, volume);
-      omVolumeArgs = omMetadataManager.getVolumeTable().get(dbVolumeKey);
-      if (omVolumeArgs == null) {
-        LOG.debug("Changing volume ownership failed for user:{} volume:{}",
-            newOwner, volume);
-        throw new OMException("Volume " + volume + " is not found",
-            OMException.ResultCodes.VOLUME_NOT_FOUND);
-      }
-
-      // Check if this transaction is a replay of ratis logs.
-      // If this is a replay, then the response has already been returned to
-      // the client. So take no further action and return a dummy
-      // OMClientResponse.
-      if (isReplay(ozoneManager, omVolumeArgs, transactionLogIndex)) {
-        LOG.debug("Replayed Transaction {} ignored. Request: {}",
-            transactionLogIndex, setVolumePropertyRequest);
-        return new OMVolumeSetOwnerResponse(createReplayOMResponse(omResponse));
-      }
+      omVolumeArgs = getVolumeInfo(omMetadataManager, volume);
       oldOwner = omVolumeArgs.getOwnerName();
 
       // Return OK immediately if newOwner is the same as oldOwner.
@@ -194,7 +171,7 @@ public class OMVolumeSetOwnerRequest extends OMVolumeRequest {
           new CacheValue<>(Optional.of(oldOwnerVolumeList),
               transactionLogIndex));
       omMetadataManager.getVolumeTable().addCacheEntry(
-          new CacheKey<>(dbVolumeKey),
+          new CacheKey<>(omMetadataManager.getVolumeKey(volume)),
           new CacheValue<>(Optional.of(omVolumeArgs), transactionLogIndex));
 
       omResponse.setSetVolumePropertyResponse(
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java
index 7e0cb72..746a1a6 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java
@@ -35,7 +35,6 @@ import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.OMMetrics;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.om.response.volume.OMVolumeSetQuotaResponse;
@@ -127,23 +126,8 @@ public class OMVolumeSetQuotaRequest extends OMVolumeRequest {
 
       acquireVolumeLock = omMetadataManager.getLock().acquireWriteLock(
           VOLUME_LOCK, volume);
-      String dbVolumeKey = omMetadataManager.getVolumeKey(volume);
-      omVolumeArgs = omMetadataManager.getVolumeTable().get(dbVolumeKey);
 
-      if (omVolumeArgs == null) {
-        LOG.debug("volume:{} does not exist", volume);
-        throw new OMException(OMException.ResultCodes.VOLUME_NOT_FOUND);
-      }
-
-      // Check if this transaction is a replay of ratis logs.
-      // If this is a replay, then the response has already been returned to
-      // the client. So take no further action and return a dummy
-      // OMClientResponse.
-      if (isReplay(ozoneManager, omVolumeArgs, transactionLogIndex)) {
-        LOG.debug("Replayed Transaction {} ignored. Request: {}",
-            transactionLogIndex, setVolumePropertyRequest);
-        return new OMVolumeSetQuotaResponse(createReplayOMResponse(omResponse));
-      }
+      omVolumeArgs = getVolumeInfo(omMetadataManager, volume);
 
       omVolumeArgs.setQuotaInBytes(setVolumePropertyRequest.getQuotaInBytes());
       omVolumeArgs.setUpdateID(transactionLogIndex,
@@ -153,7 +137,7 @@ public class OMVolumeSetQuotaRequest extends OMVolumeRequest {
 
       // update cache.
       omMetadataManager.getVolumeTable().addCacheEntry(
-          new CacheKey<>(dbVolumeKey),
+          new CacheKey<>(omMetadataManager.getVolumeKey(volume)),
           new CacheValue<>(Optional.of(omVolumeArgs), transactionLogIndex));
 
       omResponse.setSetVolumePropertyResponse(
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java
index f2610e5..de7f0c0 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java
@@ -25,12 +25,10 @@ import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OMMetrics;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.exceptions.OMReplayException;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.ozone.om.request.OMClientRequest;
+import org.apache.hadoop.ozone.om.request.volume.OMVolumeRequest;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.volume.OMVolumeAclOpResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
@@ -46,7 +44,7 @@ import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_L
 /**
  * Base class for OMVolumeAcl Request.
  */
-public abstract class OMVolumeAclRequest extends OMClientRequest {
+public abstract class OMVolumeAclRequest extends OMVolumeRequest {
 
   private CheckedBiFunction<List<OzoneAcl>, OmVolumeArgs, IOException>
       omVolumeAclOp;
@@ -84,19 +82,7 @@ public abstract class OMVolumeAclRequest extends OMClientRequest {
       }
       lockAcquired = omMetadataManager.getLock().acquireWriteLock(
           VOLUME_LOCK, volume);
-      String dbVolumeKey = omMetadataManager.getVolumeKey(volume);
-      omVolumeArgs = omMetadataManager.getVolumeTable().get(dbVolumeKey);
-      if (omVolumeArgs == null) {
-        throw new OMException(OMException.ResultCodes.VOLUME_NOT_FOUND);
-      }
-
-      // Check if this transaction is a replay of ratis logs.
-      // If this is a replay, then the response has already been returned to
-      // the client. So take no further action and return a dummy
-      // OMClientResponse.
-      if (isReplay(ozoneManager, omVolumeArgs, trxnLogIndex)) {
-        throw new OMReplayException();
-      }
+      omVolumeArgs = getVolumeInfo(omMetadataManager, volume);
 
       // result is false upon add existing acl or remove non-existing acl
       boolean applyAcl = true;
@@ -106,27 +92,23 @@ public abstract class OMVolumeAclRequest extends OMClientRequest {
         applyAcl = false;
       }
 
-      // We set the updateID even if applyAcl = false to catch the replay
-      // transactions.
-      omVolumeArgs.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
+      // Update only when applyAcl is true, i.e. the ACL operation actually
+      // changed the volume's ACL state.
+      if (applyAcl) {
+        omVolumeArgs.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
 
-      // update cache.
-      omMetadataManager.getVolumeTable().addCacheEntry(
-          new CacheKey<>(dbVolumeKey),
-          new CacheValue<>(Optional.of(omVolumeArgs), trxnLogIndex));
+        // update cache.
+        omMetadataManager.getVolumeTable().addCacheEntry(
+            new CacheKey<>(omMetadataManager.getVolumeKey(volume)),
+            new CacheValue<>(Optional.of(omVolumeArgs), trxnLogIndex));
+      }
 
       omClientResponse = onSuccess(omResponse, omVolumeArgs, applyAcl);
       result = Result.SUCCESS;
     } catch (IOException ex) {
-      if (ex instanceof OMReplayException) {
-        result = Result.REPLAY;
-        omClientResponse = onReplay(omResponse);
-      } else {
-        result = Result.FAILURE;
-        exception = ex;
-        omMetrics.incNumVolumeUpdateFails();
-        omClientResponse = onFailure(omResponse, ex);
-      }
+      result = Result.FAILURE;
+      exception = ex;
+      omMetrics.incNumVolumeUpdateFails();
+      omClientResponse = onFailure(omResponse, ex);
     } finally {
       addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
           omDoubleBufferHelper);
@@ -183,10 +165,6 @@ public abstract class OMVolumeAclRequest extends OMClientRequest {
   abstract OMClientResponse onFailure(OMResponse.Builder omResponse,
       IOException ex);
 
-  OMClientResponse onReplay(OMResponse.Builder omResonse) {
-    return new OMVolumeAclOpResponse(createReplayOMResponse(omResonse));
-  }
-
   /**
    * Completion hook for final processing before return without lock.
    * Usually used for logging without lock.
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java
index e0f9b3d..12008e2 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java
@@ -104,12 +104,6 @@ public class OMVolumeAddAclRequest extends OMVolumeAclRequest {
             getVolumeName());
       }
       break;
-    case REPLAY:
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex,
-            getOmRequest());
-      }
-      break;
     case FAILURE:
       LOG.error("Add acl {} to volume {} failed!", getAcl(), getVolumeName(),
           ex);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java
index 6e90731..461ad48 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java
@@ -103,12 +103,6 @@ public class OMVolumeRemoveAclRequest extends OMVolumeAclRequest {
             getVolumeName());
       }
       break;
-    case REPLAY:
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex,
-            getOmRequest());
-      }
-      break;
     case FAILURE:
       LOG.error("Remove acl {} from volume {} failed!", getAcl(),
           getVolumeName(), ex);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java
index 8d5bc61..c73e19e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java
@@ -100,12 +100,6 @@ public class OMVolumeSetAclRequest extends OMVolumeAclRequest {
             getVolumeName());
       }
       break;
-    case REPLAY:
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Replayed Transaction {} ignored. Request: {}", trxnLogIndex,
-            getOmRequest());
-      }
-      break;
     case FAILURE:
       LOG.error("Set acls {} to volume {} failed!", getAcls(),
           getVolumeName(), ex);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMClientResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMClientResponse.java
index aa3b346..4af78fe 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMClientResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMClientResponse.java
@@ -42,7 +42,7 @@ public abstract class OMClientResponse {
   }
 
   /**
-   * For error or replay cases, check that the status of omResponse is not OK.
+   * For error cases, check that the status of omResponse is not OK.
    */
   public void checkStatusNotOK() {
     Preconditions.checkArgument(!omResponse.getStatus().equals(
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java
index 6948b67..cb1f322 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java
@@ -48,7 +48,7 @@ public final class OMBucketCreateResponse extends OMClientResponse {
   }
 
   /**
-   * For when the request is not successful or it is a replay transaction.
+   * For when the request is not successful.
    * For a successful request, the other constructor should be used.
    */
   public OMBucketCreateResponse(@Nonnull OMResponse omResponse) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java
index 4416757..c3c7fef 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java
@@ -48,7 +48,7 @@ public final class OMBucketDeleteResponse extends OMClientResponse {
   }
 
   /**
-   * For when the request is not successful or it is a replay transaction.
+   * For when the request is not successful.
    * For a successful request, the other constructor should be used.
    */
   public OMBucketDeleteResponse(@Nonnull OMResponse omResponse) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java
index c827e68..b9d3cf0 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java
@@ -45,7 +45,7 @@ public class OMBucketSetPropertyResponse extends OMClientResponse {
   }
 
   /**
-   * For when the request is not successful or it is a replay transaction.
+   * For when the request is not successful.
    * For a successful request, the other constructor should be used.
    */
   public OMBucketSetPropertyResponse(@Nonnull OMResponse omResponse) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/OMBucketAclResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/OMBucketAclResponse.java
index 442dcd1..0a72523 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/OMBucketAclResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/OMBucketAclResponse.java
@@ -46,7 +46,7 @@ public class OMBucketAclResponse extends OMClientResponse {
   }
 
   /**
-   * For when the request is not successful or it is a replay transaction.
+   * For when the request is not successful.
    * For a successful request, the other constructor should be used.
    */
   public OMBucketAclResponse(@Nonnull OMResponse omResponse) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponse.java
index 499b6f1..2608a1b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponse.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.ozone.om.response.file;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest.Result;
+
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .OMResponse;
@@ -29,7 +31,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
 import java.io.IOException;
 import java.util.List;
 
@@ -46,51 +47,50 @@ public class OMDirectoryCreateResponse extends OMClientResponse {
 
   private OmKeyInfo dirKeyInfo;
   private List<OmKeyInfo> parentKeyInfos;
+  private Result result;
 
   public OMDirectoryCreateResponse(@Nonnull OMResponse omResponse,
-      @Nullable OmKeyInfo dirKeyInfo,
-      @Nullable List<OmKeyInfo> parentKeyInfos) {
-
+      @Nonnull OmKeyInfo dirKeyInfo,
+      @Nonnull List<OmKeyInfo> parentKeyInfos, @Nonnull Result result) {
     super(omResponse);
     this.dirKeyInfo = dirKeyInfo;
     this.parentKeyInfos = parentKeyInfos;
+    this.result = result;
   }
 
   /**
-   * For when the request is not successful or it is a replay transaction or
-   * the directory already exists.
+   * For when the request is not successful or the directory already exists.
    */
-  public OMDirectoryCreateResponse(@Nonnull OMResponse omResponse) {
+  public OMDirectoryCreateResponse(@Nonnull OMResponse omResponse,
+      @Nonnull Result result) {
     super(omResponse);
+    this.result = result;
   }
 
   @Override
   protected void addToDBBatch(OMMetadataManager omMetadataManager,
       BatchOperation batchOperation) throws IOException {
 
-    if (dirKeyInfo != null) {
-      if (parentKeyInfos != null) {
-        for (OmKeyInfo parentKeyInfo : parentKeyInfos) {
-          String parentKey = omMetadataManager
-              .getOzoneDirKey(parentKeyInfo.getVolumeName(),
-                  parentKeyInfo.getBucketName(), parentKeyInfo.getKeyName());
-          LOG.debug("putWithBatch parent : key {} info : {}", parentKey,
-              parentKeyInfo);
-          omMetadataManager.getKeyTable()
-              .putWithBatch(batchOperation, parentKey, parentKeyInfo);
-        }
+    if (Result.SUCCESS == result) {
+      // Add all parent keys to batch.
+      for (OmKeyInfo parentKeyInfo : parentKeyInfos) {
+        String parentKey = omMetadataManager
+            .getOzoneDirKey(parentKeyInfo.getVolumeName(),
+                parentKeyInfo.getBucketName(), parentKeyInfo.getKeyName());
+        LOG.debug("putWithBatch parent : key {} info : {}", parentKey,
+            parentKeyInfo);
+        omMetadataManager.getKeyTable()
+            .putWithBatch(batchOperation, parentKey, parentKeyInfo);
       }
 
       String dirKey = omMetadataManager.getOzoneKey(dirKeyInfo.getVolumeName(),
           dirKeyInfo.getBucketName(), dirKeyInfo.getKeyName());
       omMetadataManager.getKeyTable().putWithBatch(batchOperation, dirKey,
           dirKeyInfo);
-
-    } else {
+    } else if (Result.DIRECTORY_ALREADY_EXISTS == result) {
       // When directory already exists, we don't add it to cache. And it is
       // not an error, in this case dirKeyInfo will be null.
-      LOG.debug("Response Status is OK, dirKeyInfo is null in " +
-          "OMDirectoryCreateResponse");
+      LOG.debug("Directory already exists. addToDBBatch is a no-op");
     }
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java
index de069cc..e54379b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java
@@ -34,12 +34,12 @@ public class OMFileCreateResponse extends OMKeyCreateResponse {
 
   public OMFileCreateResponse(@Nonnull OMResponse omResponse,
       @Nonnull OmKeyInfo omKeyInfo,
-      List<OmKeyInfo> parentKeyInfos, long openKeySessionID) {
+      @Nonnull List<OmKeyInfo> parentKeyInfos, long openKeySessionID) {
     super(omResponse, omKeyInfo, parentKeyInfos, openKeySessionID);
   }
 
   /**
-   * For when the request is not successful or it is a replay transaction.
+   * For when the request is not successful.
    * For a successful request, the other constructor should be used.
    */
   public OMFileCreateResponse(@Nonnull OMResponse omResponse) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java
index 7d1bd44..5ea44a7 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java
@@ -48,7 +48,7 @@ public class OMAllocateBlockResponse extends OMClientResponse {
   }
 
   /**
-   * For when the request is not successful or it is a replay transaction.
+   * For when the request is not successful.
    * For a successful request, the other constructor should be used.
    */
   public OMAllocateBlockResponse(@Nonnull OMResponse omResponse) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java
index 9f97bbb..c0216eb 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java
@@ -50,21 +50,7 @@ public class OMKeyCommitResponse extends OMClientResponse {
   }
 
   /**
-   * When the KeyCommit request is a replay but the openKey should be deleted
-   * from the OpenKey table.
-   * Note that this response will result in openKey deletion only. Key will
-   * not be added to Key table.
-   * @param openKeyName openKey to be deleted from OpenKey table
-   */
-  public OMKeyCommitResponse(@Nonnull OMResponse omResponse,
-      String openKeyName) {
-    super(omResponse);
-    this.omKeyInfo = null;
-    this.openKeyName = openKeyName;
-  }
-
-  /**
-   * For when the request is not successful or it is a replay transaction.
+   * For when the request is not successful.
    * For a successful request, the other constructor should be used.
    */
   public OMKeyCommitResponse(@Nonnull OMResponse omResponse) {
@@ -80,12 +66,8 @@ public class OMKeyCommitResponse extends OMClientResponse {
     omMetadataManager.getOpenKeyTable().deleteWithBatch(batchOperation,
         openKeyName);
 
-    // Add entry to Key table if omKeyInfo is available i.e. it is not a
-    // replayed transaction.
-    if (omKeyInfo != null) {
-      omMetadataManager.getKeyTable().putWithBatch(batchOperation, ozoneKeyName,
-          omKeyInfo);
-    }
+    omMetadataManager.getKeyTable().putWithBatch(batchOperation, ozoneKeyName,
+        omKeyInfo);
   }
 
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java
index db9815a..4d0899d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java
@@ -56,7 +56,7 @@ public class OMKeyCreateResponse extends OMClientResponse {
   }
 
   /**
-   * For when the request is not successful or it is a replay transaction.
+   * For when the request is not successful.
    * For a successful request, the other constructor should be used.
    */
   public OMKeyCreateResponse(@Nonnull OMResponse omResponse) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
index e0228f6..41853da 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .OMResponse;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
@@ -54,7 +53,7 @@ public class OMKeyDeleteResponse extends OMClientResponse {
   }
 
   /**
-   * For when the request is not successful or it is a replay transaction.
+   * For when the request is not successful.
    * For a successful request, the other constructor should be used.
    */
   public OMKeyDeleteResponse(@Nonnull OMResponse omResponse) {
@@ -68,31 +67,28 @@ public class OMKeyDeleteResponse extends OMClientResponse {
 
     // For OmResponse with failure, this should do nothing. This method is
     // not called in failure scenario in OM code.
-    if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
-      String ozoneKey = omMetadataManager.getOzoneKey(omKeyInfo.getVolumeName(),
-          omKeyInfo.getBucketName(), omKeyInfo.getKeyName());
-      omMetadataManager.getKeyTable().deleteWithBatch(batchOperation,
-          ozoneKey);
+    String ozoneKey = omMetadataManager.getOzoneKey(omKeyInfo.getVolumeName(),
+        omKeyInfo.getBucketName(), omKeyInfo.getKeyName());
+    omMetadataManager.getKeyTable().deleteWithBatch(batchOperation, ozoneKey);
 
-      // If Key is not empty add this to delete table.
-      if (!isKeyEmpty(omKeyInfo)) {
-        // If a deleted key is put in the table where a key with the same
-        // name already exists, then the old deleted key information would be
-        // lost. To avoid this, first check if a key with same name exists.
-        // deletedTable in OM Metadata stores <KeyName, RepeatedOMKeyInfo>.
-        // The RepeatedOmKeyInfo is the structure that allows us to store a
-        // list of OmKeyInfo that can be tied to same key name. For a keyName
-        // if RepeatedOMKeyInfo structure is null, we create a new instance,
-        // if it is not null, then we simply add to the list and store this
-        // instance in deletedTable.
-        RepeatedOmKeyInfo repeatedOmKeyInfo =
-            omMetadataManager.getDeletedTable().get(ozoneKey);
-        repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
-            omKeyInfo, repeatedOmKeyInfo, omKeyInfo.getUpdateID(),
-            isRatisEnabled);
-        omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
+    // If Key is not empty add this to delete table.
+    if (!isKeyEmpty(omKeyInfo)) {
+      // If a deleted key is put in the table where a key with the same
+      // name already exists, then the old deleted key information would be
+      // lost. To avoid this, first check if a key with same name exists.
+      // deletedTable in OM Metadata stores <KeyName, RepeatedOMKeyInfo>.
+      // The RepeatedOmKeyInfo is the structure that allows us to store a
+      // list of OmKeyInfo that can be tied to same key name. For a keyName
+      // if RepeatedOMKeyInfo structure is null, we create a new instance,
+      // if it is not null, then we simply add to the list and store this
+      // instance in deletedTable.
+      RepeatedOmKeyInfo repeatedOmKeyInfo =
+          omMetadataManager.getDeletedTable().get(ozoneKey);
+      repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
+          omKeyInfo, repeatedOmKeyInfo, omKeyInfo.getUpdateID(),
+          isRatisEnabled);
+      omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
             ozoneKey, repeatedOmKeyInfo);
-      }
     }
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java
index e26433f..01b7457 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java
@@ -39,20 +39,11 @@ public class OMKeyPurgeResponse extends OMClientResponse {
   private List<String> purgeKeyList;
 
   public OMKeyPurgeResponse(@Nonnull OMResponse omResponse,
-      List<String> keyList) {
+      @Nonnull List<String> keyList) {
     super(omResponse);
     this.purgeKeyList = keyList;
   }
 
-  /**
-   * For when the request is not successful or it is a replay transaction.
-   * For a successful request, the other constructor should be used.
-   */
-  public OMKeyPurgeResponse(@Nonnull OMResponse omResponse) {
-    super(omResponse);
-    checkStatusNotOK();
-  }
-
   @Override
   public void addToDBBatch(OMMetadataManager omMetadataManager,
       BatchOperation batchOperation) throws IOException {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java
index 3e64072..7470b37 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.ozone.om.response.key;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
@@ -40,38 +39,18 @@ public class OMKeyRenameResponse extends OMClientResponse {
 
   private String fromKeyName;
   private String toKeyName;
-  private OmKeyInfo newKeyInfo;
+  private OmKeyInfo renameKeyInfo;
 
   public OMKeyRenameResponse(@Nonnull OMResponse omResponse,
       String fromKeyName, String toKeyName, @Nonnull OmKeyInfo renameKeyInfo) {
     super(omResponse);
     this.fromKeyName = fromKeyName;
     this.toKeyName = toKeyName;
-    this.newKeyInfo = renameKeyInfo;
+    this.renameKeyInfo = renameKeyInfo;
   }
 
   /**
-   * When Rename request is replayed and toKey already exists, but fromKey
-   * has not been deleted.
-   * For example, lets say we have the following sequence of transactions
-   *  Trxn 1 : Create Key1
-   *  Trnx 2 : Rename Key1 to Key2 -> Deletes Key1 and Creates Key2
-   *  Now if these transactions are replayed:
-   *  Replay Trxn 1 : Creates Key1 again as Key1 does not exist in DB
-   *  Replay Trxn 2 : Key2 is not created as it exists in DB and the request
-   *  would be deemed a replay. But Key1 is still in the DB and needs to be
-   *  deleted.
-   */
-  public OMKeyRenameResponse(@Nonnull OMResponse omResponse,
-      String fromKeyName, OmKeyInfo fromKeyInfo) {
-    super(omResponse);
-    this.fromKeyName = fromKeyName;
-    this.newKeyInfo = fromKeyInfo;
-    this.toKeyName = null;
-  }
-
-  /**
-   * For when the request is not successful or it is a replay transaction.
+   * For when the request is not successful.
    * For a successful request, the other constructor should be used.
    */
   public OMKeyRenameResponse(@Nonnull OMResponse omResponse) {
@@ -82,31 +61,13 @@ public class OMKeyRenameResponse extends OMClientResponse {
   @Override
   public void addToDBBatch(OMMetadataManager omMetadataManager,
       BatchOperation batchOperation) throws IOException {
-    String volumeName = newKeyInfo.getVolumeName();
-    String bucketName = newKeyInfo.getBucketName();
-    // If toKeyName is null, then we need to only delete the fromKeyName from
-    // KeyTable. This is the case of replay where toKey exists but fromKey
-    // has not been deleted.
-    if (deleteFromKeyOnly()) {
-      omMetadataManager.getKeyTable().deleteWithBatch(batchOperation,
-          omMetadataManager.getOzoneKey(volumeName, bucketName, fromKeyName));
-    } else if (createToKeyAndDeleteFromKey()) {
-      // If both from and toKeyName are equal do nothing
-      omMetadataManager.getKeyTable().deleteWithBatch(batchOperation,
-          omMetadataManager.getOzoneKey(volumeName, bucketName, fromKeyName));
-      omMetadataManager.getKeyTable().putWithBatch(batchOperation,
-          omMetadataManager.getOzoneKey(volumeName, bucketName, toKeyName),
-          newKeyInfo);
-    }
+    String volumeName = renameKeyInfo.getVolumeName();
+    String bucketName = renameKeyInfo.getBucketName();
+    omMetadataManager.getKeyTable().deleteWithBatch(batchOperation,
+        omMetadataManager.getOzoneKey(volumeName, bucketName, fromKeyName));
+    omMetadataManager.getKeyTable().putWithBatch(batchOperation,
+        omMetadataManager.getOzoneKey(volumeName, bucketName, toKeyName),
+        renameKeyInfo);
   }
 
-  @VisibleForTesting
-  public boolean deleteFromKeyOnly() {
-    return toKeyName == null && fromKeyName != null;
-  }
-
-  @VisibleForTesting
-  public boolean createToKeyAndDeleteFromKey() {
-    return toKeyName != null && !toKeyName.equals(fromKeyName);
-  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponse.java
index e3177f8..2bbeae0 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponse.java
@@ -47,7 +47,7 @@ public class OMKeyAclResponse extends OMClientResponse {
   }
 
   /**
-   * For when the request is not successful or it is a replay transaction.
+   * For when the request is not successful.
    * For a successful request, the other constructor should be used.
    */
   public OMKeyAclResponse(@Nonnull OMResponse omResponse) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/OMPrefixAclResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/OMPrefixAclResponse.java
index 225bad3..288a38f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/OMPrefixAclResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/OMPrefixAclResponse.java
@@ -45,7 +45,7 @@ public class OMPrefixAclResponse extends OMClientResponse {
   }
 
   /**
-   * For when the request is not successful or it is a replay transaction.
+   * For when the request is not successful.
    * For a successful request, the other constructor should be used.
    */
   public OMPrefixAclResponse(@Nonnull OMResponse omResponse) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponse.java
index 0cc8dff..ec1b3ae 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponse.java
@@ -52,7 +52,7 @@ public class S3InitiateMultipartUploadResponse extends OMClientResponse {
   }
 
   /**
-   * For when the request is not successful or it is a replay transaction.
+   * For when the request is not successful.
    * For a successful request, the other constructor should be used.
    */
   public S3InitiateMultipartUploadResponse(@Nonnull OMResponse omResponse) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java
index b47b22b..47cde08 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java
@@ -61,7 +61,7 @@ public class S3MultipartUploadAbortResponse extends OMClientResponse {
   }
 
   /**
-   * For when the request is not successful or it is a replay transaction.
+   * For when the request is not successful.
    * For a successful request, the other constructor should be used.
    */
   public S3MultipartUploadAbortResponse(@Nonnull OMResponse omResponse) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java
index 298b733..28acdb5 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java
@@ -41,6 +41,7 @@ import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .Status.OK;
 
 import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
 
 /**
  * Response for S3MultipartUploadCommitPart request.
@@ -69,45 +70,19 @@ public class S3MultipartUploadCommitPartResponse extends OMClientResponse {
    */
   public S3MultipartUploadCommitPartResponse(@Nonnull OMResponse omResponse,
       String multipartKey, String openKey,
-      @Nonnull OmMultipartKeyInfo omMultipartKeyInfo,
-      @Nonnull OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo,
+      @Nullable OmMultipartKeyInfo omMultipartKeyInfo,
+      @Nullable OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo,
+      @Nullable OmKeyInfo openPartKeyInfoToBeDeleted,
       boolean isRatisEnabled) {
     super(omResponse);
     this.multipartKey = multipartKey;
     this.openKey = openKey;
     this.omMultipartKeyInfo = omMultipartKeyInfo;
     this.oldPartKeyInfo = oldPartKeyInfo;
-    this.isRatisEnabled = isRatisEnabled;
-  }
-
-  /**
-   * For the case when Multipart Upload does not exist (could have been
-   * aborted).
-   * 1. Put the partKeyInfo from openKeyTable into DeletedTable
-   * 2. Deleted openKey from OpenKeyTable
-   * @param omResponse
-   * @param openKey
-   * @param openPartKeyInfoToBeDeleted
-   */
-  public S3MultipartUploadCommitPartResponse(@Nonnull OMResponse omResponse,
-      String openKey, @Nonnull OmKeyInfo openPartKeyInfoToBeDeleted,
-      boolean isRatisEnabled) {
-    super(omResponse);
-    checkStatusNotOK();
-    this.openKey = openKey;
     this.openPartKeyInfoToBeDeleted = openPartKeyInfoToBeDeleted;
     this.isRatisEnabled = isRatisEnabled;
   }
 
-  /**
-   * For when the request is not successful or it is a replay transaction.
-   * For a successful request, the other constructor should be used.
-   */
-  public S3MultipartUploadCommitPartResponse(@Nonnull OMResponse omResponse) {
-    super(omResponse);
-    checkStatusNotOK();
-  }
-
   @Override
   public void checkAndUpdateDB(OMMetadataManager omMetadataManager,
       BatchOperation batchOperation) throws IOException {
@@ -115,12 +90,13 @@ public class S3MultipartUploadCommitPartResponse extends OMClientResponse {
     if (getOMResponse().getStatus() == NO_SUCH_MULTIPART_UPLOAD_ERROR) {
       // Means by the time we try to commit part, some one has aborted this
       // multipart upload. So, delete this part information.
+
       RepeatedOmKeyInfo repeatedOmKeyInfo =
           omMetadataManager.getDeletedTable().get(openKey);
 
-      repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
-          openPartKeyInfoToBeDeleted, repeatedOmKeyInfo,
-          openPartKeyInfoToBeDeleted.getUpdateID(), isRatisEnabled);
+      repeatedOmKeyInfo =
+          OmUtils.prepareKeyForDelete(openPartKeyInfoToBeDeleted,
+          repeatedOmKeyInfo, omMultipartKeyInfo.getUpdateID(), isRatisEnabled);
 
       omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
           openKey, repeatedOmKeyInfo);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java
index 093d180..20e398e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java
@@ -59,20 +59,7 @@ public class S3MultipartUploadCompleteResponse extends OMClientResponse {
   }
 
   /**
-   * When the S3MultipartUploadCompleteRequest is a replay but the
-   * openKey should be deleted from the OpenKey table.
-   * Note that this response will result in openKey deletion and
-   * multipartInfo deletion only. Key will not be added to Key table.
-   */
-  public S3MultipartUploadCompleteResponse(
-      @Nonnull OMResponse omResponse,
-      @Nonnull String multipartKey) {
-    super(omResponse);
-    this.multipartKey = multipartKey;
-  }
-
-  /**
-   * For when the request is not successful or it is a replay transaction.
+   * For when the request is not successful.
    * For a successful request, the other constructor should be used.
    */
   public S3MultipartUploadCompleteResponse(@Nonnull OMResponse omResponse) {
@@ -89,26 +76,23 @@ public class S3MultipartUploadCompleteResponse extends OMClientResponse {
     omMetadataManager.getMultipartInfoTable().deleteWithBatch(batchOperation,
         multipartKey);
 
-    if (omKeyInfo != null) {
-      String ozoneKey = omMetadataManager.getOzoneKey(omKeyInfo.getVolumeName(),
-          omKeyInfo.getBucketName(), omKeyInfo.getKeyName());
-      omMetadataManager.getKeyTable().putWithBatch(batchOperation,
-          ozoneKey, omKeyInfo);
+    String ozoneKey = omMetadataManager.getOzoneKey(omKeyInfo.getVolumeName(),
+        omKeyInfo.getBucketName(), omKeyInfo.getKeyName());
+    omMetadataManager.getKeyTable().putWithBatch(batchOperation, ozoneKey,
+        omKeyInfo);
 
-      if (!partsUnusedList.isEmpty()) {
-        // Add unused parts to deleted key table.
-        RepeatedOmKeyInfo repeatedOmKeyInfo =
-            omMetadataManager.getDeletedTable()
-                .get(ozoneKey);
-        if (repeatedOmKeyInfo == null) {
-          repeatedOmKeyInfo = new RepeatedOmKeyInfo(partsUnusedList);
-        } else {
-          repeatedOmKeyInfo.addOmKeyInfo(omKeyInfo);
-        }
-
-        omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
-            ozoneKey, repeatedOmKeyInfo);
+    if (!partsUnusedList.isEmpty()) {
+      // Add unused parts to deleted key table.
+      RepeatedOmKeyInfo repeatedOmKeyInfo = omMetadataManager.getDeletedTable()
+          .get(ozoneKey);
+      if (repeatedOmKeyInfo == null) {
+        repeatedOmKeyInfo = new RepeatedOmKeyInfo(partsUnusedList);
+      } else {
+        repeatedOmKeyInfo.addOmKeyInfo(omKeyInfo);
       }
+
+      omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
+          ozoneKey, repeatedOmKeyInfo);
     }
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeAclOpResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeAclOpResponse.java
index 647123d..f9f0688 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeAclOpResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeAclOpResponse.java
@@ -46,7 +46,7 @@ public class OMVolumeAclOpResponse extends OMClientResponse {
   }
 
   /**
-   * For when the request is not successful or it is a replay transaction.
+   * For when the request is not successful.
    * For a successful request, the other constructor should be used.
    */
   public OMVolumeAclOpResponse(@Nonnull OMResponse omResponse) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeCreateResponse.java
index cd70dc2..1b8e26e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeCreateResponse.java
@@ -53,7 +53,7 @@ public class OMVolumeCreateResponse extends OMClientResponse {
   }
 
   /**
-   * For when the request is not successful or it is a replay transaction.
+   * For when the request is not successful.
    * For a successful request, the other constructor should be used.
    */
   public OMVolumeCreateResponse(@Nonnull OMResponse omResponse) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java
index 80d9e8c..db43fa6 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java
@@ -52,7 +52,7 @@ public class OMVolumeDeleteResponse extends OMClientResponse {
   }
 
   /**
-   * For when the request is not successful or it is a replay transaction.
+   * For when the request is not successful.
    * For a successful request, the other constructor should be used.
    */
   public OMVolumeDeleteResponse(@Nonnull OMResponse omResponse) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java
index 3ed8bb0..a1efe70 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java
@@ -58,8 +58,8 @@ public class OMVolumeSetOwnerResponse extends OMClientResponse {
   }
 
   /**
-   * For when the request is not successful or it is a replay transaction.
-   * Or when newOwner is the same as oldOwner.
+   * For when the request is not successful or when newOwner is the same as
+   * oldOwner.
    * For other successful requests, the other constructor should be used.
    */
   public OMVolumeSetOwnerResponse(@Nonnull OMResponse omResponse) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetQuotaResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetQuotaResponse.java
index b50a923..c621025 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetQuotaResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetQuotaResponse.java
@@ -46,7 +46,7 @@ public class OMVolumeSetQuotaResponse extends OMClientResponse {
   }
 
   /**
-   * For when the request is not successful or it is a replay transaction.
+   * For when the request is not successful.
    * For a successful request, the other constructor should be used.
    */
   public OMVolumeSetQuotaResponse(@Nonnull OMResponse omResponse) {
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java
index 7bef6b8..06e140b 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java
@@ -219,32 +219,4 @@ public class TestOMBucketCreateRequest extends TestBucketRequest {
             .setOwnerName(UUID.randomUUID().toString()).build();
     TestOMRequestUtils.addVolumeToOM(omMetadataManager, omVolumeArgs);
   }
-
-  @Test
-  public void testReplayRequest() throws Exception {
-
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    OMRequest originalRequest = TestOMRequestUtils.createBucketRequest(
-        bucketName, volumeName, false, StorageTypeProto.SSD);
-    OMBucketCreateRequest omBucketCreateRequest = new OMBucketCreateRequest(
-        originalRequest);
-
-    // Manually add volume to DB table
-    addCreateVolumeToTable(volumeName, omMetadataManager);
-
-    // Execute the original request
-    omBucketCreateRequest.preExecute(ozoneManager);
-    omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 1,
-        ozoneManagerDoubleBufferHelper);
-
-    // Replay the transaction - Execute the same request again
-    OMClientResponse omClientResponse =
-        omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-
-    // Replay should result in Replay response
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY,
-        omClientResponse.getOMResponse().getStatus());
-  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java
index f99e1b6..1037baa 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java
@@ -105,46 +105,4 @@ public class TestOMBucketDeleteRequest extends TestBucketRequest {
         .setCmdType(OzoneManagerProtocolProtos.Type.DeleteBucket)
         .setClientId(UUID.randomUUID().toString()).build();
   }
-
-  @Test
-  public void testReplayRequest() throws Exception {
-
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-
-    // CreateBucket request
-    OMBucketCreateRequest omBucketCreateRequest = new OMBucketCreateRequest(
-        TestOMRequestUtils.createBucketRequest(bucketName, volumeName,
-            false, OzoneManagerProtocolProtos.StorageTypeProto.SSD));
-
-    // Create volume entry in DB
-    TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager);
-
-    // Execute CreateBucket request
-    omBucketCreateRequest.preExecute(ozoneManager);
-    omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 2,
-        ozoneManagerDoubleBufferHelper);
-
-    // Execute the original DeleteBucket request
-    OMRequest omRequest = createDeleteBucketRequest(volumeName, bucketName);
-    OMBucketDeleteRequest omBucketDeleteRequest = new OMBucketDeleteRequest(
-        omRequest);
-    omBucketDeleteRequest.preExecute(ozoneManager);
-    omBucketDeleteRequest.validateAndUpdateCache(ozoneManager, 4,
-        ozoneManagerDoubleBufferHelper);
-
-    // Create the bucket again
-    omBucketCreateRequest.preExecute(ozoneManager);
-    omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 10,
-        ozoneManagerDoubleBufferHelper);
-
-    // Replay the delete transaction - Execute the same request again
-    OMClientResponse omClientResponse =
-        omBucketDeleteRequest.validateAndUpdateCache(ozoneManager, 4,
-            ozoneManagerDoubleBufferHelper);
-
-    // Replay should result in Replay response
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY,
-        omClientResponse.getOMResponse().getStatus());
-  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java
index 0670c3e..cb0468e 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java
@@ -119,33 +119,4 @@ public class TestOMBucketSetPropertyRequest extends TestBucketRequest {
         .setCmdType(OzoneManagerProtocolProtos.Type.SetBucketProperty)
         .setClientId(UUID.randomUUID().toString()).build();
   }
-
-  @Test
-  public void testReplayRequest() throws Exception {
-
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-
-    // Create request to enable versioning
-    OMRequest omRequest = createSetBucketPropertyRequest(volumeName,
-        bucketName, true);
-    OMBucketSetPropertyRequest omBucketSetPropertyRequest =
-        new OMBucketSetPropertyRequest(omRequest);
-
-    // Execute the original request
-    omBucketSetPropertyRequest.preExecute(ozoneManager);
-    omBucketSetPropertyRequest.validateAndUpdateCache(ozoneManager, 1,
-        ozoneManagerDoubleBufferHelper);
-
-    // Replay the transaction - Execute the same request again
-    OMClientResponse omClientResponse = omBucketSetPropertyRequest
-        .validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-
-    // Replay should result in Replay response
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY,
-        omClientResponse.getOMResponse().getStatus());
-  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
index 7b6191c..c7aa6be 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
@@ -38,8 +38,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .OMRequest;
 
-import static org.apache.hadoop.ozone.om.request.TestOMRequestUtils.addKeyToTable;
-import static org.apache.hadoop.ozone.om.request.TestOMRequestUtils.addVolumeAndBucketToDB;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.FILE_ALREADY_EXISTS;
@@ -401,31 +399,4 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
         .setCreateFileRequest(createFileRequest).build();
 
   }
-
-  @Test
-  public void testReplayRequest() throws Exception {
-
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    OMRequest originalRequest = createFileRequest(volumeName, bucketName,
-        keyName, replicationFactor, replicationType, false, false);
-    OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest(
-        originalRequest);
-
-    // Manually add volume, bucket and key to DB table
-    addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager);
-    addKeyToTable(false, false, volumeName, bucketName, keyName, clientID,
-        replicationType, replicationFactor, 1L, omMetadataManager);
-
-    // Replay the transaction - Execute the createFile request again
-    OMClientResponse omClientResponse =
-        omFileCreateRequest.validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-
-    // Replay should result in Replay response
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY,
-        omClientResponse.getOMResponse().getStatus());
-  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java
index 5228c5a..5f704d3 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java
@@ -36,7 +36,7 @@ import org.junit.Test;
 public class TestOMKeyAclRequest extends TestOMKeyRequest {
 
   @Test
-  public void testReplayRequest() throws Exception {
+  public void testAclRequest() throws Exception {
     // Manually add volume, bucket and key to DB
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
@@ -59,13 +59,6 @@ public class TestOMKeyAclRequest extends TestOMKeyRequest {
     Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
         omClientResponse.getOMResponse().getStatus());
 
-    // Replay the original request
-    OMClientResponse replayResponse = omKeyAddAclRequest
-        .validateAndUpdateCache(ozoneManager, 2,
-            ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY,
-        replayResponse.getOMResponse().getStatus());
   }
 
   /**
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
index f18ca82..b327b76 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
@@ -207,85 +207,6 @@ public class TestOMKeyCommitRequest extends TestOMKeyRequest {
     Assert.assertNull(omKeyInfo);
   }
 
-  @Test
-  public void testReplayRequest() throws Exception {
-
-    // Manually add Volume, Bucket to DB
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-    // Manually add Key to OpenKey table in DB
-    TestOMRequestUtils.addKeyToTable(true, false, volumeName, bucketName,
-        keyName, clientID, replicationType, replicationFactor, 1L,
-        omMetadataManager);
-
-    OMRequest modifiedOmRequest = doPreExecute(createCommitKeyRequest());
-
-    OMKeyCommitRequest omKeyCommitRequest = new OMKeyCommitRequest(
-        modifiedOmRequest);
-
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
-
-    // Key should not be there in key table, as validateAndUpdateCache is
-    // still not called.
-    OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
-    Assert.assertNull(omKeyInfo);
-
-    // Execute original KeyCommit request
-    omKeyCommitRequest.validateAndUpdateCache(ozoneManager, 10L,
-        ozoneManagerDoubleBufferHelper);
-
-    // Replay the transaction - Execute the createKey request again
-    OMClientResponse replayResponse = omKeyCommitRequest.validateAndUpdateCache(
-        ozoneManager, 10L, ozoneManagerDoubleBufferHelper);
-
-    // Replay should result in Replay response
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY,
-        replayResponse.getOMResponse().getStatus());
-  }
-
-  @Test
-  public void testReplayRequestDeletesOpenKeyEntry() throws Exception {
-
-    // Manually add Volume, Bucket to DB
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-    // Manually add Key to OpenKey table in DB
-    TestOMRequestUtils.addKeyToTable(true, false, volumeName, bucketName,
-        keyName, clientID, replicationType, replicationFactor, 1L,
-        omMetadataManager);
-
-    OMRequest modifiedOmRequest = doPreExecute(createCommitKeyRequest());
-    OMKeyCommitRequest omKeyCommitRequest = new OMKeyCommitRequest(
-        modifiedOmRequest);
-
-    // Execute original KeyCommit request
-    omKeyCommitRequest.validateAndUpdateCache(ozoneManager, 10L,
-        ozoneManagerDoubleBufferHelper);
-
-    // Replay the Key Create request - add Key to OpenKey table manually again
-    TestOMRequestUtils.addKeyToTable(true, true, volumeName, bucketName,
-        keyName, clientID, replicationType, replicationFactor, 1L,
-        omMetadataManager);
-
-    // Key should be present in OpenKey table
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, clientID);
-    OmKeyInfo openKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
-    Assert.assertNotNull(openKeyInfo);
-
-    // Replay the transaction - Execute the createKey request again
-    OMClientResponse replayResponse = omKeyCommitRequest.validateAndUpdateCache(
-        ozoneManager, 10L, ozoneManagerDoubleBufferHelper);
-
-    // Replay should result in DELETE_OPEN_KEY_ONLY response and delete the
-    // key from OpenKey table
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
-        replayResponse.getOMResponse().getStatus());
-    openKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
-    Assert.assertNull(openKeyInfo);
-  }
-
   /**
    * This method calls preExecute and verify the modified request.
    * @param originalOMRequest
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
index 7e9e093..b26505b 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
@@ -37,7 +37,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .OMRequest;
 
-import static org.apache.hadoop.ozone.om.request.TestOMRequestUtils.addKeyToTable;
 import static org.apache.hadoop.ozone.om.request.TestOMRequestUtils.addVolumeAndBucketToDB;
 
 /**
@@ -331,45 +330,4 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
 
   }
 
-  @Test
-  public void testReplayRequest() throws Exception {
-
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    KeyArgs keyArgs = KeyArgs.newBuilder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .setFactor(replicationFactor)
-        .setType(replicationType)
-        .build();
-
-    CreateKeyRequest.Builder req = CreateKeyRequest.newBuilder()
-        .setKeyArgs(keyArgs);
-    OMRequest originalRequest = OMRequest.newBuilder()
-        .setCreateKeyRequest(req)
-        .setCmdType(OzoneManagerProtocolProtos.Type.CreateKey)
-        .setClientId(UUID.randomUUID().toString())
-        .build();
-
-    OMKeyCreateRequest omKeyCreateRequest = new OMKeyCreateRequest(
-        originalRequest);
-
-    // Manually add volume, bucket and key to DB table
-    addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager);
-    addKeyToTable(false, false, volumeName, bucketName, keyName, clientID,
-        replicationType, replicationFactor, 1L, omMetadataManager);
-
-    // Replay the transaction - Execute the createKey request again
-    OMClientResponse omClientResponse =
-        omKeyCreateRequest.validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-
-    // Replay should result in Replay response
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY,
-        omClientResponse.getOMResponse().getStatus());
-  }
-
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java
index b60d68e..b8e5603 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java
@@ -136,43 +136,6 @@ public class TestOMKeyDeleteRequest extends TestOMKeyRequest {
             omClientResponse.getOMResponse().getStatus());
   }
 
-  @Test
-  public void testReplayRequest() throws Exception {
-    OMRequest modifiedOmRequest =
-        doPreExecute(createDeleteKeyRequest());
-
-    OMKeyDeleteRequest omKeyDeleteRequest =
-        new OMKeyDeleteRequest(modifiedOmRequest);
-
-    // Add volume, bucket and key entries to OM DB.
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-
-    TestOMRequestUtils.addKeyToTableAndCache(volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, 1L, omMetadataManager);
-
-    // Delete the key manually. Lets say the Delete Requests
-    // TransactionLogIndex is 10.
-    long deleteTrxnLogIndex = 10L;
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
-    TestOMRequestUtils.deleteKey(ozoneKey, omMetadataManager, 10L);
-
-    // Create the same key again with TransactionLogIndex > Delete requests
-    // TransactionLogIndex
-    TestOMRequestUtils.addKeyToTableAndCache(volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, 20L, omMetadataManager);
-
-    // Replay the original DeleteRequest.
-    OMClientResponse omClientResponse = omKeyDeleteRequest
-        .validateAndUpdateCache(ozoneManager, deleteTrxnLogIndex,
-            ozoneManagerDoubleBufferHelper);
-
-    // Replay should result in Replay response
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY,
-        omClientResponse.getOMResponse().getStatus());
-  }
-
   /**
    * This method calls preExecute and verify the modified request.
    * @param originalOmRequest
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java
index 10b45ad..31e6975 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java
@@ -27,7 +27,6 @@ import org.junit.Assert;
 import org.junit.Test;
 
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.om.response.key.OMKeyPurgeResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeletedKeys;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
@@ -154,120 +153,4 @@ public class TestOMKeyPurgeRequestAndResponse extends TestOMKeyRequest {
           deletedKey));
     }
   }
-
-  @Test
-  public void testPurgeKeysAcrossBuckets() throws Exception {
-    String bucket1 = bucketName;
-    String bucket2 = UUID.randomUUID().toString();
-
-    // bucket1 is created during setup. Create bucket2 manually.
-    TestOMRequestUtils.addBucketToDB(volumeName, bucket2, omMetadataManager);
-
-    // Create and Delete keys in Bucket1 and Bucket2.
-    List<String> deletedKeyInBucket1 = createAndDeleteKeys(1, bucket1);
-    List<String> deletedKeyInBucket2 = createAndDeleteKeys(1, bucket2);
-    List<String> deletedKeyNames = new ArrayList<>();
-    deletedKeyNames.addAll(deletedKeyInBucket1);
-    deletedKeyNames.addAll(deletedKeyInBucket2);
-
-    // The keys should be present in the DeletedKeys table before purging
-    for (String deletedKey : deletedKeyNames) {
-      Assert.assertTrue(omMetadataManager.getDeletedTable().isExist(
-          deletedKey));
-    }
-
-    // Create PurgeKeysRequest to purge the deleted keys
-    DeletedKeys deletedKeysInBucket1 = DeletedKeys.newBuilder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucket1)
-        .addAllKeys(deletedKeyInBucket1)
-        .build();
-    DeletedKeys deletedKeysInBucket2 = DeletedKeys.newBuilder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucket2)
-        .addAllKeys(deletedKeyInBucket1)
-        .build();
-    PurgeKeysRequest purgeKeysRequest = PurgeKeysRequest.newBuilder()
-        .addDeletedKeys(deletedKeysInBucket1)
-        .addDeletedKeys(deletedKeysInBucket2)
-        .build();
-
-    OMRequest omRequest = OMRequest.newBuilder()
-        .setPurgeKeysRequest(purgeKeysRequest)
-        .setCmdType(Type.PurgeKeys)
-        .setClientId(UUID.randomUUID().toString())
-        .build();
-
-    OMRequest preExecutedRequest = preExecute(omRequest);
-    OMKeyPurgeRequest omKeyPurgeRequest =
-        new OMKeyPurgeRequest(preExecutedRequest);
-
-    omKeyPurgeRequest.validateAndUpdateCache(ozoneManager, 100L,
-        ozoneManagerDoubleBufferHelper);
-
-    OMResponse omResponse = OMResponse.newBuilder()
-        .setPurgeKeysResponse(PurgeKeysResponse.getDefaultInstance())
-        .setCmdType(Type.PurgeKeys)
-        .setStatus(Status.OK)
-        .build();
-
-    BatchOperation batchOperation =
-        omMetadataManager.getStore().initBatchOperation();
-
-    OMKeyPurgeResponse omKeyPurgeResponse = new OMKeyPurgeResponse(
-        omResponse, deletedKeyNames);
-    omKeyPurgeResponse.addToDBBatch(omMetadataManager, batchOperation);
-
-    // Do manual commit and see whether addToBatch is successful or not.
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-
-    // The keys should not exist in the DeletedKeys table
-    for (String deletedKey : deletedKeyNames) {
-      Assert.assertFalse(omMetadataManager.getDeletedTable().isExist(
-          deletedKey));
-    }
-  }
-
-  @Test
-  public void testReplayRequest() throws Exception {
-
-    // Create and Delete keys. The keys should be moved to DeletedKeys table
-    Integer trxnLogIndex = new Integer(1);
-    List<String> deletedKeyNames = createAndDeleteKeys(trxnLogIndex, null);
-    int purgeRequestTrxnLogIndex = ++trxnLogIndex;
-
-    // The keys should be present in the DeletedKeys table before purging
-    for (String deletedKey : deletedKeyNames) {
-      Assert.assertTrue(omMetadataManager.getDeletedTable().isExist(
-          deletedKey));
-    }
-
-    // Execute PurgeKeys request to purge the keys from Deleted table.
-    // Create PurgeKeysRequest to replay the purge request
-    OMRequest omRequest = createPurgeKeysRequest(deletedKeyNames);
-    OMRequest preExecutedRequest = preExecute(omRequest);
-    OMKeyPurgeRequest omKeyPurgeRequest =
-        new OMKeyPurgeRequest(preExecutedRequest);
-    OMClientResponse omClientResponse = omKeyPurgeRequest
-        .validateAndUpdateCache(ozoneManager, purgeRequestTrxnLogIndex,
-            ozoneManagerDoubleBufferHelper);
-
-    Assert.assertTrue(omClientResponse.getOMResponse().getStatus().equals(
-        Status.OK));
-
-    // Create and delete the same keys again
-    createAndDeleteKeys(++trxnLogIndex, null);
-
-    // Replay the PurgeKeys request. It should not purge the keys deleted
-    // after the original request was played.
-    OMClientResponse replayResponse = omKeyPurgeRequest
-        .validateAndUpdateCache(ozoneManager, purgeRequestTrxnLogIndex,
-            ozoneManagerDoubleBufferHelper);
-
-    // Verify that the new deletedKeys exist in the DeletedKeys table
-    for (String deletedKey : deletedKeyNames) {
-      Assert.assertTrue(omMetadataManager.getDeletedTable().isExist(
-          deletedKey));
-    }
-  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java
index eb79d7a..fc7f9b8 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java
@@ -22,11 +22,9 @@ import java.util.UUID;
 import org.junit.Assert;
 import org.junit.Test;
 
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.key.OMKeyRenameResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .OMRequest;
@@ -200,106 +198,6 @@ public class TestOMKeyRenameRequest extends TestOMKeyRequest {
   }
 
   /**
-   * Test replay of RenameRequest when fromKey does not exist in DB.
-   */
-  @Test
-  public void testReplayRequest() throws Exception {
-    String toKeyName = UUID.randomUUID().toString();
-    OMRequest modifiedOmRequest = doPreExecute(
-        createRenameKeyRequest(toKeyName));
-
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-    TestOMRequestUtils.addKeyToTableAndCache(volumeName, bucketName,
-        keyName, clientID, replicationType, replicationFactor, 1L,
-        omMetadataManager);
-
-    // Execute RenameRequest
-    OMKeyRenameRequest omKeyRenameRequest =
-        new OMKeyRenameRequest(modifiedOmRequest);
-    OMClientResponse omKeyRenameResponse =
-        omKeyRenameRequest.validateAndUpdateCache(ozoneManager, 10L,
-            ozoneManagerDoubleBufferHelper);
-
-    // Commit Batch operation to add the transaction to DB
-    BatchOperation batchOperation = omMetadataManager.getStore()
-        .initBatchOperation();
-    omKeyRenameResponse.checkAndUpdateDB(omMetadataManager, batchOperation);
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-
-    // Replay the RenameRequest.
-    OMClientResponse replayResponse = omKeyRenameRequest.validateAndUpdateCache(
-        ozoneManager, 10L, ozoneManagerDoubleBufferHelper);
-
-    // Replay should result in Replay response
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY,
-        replayResponse.getOMResponse().getStatus());
-  }
-
-  /**
-   * Test replay of RenameRequest when fromKey exists in DB.
-   */
-  @Test
-  public void testReplayRequestWhenFromKeyExists() throws Exception {
-
-    String toKeyName = UUID.randomUUID().toString();
-    OMRequest modifiedOmRequest = doPreExecute(
-        createRenameKeyRequest(toKeyName));
-
-    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        omMetadataManager);
-    TestOMRequestUtils.addKeyToTableAndCache(volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, 1L, omMetadataManager);
-
-    // Execute RenameRequest
-    OMKeyRenameRequest omKeyRenameRequest =
-        new OMKeyRenameRequest(modifiedOmRequest);
-    OMClientResponse omKeyRenameResponse = omKeyRenameRequest
-        .validateAndUpdateCache(ozoneManager, 10L,
-            ozoneManagerDoubleBufferHelper);
-
-    // Commit Batch operation to add the transaction to DB
-    BatchOperation batchOperation = omMetadataManager.getStore()
-        .initBatchOperation();
-    omKeyRenameResponse.checkAndUpdateDB(omMetadataManager, batchOperation);
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-
-    // Let's say the fromKey create transaction was also replayed. In this
-    // case, fromKey and toKey will both exist in the DB. Replaying the
-    // RenameRequest should then delete fromKey but not add toKey again.
-
-    // Replay CreateKey request for fromKey
-    TestOMRequestUtils.addKeyToTableAndCache(volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, 1L, omMetadataManager);
-
-    // Verify fromKey exists in DB
-    String fromKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
-    OmKeyInfo dbFromKeyInfo = omMetadataManager.getKeyTable().get(fromKey);
-    Assert.assertNotNull(dbFromKeyInfo);
-
-    // Replay original RenameRequest
-    OMKeyRenameResponse replayResponse =
-        (OMKeyRenameResponse) omKeyRenameRequest.validateAndUpdateCache(
-            ozoneManager, 10L, ozoneManagerDoubleBufferHelper);
-
-    // This replay response should delete fromKey from DB
-    // Replay should result in Replay response
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
-        replayResponse.getOMResponse().getStatus());
-    Assert.assertTrue(replayResponse.deleteFromKeyOnly());
-
-    // Commit response to DB
-    batchOperation = omMetadataManager.getStore().initBatchOperation();
-    replayResponse.addToDBBatch(omMetadataManager, batchOperation);
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-
-    // Verify fromKey is deleted from DB
-    dbFromKeyInfo = omMetadataManager.getKeyTable().get(fromKey);
-    Assert.assertNull(dbFromKeyInfo);
-  }
-
-  /**
    * This method calls preExecute and verify the modified request.
    * @param originalOmRequest
    * @return OMRequest - modified request returned from preExecute.
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java
index c25ee7b..5690ff2 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java
@@ -40,7 +40,7 @@ import static org.mockito.Mockito.when;
 public class TestOMPrefixAclRequest extends TestOMKeyRequest {
 
   @Test
-  public void testReplayRequest() throws Exception {
+  public void testAclRequest() throws Exception {
     PrefixManager prefixManager = new PrefixManagerImpl(
         ozoneManager.getMetadataManager(), true);
     when(ozoneManager.getPrefixManager()).thenReturn(prefixManager);
@@ -66,16 +66,9 @@ public class TestOMPrefixAclRequest extends TestOMKeyRequest {
             ozoneManagerDoubleBufferHelper);
     Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
         omClientResponse.getOMResponse().getStatus());
-
-    // Replay the original request
-    OMClientResponse replayResponse = omKeyPrefixAclRequest
-        .validateAndUpdateCache(ozoneManager, 2,
-            ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY,
-        replayResponse.getOMResponse().getStatus());
   }
 
+
   /**
    * Create OMRequest which encapsulates OMKeyAddAclRequest.
    */
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java
index 27973ed..4ac1f49 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java
@@ -244,30 +244,4 @@ public class TestOMVolumeCreateRequest extends TestOMVolumeRequest {
     Assert.assertNotEquals(original.getModificationTime(),
         updated.getModificationTime());
   }
-
-  @Test
-  public void testReplayRequest() throws Exception {
-
-    String volumeName = UUID.randomUUID().toString();
-    String adminName = "user1";
-    String ownerName = "user1";
-    OMRequest originalRequest = createVolumeRequest(volumeName, adminName,
-        ownerName);
-    OMVolumeCreateRequest omVolumeCreateRequest =
-        new OMVolumeCreateRequest(originalRequest);
-
-    // Execute the original request
-    omVolumeCreateRequest.preExecute(ozoneManager);
-    omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 1,
-        ozoneManagerDoubleBufferHelper);
-
-    // Replay the transaction - Execute the same request again
-    OMClientResponse omClientResponse =
-        omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-
-    // Replay should result in Replay response
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY,
-        omClientResponse.getOMResponse().getStatus());
-  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java
index 709f821..49f28d3 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java
@@ -157,42 +157,4 @@ public class TestOMVolumeDeleteRequest extends TestOMVolumeRequest {
         .setCmdType(OzoneManagerProtocolProtos.Type.DeleteVolume)
         .setDeleteVolumeRequest(deleteVolumeRequest).build();
   }
-
-  @Test
-  public void testReplayRequest() throws Exception {
-
-    // create volume request
-    String volumeName = UUID.randomUUID().toString();
-    String user = "user1";
-    OMVolumeCreateRequest omVolumeCreateRequest = new OMVolumeCreateRequest(
-        createVolumeRequest(volumeName, user, user));
-
-    // Execute createVolume request
-    omVolumeCreateRequest.preExecute(ozoneManager);
-    omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 1,
-        ozoneManagerDoubleBufferHelper);
-
-    OMRequest originalDeleteRequest = deleteVolumeRequest(volumeName);
-    OMVolumeDeleteRequest omVolumeDeleteRequest =
-        new OMVolumeDeleteRequest(originalDeleteRequest);
-
-    // Execute the original request
-    omVolumeDeleteRequest.preExecute(ozoneManager);
-    omVolumeDeleteRequest.validateAndUpdateCache(ozoneManager, 2,
-        ozoneManagerDoubleBufferHelper);
-
-    // Create the volume again
-    omVolumeCreateRequest.preExecute(ozoneManager);
-    omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 3,
-        ozoneManagerDoubleBufferHelper);
-
-    // Replay the delete transaction - Execute the same request again
-    OMClientResponse omClientResponse =
-        omVolumeDeleteRequest.validateAndUpdateCache(ozoneManager, 2,
-            ozoneManagerDoubleBufferHelper);
-
-    // Replay should result in Replay response
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY,
-        omClientResponse.getOMResponse().getStatus());
-  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java
index 0e1ac54..4ccf195 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java
@@ -169,37 +169,6 @@ public class TestOMVolumeSetOwnerRequest extends TestOMVolumeRequest {
         omResponse.getStatus());
   }
 
-  @Test
-  public void testReplayRequest() throws Exception {
-    // create volume
-    String volumeName = UUID.randomUUID().toString();
-    String ownerName = "user1";
-    TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager);
-    TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager);
-
-    // create request to set new owner
-    String newOwnerName = "user2";
-    OMRequest originalRequest =
-        TestOMRequestUtils.createSetVolumePropertyRequest(volumeName,
-            newOwnerName);
-    OMVolumeSetOwnerRequest omVolumeSetOwnerRequest =
-        new OMVolumeSetOwnerRequest(originalRequest);
-
-    // Execute the original request
-    omVolumeSetOwnerRequest.preExecute(ozoneManager);
-    omVolumeSetOwnerRequest.validateAndUpdateCache(ozoneManager, 1,
-        ozoneManagerDoubleBufferHelper);
-
-    // Replay the transaction - Execute the same request again
-    OMClientResponse omClientResponse =
-        omVolumeSetOwnerRequest.validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-
-    // Replay should result in Replay response
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY,
-        omClientResponse.getOMResponse().getStatus());
-  }
-
 
   @Test
   public void testOwnSameVolumeTwice() throws Exception {
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java
index bd90222..4d78ef0 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java
@@ -150,34 +150,4 @@ public class TestOMVolumeSetQuotaRequest extends TestOMVolumeRequest {
     Assert.assertEquals(OzoneManagerProtocolProtos.Status.INVALID_REQUEST,
         omResponse.getStatus());
   }
-
-  @Test
-  public void testReplayRequest() throws Exception {
-    // create volume
-    String volumeName = UUID.randomUUID().toString();
-    String ownerName = "user1";
-    TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager);
-    TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager);
-
-    // create request with quota set.
-    long quota = 100L;
-    OMRequest originalRequest =
-        TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, quota);
-    OMVolumeSetQuotaRequest omVolumeSetQuotaRequest =
-        new OMVolumeSetQuotaRequest(originalRequest);
-
-    // Execute the original request
-    omVolumeSetQuotaRequest.preExecute(ozoneManager);
-    omVolumeSetQuotaRequest.validateAndUpdateCache(ozoneManager, 1,
-        ozoneManagerDoubleBufferHelper);
-
-    // Replay the transaction - Execute the same request again
-    OMClientResponse omClientResponse =
-        omVolumeSetQuotaRequest.validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-
-    // Replay should result in Replay response
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY,
-        omClientResponse.getOMResponse().getStatus());
-  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeAddAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeAddAclRequest.java
index 8c79e02..66a122f 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeAddAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeAddAclRequest.java
@@ -119,37 +119,4 @@ public class TestOMVolumeAddAclRequest extends TestOMVolumeRequest {
     Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND,
         omResponse.getStatus());
   }
-
-  @Test
-  public void testReplayRequest() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String ownerName = "user1";
-
-    TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager);
-    TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager);
-
-    OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy[ACCESS]");
-
-    OMRequest originalRequest = TestOMRequestUtils.createVolumeAddAclRequest(
-        volumeName, acl);
-
-    OMVolumeAddAclRequest omVolumeAddAclRequest = new OMVolumeAddAclRequest(
-        originalRequest);
-    omVolumeAddAclRequest.preExecute(ozoneManager);
-
-    OMClientResponse omClientResponse = omVolumeAddAclRequest
-        .validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
-        omClientResponse.getOMResponse().getStatus());
-
-    // Replay the original request
-    OMClientResponse replayResponse = omVolumeAddAclRequest
-        .validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY,
-        replayResponse.getOMResponse().getStatus());
-  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeRemoveAclRequest.java
index b1bbf13..b2eb0bf 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeRemoveAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeRemoveAclRequest.java
@@ -129,48 +129,4 @@ public class TestOMVolumeRemoveAclRequest extends TestOMVolumeRequest {
     Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND,
         omResponse.getStatus());
   }
-
-  @Test
-  public void testReplayRequest() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String ownerName = "user1";
-
-    TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager);
-    TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager);
-
-    OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy[ACCESS]");
-
-    // add acl first
-    OMRequest addAclRequest = TestOMRequestUtils.createVolumeAddAclRequest(
-        volumeName, acl);
-    OMVolumeAddAclRequest omVolumeAddAclRequest = new OMVolumeAddAclRequest(
-        addAclRequest);
-    omVolumeAddAclRequest.preExecute(ozoneManager);
-    OMClientResponse addAclResponse = omVolumeAddAclRequest
-        .validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
-        addAclResponse.getOMResponse().getStatus());
-
-    // remove acl
-    OMRequest removeAclRequest = TestOMRequestUtils
-        .createVolumeRemoveAclRequest(volumeName, acl);
-    OMVolumeRemoveAclRequest omVolumeRemoveAclRequest =
-        new OMVolumeRemoveAclRequest(removeAclRequest);
-    omVolumeRemoveAclRequest.preExecute(ozoneManager);
-
-    OMClientResponse omClientResponse = omVolumeRemoveAclRequest
-        .validateAndUpdateCache(ozoneManager, 2,
-            ozoneManagerDoubleBufferHelper);
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
-        omClientResponse.getOMResponse().getStatus());
-
-    // Replay the original request
-    OMClientResponse replayResponse = omVolumeRemoveAclRequest
-        .validateAndUpdateCache(ozoneManager, 2,
-            ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY,
-        replayResponse.getOMResponse().getStatus());
-  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeSetAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeSetAclRequest.java
index 6d0f2b1..087ba71 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeSetAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeSetAclRequest.java
@@ -132,39 +132,4 @@ public class TestOMVolumeSetAclRequest extends TestOMVolumeRequest {
     Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND,
         omResponse.getStatus());
   }
-
-  @Test
-  public void testReplayRequest() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String ownerName = "user1";
-
-    TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager);
-    TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager);
-
-    OzoneAcl userAccessAcl = OzoneAcl.parseAcl("user:bilbo:rw[ACCESS]");
-    OzoneAcl groupDefaultAcl = OzoneAcl.parseAcl(
-        "group:admin:rwdlncxy[DEFAULT]");
-
-    List<OzoneAcl> acls = Lists.newArrayList(userAccessAcl, groupDefaultAcl);
-
-    OMRequest originalRequest = TestOMRequestUtils.createVolumeSetAclRequest(
-        volumeName, acls);
-
-    OMVolumeSetAclRequest omVolumeSetAclRequest = new OMVolumeSetAclRequest(
-        originalRequest);
-    omVolumeSetAclRequest.preExecute(ozoneManager);
-
-    OMClientResponse omClientResponse = omVolumeSetAclRequest
-        .validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
-        omClientResponse.getOMResponse().getStatus());
-
-    OMClientResponse replayResponse = omVolumeSetAclRequest
-        .validateAndUpdateCache(ozoneManager, 1,
-            ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY,
-        replayResponse.getOMResponse().getStatus());
-  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java
index cca0dad..fbd3af0 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest.Result;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .OMResponse;
@@ -36,6 +37,7 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
 
+import java.util.ArrayList;
 import java.util.UUID;
 
 /**
@@ -75,7 +77,8 @@ public class TestOMDirectoryCreateResponse {
             .build();
 
     OMDirectoryCreateResponse omDirectoryCreateResponse =
-        new OMDirectoryCreateResponse(omResponse, omKeyInfo, null);
+        new OMDirectoryCreateResponse(omResponse, omKeyInfo,
+            new ArrayList<>(), Result.SUCCESS);
 
     omDirectoryCreateResponse.addToDBBatch(omMetadataManager, batchOperation);
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java
index f8b0a17..b2626da 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java
@@ -152,7 +152,7 @@ public class TestOMKeyDeleteResponse extends TestOMKeyResponse {
 
     Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey));
 
-    omKeyDeleteResponse.addToDBBatch(omMetadataManager, batchOperation);
+    omKeyDeleteResponse.checkAndUpdateDB(omMetadataManager, batchOperation);
 
     // Do manual commit and see whether addToBatch is successful or not.
     omMetadataManager.getStore().commitBatchOperation(batchOperation);


---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-commits-help@hadoop.apache.org