Posted to commits@ozone.apache.org by ra...@apache.org on 2021/05/13 04:10:41 UTC

[ozone] branch HDDS-2939 updated: HDDS-5201. [FSO] S3MultiPart: Use existing ozone key format for MPU Info in DB (#2228)

This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/HDDS-2939 by this push:
     new 72d3106  HDDS-5201. [FSO] S3MultiPart: Use existing ozone key format for MPU Info in DB (#2228)
72d3106 is described below

commit 72d3106f591c3c880c83f83cec1244d3b0fee513
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Thu May 13 09:40:20 2021 +0530

    HDDS-5201. [FSO] S3MultiPart: Use existing ozone key format for MPU Info in DB (#2228)
---
 .../rpc/TestOzoneClientMultipartUploadWithFSO.java | 141 ++++++++---
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  44 ++--
 .../hadoop/ozone/om/OmMetadataManagerImpl.java     |  44 ++--
 .../hadoop/ozone/om/codec/OMDBDefinition.java      |  12 +-
 .../file/OMDirectoryCreateRequestWithFSO.java      |   2 +-
 .../S3InitiateMultipartUploadRequestWithFSO.java   |  11 +-
 .../multipart/S3MultipartUploadAbortRequest.java   |  27 +-
 .../S3MultipartUploadAbortRequestWithFSO.java      |  16 +-
 .../S3MultipartUploadCommitPartRequest.java        |  57 +++--
 .../S3MultipartUploadCommitPartRequestWithFSO.java | 239 +++---------------
 .../S3MultipartUploadCompleteRequest.java          | 117 ++++++---
 .../S3MultipartUploadCompleteRequestWithFSO.java   | 273 ++++++---------------
 .../S3InitiateMultipartUploadResponseWithFSO.java  |  15 +-
 .../multipart/S3MultipartUploadAbortResponse.java  |   9 +-
 .../S3MultipartUploadAbortResponseWithFSO.java     |  13 +-
 .../S3MultipartUploadCommitPartResponse.java       |   3 +
 ...S3MultipartUploadCommitPartResponseWithFSO.java |   9 +-
 .../S3MultipartUploadCompleteResponse.java         |  28 ++-
 .../S3MultipartUploadCompleteResponseWithFSO.java  |  41 ++--
 .../file/TestOMFileCreateRequestWithFSO.java       |   2 +
 .../key/TestOMAllocateBlockRequestWithFSO.java     |   2 +
 .../request/key/TestOMKeyCommitRequestWithFSO.java |   1 +
 .../request/key/TestOMKeyCreateRequestWithFSO.java |   5 +
 .../request/key/TestOMKeyDeleteRequestWithFSO.java |   2 +
 ...estS3InitiateMultipartUploadRequestWithFSO.java |  10 +-
 .../TestS3MultipartUploadAbortRequest.java         |  11 +-
 .../TestS3MultipartUploadAbortRequestWithFSO.java  |   2 +-
 .../TestS3MultipartUploadCommitPartRequest.java    |  30 ++-
 ...tS3MultipartUploadCommitPartRequestWithFSO.java |  16 +-
 ...estS3MultipartUploadCompleteRequestWithFSO.java |   7 +
 .../key/TestOMAllocateBlockResponseWithFSO.java    |   3 +
 .../key/TestOMKeyCommitResponseWithFSO.java        |   9 +-
 .../key/TestOMKeyCreateResponseWithFSO.java        |   1 +
 ...stS3InitiateMultipartUploadResponseWithFSO.java |  10 +-
 .../s3/multipart/TestS3MultipartResponse.java      |  32 ++-
 .../TestS3MultipartUploadAbortResponse.java        |  19 +-
 .../TestS3MultipartUploadAbortResponseWithFSO.java |  20 +-
 ...S3MultipartUploadCommitPartResponseWithFSO.java |  14 +-
 ...stS3MultipartUploadCompleteResponseWithFSO.java |  32 ++-
 39 files changed, 655 insertions(+), 674 deletions(-)
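
For context on the key formats this patch settles on: MPU bookkeeping for FSO buckets now goes
into the common multipartInfoTable under the generic /volumeName/bucketName/keyName/uploadId key,
while the corresponding openFileTable entry keeps the FSO-specific parentId/fileName/uploadId
shape. A minimal sketch of the two formats follows; the class and helper names are illustrative
only (not part of the Ozone codebase), and the "/" separator mirrors the schema comment in
OmMetadataManagerImpl below rather than a guaranteed implementation detail.

    import org.apache.hadoop.ozone.OzoneConsts;

    // Illustrative helpers only -- not part of the Ozone codebase.
    public final class MpuKeyFormatSketch {

      // multipartInfoTable key, shared by simple and FSO buckets:
      //   /volumeName/bucketName/keyName/uploadId
      static String multipartInfoKey(String volume, String bucket, String key,
          String uploadId) {
        return OzoneConsts.OM_KEY_PREFIX + volume
            + OzoneConsts.OM_KEY_PREFIX + bucket
            + OzoneConsts.OM_KEY_PREFIX + key
            + OzoneConsts.OM_KEY_PREFIX + uploadId;
      }

      // openFileTable entry for an in-flight FSO multipart key:
      //   parentId/fileName/uploadId
      static String multipartOpenFileKey(long parentId, String fileName,
          String uploadId) {
        return parentId + OzoneConsts.OM_KEY_PREFIX + fileName
            + OzoneConsts.OM_KEY_PREFIX + uploadId;
      }

      public static void main(String[] args) {
        System.out.println(multipartInfoKey("vol1", "buck1", "dir1/key1", "upload-1"));
        System.out.println(multipartOpenFileKey(1025L, "key1", "upload-1"));
      }
    }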

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
index 7b49413..2c639ef 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.ozone.client.rpc;
 
 import org.apache.commons.lang3.RandomUtils;
-import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -29,6 +28,8 @@ import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.OzoneMultipartUpload;
+import org.apache.hadoop.ozone.client.OzoneMultipartUploadList;
 import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
@@ -561,7 +562,10 @@ public class TestOzoneClientMultipartUploadWithFSO {
 
     bucket.abortMultipartUpload(keyName, uploadID);
 
-    OmKeyInfo omKeyInfo = metadataMgr.getOpenKeyTable().get(multipartKey);
+    String multipartOpenKey =
+        getMultipartOpenKey(uploadID, volumeName, bucketName, keyName,
+            metadataMgr);
+    OmKeyInfo omKeyInfo = metadataMgr.getOpenKeyTable().get(multipartOpenKey);
     OmMultipartKeyInfo omMultipartKeyInfo =
         metadataMgr.getMultipartInfoTable().get(multipartKey);
     Assert.assertNull(omKeyInfo);
@@ -607,14 +611,14 @@ public class TestOzoneClientMultipartUploadWithFSO {
     Assert.assertEquals(3,
         ozoneMultipartUploadPartListParts.getPartInfoList().size());
 
-    verifyPartNamesInDB(volumeName, bucketName, parentDir, keyName, partsMap,
+    verifyPartNamesInDB(volumeName, bucketName, keyName, partsMap,
         ozoneMultipartUploadPartListParts, uploadID);
 
     Assert.assertFalse(ozoneMultipartUploadPartListParts.isTruncated());
   }
 
   private void verifyPartNamesInDB(String volumeName, String bucketName,
-      String parentDir, String keyName, Map<Integer, String> partsMap,
+      String keyName, Map<Integer, String> partsMap,
       OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts,
       String uploadID) throws IOException {
 
@@ -633,13 +637,12 @@ public class TestOzoneClientMultipartUploadWithFSO {
 
     OMMetadataManager metadataMgr =
         cluster.getOzoneManager().getMetadataManager();
-    String multipartKey = getMultipartKey(uploadID, volumeName, bucketName,
-        keyName, metadataMgr);
+    String multipartKey = metadataMgr.getMultipartKey(volumeName, bucketName,
+        keyName, uploadID);
     OmMultipartKeyInfo omMultipartKeyInfo =
         metadataMgr.getMultipartInfoTable().get(multipartKey);
     Assert.assertNotNull(omMultipartKeyInfo);
 
-    long parentID = getParentID(volumeName, bucketName, keyName, metadataMgr);
     TreeMap<Integer, OzoneManagerProtocolProtos.PartKeyInfo> partKeyInfoMap =
         omMultipartKeyInfo.getPartKeyInfoMap();
     for (Map.Entry<Integer, OzoneManagerProtocolProtos.PartKeyInfo> entry :
@@ -647,17 +650,16 @@ public class TestOzoneClientMultipartUploadWithFSO {
       OzoneManagerProtocolProtos.PartKeyInfo partKeyInfo = entry.getValue();
       String partKeyName = partKeyInfo.getPartName();
 
-      // partKeyName format in DB - <parentID>/partFileName + ClientID
-      Assert.assertTrue("Invalid partKeyName format in DB",
-          partKeyName.startsWith(parentID + OzoneConsts.OM_KEY_PREFIX));
-      partKeyName = StringUtils.remove(partKeyName,
-          parentID + OzoneConsts.OM_KEY_PREFIX);
-
       // reconstruct full part name with volume, bucket, partKeyName
-      String fullKeyPartName = metadataMgr.getOzoneKey(volumeName, bucketName,
-          parentDir + partKeyName);
+      String fullKeyPartName =
+          metadataMgr.getOzoneKey(volumeName, bucketName, keyName);
+
+      // partKeyName format in DB - partKeyName + ClientID
+      Assert.assertTrue("Invalid partKeyName format in DB: " + partKeyName
+              + ", expected name:" + fullKeyPartName,
+          partKeyName.startsWith(fullKeyPartName));
 
-      listPartNames.remove(fullKeyPartName);
+      listPartNames.remove(partKeyName);
     }
 
     Assert.assertTrue("Wrong partKeyName format in DB!",
@@ -831,12 +833,94 @@ public class TestOzoneClientMultipartUploadWithFSO {
         });
   }
 
+  @Test
+  public void testListMultipartUpload() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String dirName = "dir1/dir2/dir3";
+    String key1 = "dir1" + "/key1";
+    String key2 = "dir1/dir2" + "/key2";
+    String key3 = dirName + "/key3";
+    List<String> keys = new ArrayList<>();
+    keys.add(key1);
+    keys.add(key2);
+    keys.add(key3);
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    // Initiate multipart upload
+    String uploadID1 = initiateMultipartUpload(bucket, key1, STAND_ALONE,
+        ONE);
+    String uploadID2 = initiateMultipartUpload(bucket, key2, STAND_ALONE,
+        ONE);
+    String uploadID3 = initiateMultipartUpload(bucket, key3, STAND_ALONE,
+        ONE);
+
+    // Upload Parts
+    // Uploading part 1 with less than min size
+    uploadPart(bucket, key1, uploadID1, 1, "data".getBytes(UTF_8));
+    uploadPart(bucket, key2, uploadID2, 1, "data".getBytes(UTF_8));
+    uploadPart(bucket, key3, uploadID3, 1, "data".getBytes(UTF_8));
+
+    OzoneMultipartUploadList listMPUs = bucket.listMultipartUploads("dir1");
+    Assert.assertEquals(3, listMPUs.getUploads().size());
+    List<String> expectedList = new ArrayList<>(keys);
+    for (OzoneMultipartUpload mpu : listMPUs.getUploads()) {
+      expectedList.remove(mpu.getKeyName());
+    }
+    Assert.assertEquals(0, expectedList.size());
+
+    listMPUs = bucket.listMultipartUploads("dir1/dir2");
+    Assert.assertEquals(2, listMPUs.getUploads().size());
+    expectedList = new ArrayList<>();
+    expectedList.add(key2);
+    expectedList.add(key3);
+    for (OzoneMultipartUpload mpu : listMPUs.getUploads()) {
+      expectedList.remove(mpu.getKeyName());
+    }
+    Assert.assertEquals(0, expectedList.size());
+
+    listMPUs = bucket.listMultipartUploads("dir1/dir2/dir3");
+    Assert.assertEquals(1, listMPUs.getUploads().size());
+    expectedList = new ArrayList<>();
+    expectedList.add(key3);
+    for (OzoneMultipartUpload mpu : listMPUs.getUploads()) {
+      expectedList.remove(mpu.getKeyName());
+    }
+    Assert.assertEquals(0, expectedList.size());
+
+    // partial key
+    listMPUs = bucket.listMultipartUploads("d");
+    Assert.assertEquals(3, listMPUs.getUploads().size());
+    expectedList = new ArrayList<>(keys);
+    for (OzoneMultipartUpload mpu : listMPUs.getUploads()) {
+      expectedList.remove(mpu.getKeyName());
+    }
+    Assert.assertEquals(0, expectedList.size());
+
+    // partial key
+    listMPUs = bucket.listMultipartUploads("");
+    Assert.assertEquals(3, listMPUs.getUploads().size());
+    expectedList = new ArrayList<>(keys);
+    for (OzoneMultipartUpload mpu : listMPUs.getUploads()) {
+      expectedList.remove(mpu.getKeyName());
+    }
+    Assert.assertEquals(0, expectedList.size());
+  }
+
   private String verifyUploadedPart(String volumeName, String bucketName,
       String keyName, String uploadID, String partName,
       OMMetadataManager metadataMgr) throws IOException {
-    String multipartKey = getMultipartKey(uploadID, volumeName, bucketName,
-        keyName, metadataMgr);
-    OmKeyInfo omKeyInfo = metadataMgr.getOpenKeyTable().get(multipartKey);
+    String multipartOpenKey =
+        getMultipartOpenKey(uploadID, volumeName, bucketName, keyName,
+            metadataMgr);
+
+    String multipartKey = metadataMgr.getMultipartKey(volumeName, bucketName,
+        keyName, uploadID);
+    OmKeyInfo omKeyInfo = metadataMgr.getOpenKeyTable().get(multipartOpenKey);
     OmMultipartKeyInfo omMultipartKeyInfo =
         metadataMgr.getMultipartInfoTable().get(multipartKey);
 
@@ -846,9 +930,6 @@ public class TestOzoneClientMultipartUploadWithFSO {
         omKeyInfo.getKeyName());
     Assert.assertEquals(uploadID, omMultipartKeyInfo.getUploadID());
 
-    long parentID = getParentID(volumeName, bucketName, keyName,
-        metadataMgr);
-
     TreeMap<Integer, OzoneManagerProtocolProtos.PartKeyInfo> partKeyInfoMap =
         omMultipartKeyInfo.getPartKeyInfoMap();
     for (Map.Entry<Integer, OzoneManagerProtocolProtos.PartKeyInfo> entry :
@@ -857,21 +938,17 @@ public class TestOzoneClientMultipartUploadWithFSO {
       OmKeyInfo currentKeyPartInfo =
           OmKeyInfo.getFromProtobuf(partKeyInfo.getPartKeyInfo());
 
-      Assert.assertEquals(OzoneFSUtils.getFileName(keyName),
-          currentKeyPartInfo.getKeyName());
-
-      // prepare dbPartName <parentID>/partFileName
-      String partFileName = OzoneFSUtils.getFileName(partName);
-      String dbPartName = metadataMgr.getOzonePathKey(parentID, partFileName);
+      Assert.assertEquals(keyName, currentKeyPartInfo.getKeyName());
 
-      Assert.assertEquals(dbPartName, partKeyInfo.getPartName());
+      // verify dbPartName
+      Assert.assertEquals(partName, partKeyInfo.getPartName());
     }
     return multipartKey;
   }
 
-  private String getMultipartKey(String multipartUploadID, String volumeName,
-      String bucketName, String keyName, OMMetadataManager omMetadataManager)
-      throws IOException {
+  private String getMultipartOpenKey(String multipartUploadID,
+      String volumeName, String bucketName, String keyName,
+      OMMetadataManager omMetadataManager) throws IOException {
 
     String fileName = OzoneFSUtils.getFileName(keyName);
     long parentID = getParentID(volumeName, bucketName, keyName,
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index dbda8b9..52128bf 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -1423,8 +1423,8 @@ public class KeyManagerImpl implements KeyManager {
     metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName,
         bucketName);
     try {
-      String multipartKey = getMultipartKey(volumeName, bucketName,
-          keyName, uploadID);
+      String multipartKey = metadataManager.getMultipartKey(volumeName,
+          bucketName, keyName, uploadID);
 
       OmMultipartKeyInfo multipartKeyInfo =
           metadataManager.getMultipartInfoTable().get(multipartKey);
@@ -1469,7 +1469,11 @@ public class KeyManagerImpl implements KeyManager {
 
         if (replicationType == null) {
           //if there are no parts, use the replicationType from the open key.
-
+          if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
+            multipartKey =
+                getMultipartOpenKeyFSO(volumeName, bucketName, keyName,
+                    uploadID);
+          }
           OmKeyInfo omKeyInfo =
               metadataManager.getOpenKeyTable().get(multipartKey);
 
@@ -1541,28 +1545,20 @@ public class KeyManagerImpl implements KeyManager {
     return partName;
   }
 
-  private String getMultipartKey(String volumeName, String bucketName,
+  private String getMultipartOpenKeyFSO(String volumeName, String bucketName,
       String keyName, String uploadID) throws IOException {
-
-    if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
-      OMMetadataManager metaMgr = ozoneManager.getMetadataManager();
-      String fileName = OzoneFSUtils.getFileName(keyName);
-      Iterator<Path> pathComponents = Paths.get(keyName).iterator();
-      String bucketKey = metaMgr.getBucketKey(volumeName, bucketName);
-      OmBucketInfo omBucketInfo =
-          metaMgr.getBucketTable().get(bucketKey);
-      long bucketId = omBucketInfo.getObjectID();
-      long parentID = OMFileRequest.getParentID(bucketId, pathComponents,
-          keyName, metaMgr);
-
-      String multipartKey = metaMgr.getMultipartKey(parentID, fileName,
-          uploadID);
-
-      return multipartKey;
-    } else {
-      return metadataManager.getMultipartKey(volumeName,
-          bucketName, keyName, uploadID);
-    }
+    OMMetadataManager metaMgr = ozoneManager.getMetadataManager();
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    Iterator<Path> pathComponents = Paths.get(keyName).iterator();
+    String bucketKey = metaMgr.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo = metaMgr.getBucketTable().get(bucketKey);
+    long bucketId = omBucketInfo.getObjectID();
+    long parentID =
+        OMFileRequest.getParentID(bucketId, pathComponents, keyName, metaMgr);
+
+    String multipartKey = metaMgr.getMultipartKey(parentID, fileName, uploadID);
+
+    return multipartKey;
   }
 
   /**
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index b67346e..fa387f8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -106,6 +106,9 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
    * OM DB stores metadata as KV pairs in different column families.
    * <p>
    * OM DB Schema:
+   *
+   *
+   * Common Tables:
    * |----------------------------------------------------------------------|
    * |  Column Family     |        VALUE                                    |
    * |----------------------------------------------------------------------|
@@ -115,19 +118,31 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
    * |----------------------------------------------------------------------|
    * | bucketTable        |     /volume/bucket-> BucketInfo                 |
    * |----------------------------------------------------------------------|
+   * | s3SecretTable      | s3g_access_key_id -> s3Secret                   |
+   * |----------------------------------------------------------------------|
+   * | dTokenTable        | OzoneTokenID -> renew_time                      |
+   * |----------------------------------------------------------------------|
+   * | prefixInfoTable    | prefix -> PrefixInfo                            |
+   * |----------------------------------------------------------------------|
+   * | multipartInfoTable | /volumeName/bucketName/keyName/uploadId ->...   |
+   * |----------------------------------------------------------------------|
+   * | transactionInfoTable| #TRANSACTIONINFO -> OMTransactionInfo          |
+   * |----------------------------------------------------------------------|
+   *
+   * Simple Tables:
+   * |----------------------------------------------------------------------|
+   * |  Column Family     |        VALUE                                    |
+   * |----------------------------------------------------------------------|
    * | keyTable           | /volumeName/bucketName/keyName->KeyInfo         |
    * |----------------------------------------------------------------------|
    * | deletedTable       | /volumeName/bucketName/keyName->RepeatedKeyInfo |
    * |----------------------------------------------------------------------|
    * | openKey            | /volumeName/bucketName/keyName/id->KeyInfo      |
    * |----------------------------------------------------------------------|
-   * | s3SecretTable      | s3g_access_key_id -> s3Secret                   |
-   * |----------------------------------------------------------------------|
-   * | dTokenTable        | OzoneTokenID -> renew_time                      |
-   * |----------------------------------------------------------------------|
-   * | prefixInfoTable    | prefix -> PrefixInfo                            |
+   *
+   * Prefix Tables:
    * |----------------------------------------------------------------------|
-   * |  multipartInfoTable| /volumeName/bucketName/keyName/uploadId ->...   |
+   * |  Column Family     |        VALUE                                    |
    * |----------------------------------------------------------------------|
    * |  directoryTable    | parentId/directoryName -> DirectoryInfo         |
    * |----------------------------------------------------------------------|
@@ -135,12 +150,9 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
    * |----------------------------------------------------------------------|
    * |  openFileTable     | parentId/fileName/id -> KeyInfo                 |
    * |----------------------------------------------------------------------|
-   * |  multipartFileInfoTable | parentId/fileName/uploadId ->...           |
-   * |----------------------------------------------------------------------|
-   * |  deletedDirTable      | parentId/directoryName -> KeyInfo            |
-   * |----------------------------------------------------------------------|
-   * |  transactionInfoTable | #TRANSACTIONINFO -> OMTransactionInfo        |
+   * |  deletedDirTable   | parentId/directoryName -> KeyInfo               |
    * |----------------------------------------------------------------------|
+   *
    */
 
   public static final String USER_TABLE = "userTable";
@@ -156,7 +168,6 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
   public static final String DIRECTORY_TABLE = "directoryTable";
   public static final String FILE_TABLE = "fileTable";
   public static final String OPEN_FILE_TABLE = "openFileTable";
-  public static final String MULTIPARTFILEINFO_TABLE = "multipartFileInfoTable";
   public static final String DELETED_DIR_TABLE = "deletedDirectoryTable";
   public static final String TRANSACTION_INFO_TABLE =
       "transactionInfoTable";
@@ -182,7 +193,6 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
   private Table transactionInfoTable;
   private boolean isRatisEnabled;
   private boolean ignorePipelineinKey;
-  private Table<String, OmMultipartKeyInfo> multipartFileInfoTable;
   private Table deletedDirTable;
 
   // Epoch is used to generate the objectIDs. The most significant 2 bits of
@@ -284,9 +294,6 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
 
   @Override
   public Table<String, OmMultipartKeyInfo> getMultipartInfoTable() {
-    if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
-      return multipartFileInfoTable;
-    }
     return multipartInfoTable;
   }
 
@@ -380,7 +387,6 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
         .addTable(DIRECTORY_TABLE)
         .addTable(FILE_TABLE)
         .addTable(OPEN_FILE_TABLE)
-        .addTable(MULTIPARTFILEINFO_TABLE)
         .addTable(DELETED_DIR_TABLE)
         .addTable(TRANSACTION_INFO_TABLE)
         .addCodec(OzoneTokenIdentifier.class, new TokenIdentifierCodec())
@@ -460,10 +466,6 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
             OmKeyInfo.class);
     checkTableStatus(openFileTable, OPEN_FILE_TABLE);
 
-    multipartFileInfoTable = this.store.getTable(MULTIPARTFILEINFO_TABLE,
-            String.class, OmMultipartKeyInfo.class);
-    checkTableStatus(multipartFileInfoTable, MULTIPARTFILEINFO_TABLE);
-
     deletedDirTable = this.store.getTable(DELETED_DIR_TABLE, String.class,
         OmKeyInfo.class);
     checkTableStatus(deletedDirTable, DELETED_DIR_TABLE);
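
With multipartFileInfoTable removed above, getMultipartInfoTable() always resolves to the common
multipartInfoTable, so the lookup path is the same regardless of bucket layout. A minimal sketch,
assuming an already wired-up OMMetadataManager as in KeyManagerImpl; the wrapper class name is a
placeholder, but the two calls are the ones used in the patch itself:

    import java.io.IOException;

    import org.apache.hadoop.ozone.om.OMMetadataManager;
    import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;

    // Placeholder class; the calls mirror the lookup done in KeyManagerImpl above.
    final class MpuInfoLookupSketch {

      // One lookup path for simple and FSO buckets after this change.
      static OmMultipartKeyInfo lookupMpuInfo(OMMetadataManager metadataManager,
          String volumeName, String bucketName, String keyName, String uploadID)
          throws IOException {
        String multipartKey = metadataManager.getMultipartKey(
            volumeName, bucketName, keyName, uploadID);
        return metadataManager.getMultipartInfoTable().get(multipartKey);
      }
    }
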
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
index f3716db..fd1579c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
@@ -171,15 +171,6 @@ public class OMDBDefinition implements DBDefinition {
                   OmKeyInfo.class,
                   new OmKeyInfoCodec(true));
 
-  public static final DBColumnFamilyDefinition<String, OmMultipartKeyInfo>
-          MULTIPART_FILEINFO_TABLE =
-          new DBColumnFamilyDefinition<>(
-                  OmMetadataManagerImpl.MULTIPARTFILEINFO_TABLE,
-                  String.class,
-                  new StringCodec(),
-                  OmMultipartKeyInfo.class,
-                  new OmMultipartKeyInfoCodec());
-
   public static final DBColumnFamilyDefinition<String, OmKeyInfo>
       DELETED_DIR_TABLE =
       new DBColumnFamilyDefinition<>(OmMetadataManagerImpl.DELETED_DIR_TABLE,
@@ -202,8 +193,7 @@ public class OMDBDefinition implements DBDefinition {
         VOLUME_TABLE, OPEN_KEY_TABLE, KEY_TABLE,
         BUCKET_TABLE, MULTIPART_INFO_TABLE, PREFIX_TABLE, DTOKEN_TABLE,
         S3_SECRET_TABLE, TRANSACTION_INFO_TABLE, DIRECTORY_TABLE,
-        FILE_TABLE, OPEN_FILE_TABLE, MULTIPART_FILEINFO_TABLE,
-        DELETED_DIR_TABLE};
+        FILE_TABLE, OPEN_FILE_TABLE, DELETED_DIR_TABLE};
   }
 }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java
index bb2ea12..a9ad85a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java
@@ -297,7 +297,7 @@ public class OMDirectoryCreateRequestWithFSO extends OMDirectoryCreateRequest {
    * @param inheritAcls
    * @return the OmDirectoryInfo structure
    */
-  public static OmDirectoryInfo createDirectoryInfoWithACL(
+  private static OmDirectoryInfo createDirectoryInfoWithACL(
           String dirName, KeyArgs keyArgs, long objectId,
           long parentObjectId, long transactionIndex,
           List<OzoneAcl> inheritAcls) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java
index b5d2172..347140c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java
@@ -147,7 +147,11 @@ public class S3InitiateMultipartUploadRequestWithFSO
       // multipart upload request is received, it returns multipart upload id
       // for the key.
 
-      String multipartKey = omMetadataManager
+      String multipartKey = omMetadataManager.getMultipartKey(
+          volumeName, bucketName, keyName,
+          keyArgs.getMultipartUploadID());
+
+      String multipartOpenKey = omMetadataManager
           .getMultipartKey(pathInfoFSO.getLastKnownParentId(),
               pathInfoFSO.getLeafNodeName(), keyArgs.getMultipartUploadID());
 
@@ -189,7 +193,7 @@ public class S3InitiateMultipartUploadRequestWithFSO
               transactionLogIndex);
 
       OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager,
-              multipartKey, omKeyInfo, pathInfoFSO.getLeafNodeName(),
+          multipartOpenKey, omKeyInfo, pathInfoFSO.getLeafNodeName(),
               transactionLogIndex);
 
       // Add to cache
@@ -205,7 +209,8 @@ public class S3InitiateMultipartUploadRequestWithFSO
                       .setBucketName(requestedBucket)
                       .setKeyName(keyName)
                       .setMultipartUploadID(keyArgs.getMultipartUploadID()))
-                  .build(), multipartKeyInfo, omKeyInfo, missingParentInfos);
+                  .build(), multipartKeyInfo, omKeyInfo, multipartKey,
+              missingParentInfos);
 
       result = Result.SUCCESS;
     } catch (IOException ex) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
index fb67a20..5b4bdfc 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
@@ -123,11 +123,15 @@ public class S3MultipartUploadAbortRequest extends OMKeyRequest {
 
       validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
 
-      multipartKey = getMultipartKey(keyArgs.getMultipartUploadID(),
-          volumeName, bucketName, keyName, omMetadataManager);
+      multipartKey = omMetadataManager.getMultipartKey(
+          volumeName, bucketName, keyName, keyArgs.getMultipartUploadID());
+
+      String multipartOpenKey =
+          getMultipartOpenKey(keyArgs.getMultipartUploadID(), volumeName,
+              bucketName, keyName, omMetadataManager);
 
       OmKeyInfo omKeyInfo =
-          omMetadataManager.getOpenKeyTable().get(multipartKey);
+          omMetadataManager.getOpenKeyTable().get(multipartOpenKey);
       omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
 
       // If there is no entry in openKeyTable, then there is no multipart
@@ -160,14 +164,14 @@ public class S3MultipartUploadAbortRequest extends OMKeyRequest {
       // No need to add the cache entries to delete table, as the entries
       // in delete table are not used by any read/write operations.
       omMetadataManager.getOpenKeyTable().addCacheEntry(
-          new CacheKey<>(multipartKey),
+          new CacheKey<>(multipartOpenKey),
           new CacheValue<>(Optional.absent(), trxnLogIndex));
       omMetadataManager.getMultipartInfoTable().addCacheEntry(
           new CacheKey<>(multipartKey),
           new CacheValue<>(Optional.absent(), trxnLogIndex));
 
       omClientResponse = getOmClientResponse(ozoneManager, multipartKeyInfo,
-          multipartKey, omResponse, omBucketInfo);
+          multipartKey, multipartOpenKey, omResponse, omBucketInfo);
 
       result = Result.SUCCESS;
     } catch (IOException ex) {
@@ -217,19 +221,20 @@ public class S3MultipartUploadAbortRequest extends OMKeyRequest {
 
   protected OMClientResponse getOmClientResponse(OzoneManager ozoneManager,
       OmMultipartKeyInfo multipartKeyInfo, String multipartKey,
-      OMResponse.Builder omResponse, OmBucketInfo omBucketInfo) {
+      String multipartOpenKey, OMResponse.Builder omResponse,
+      OmBucketInfo omBucketInfo) {
 
     OMClientResponse omClientResponse = new S3MultipartUploadAbortResponse(
         omResponse.setAbortMultiPartUploadResponse(
-            MultipartUploadAbortResponse.newBuilder()).build(),
-        multipartKey, multipartKeyInfo, ozoneManager.isRatisEnabled(),
+            MultipartUploadAbortResponse.newBuilder()).build(), multipartKey,
+        multipartOpenKey, multipartKeyInfo, ozoneManager.isRatisEnabled(),
         omBucketInfo.copyObject());
     return omClientResponse;
   }
 
-  protected String getMultipartKey(String multipartUploadID, String volumeName,
-      String bucketName, String keyName, OMMetadataManager omMetadataManager)
-      throws IOException {
+  protected String getMultipartOpenKey(String multipartUploadID,
+      String volumeName, String bucketName, String keyName,
+      OMMetadataManager omMetadataManager) throws IOException {
 
     String multipartKey = omMetadataManager.getMultipartKey(
         volumeName, bucketName, keyName, multipartUploadID);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequestWithFSO.java
index af6ebc7..fe4d49a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequestWithFSO.java
@@ -45,6 +45,7 @@ public class S3MultipartUploadAbortRequestWithFSO
     super(omRequest);
   }
 
+  @Override
   protected OMClientResponse getOmClientResponse(IOException exception,
       OMResponse.Builder omResponse) {
 
@@ -52,21 +53,24 @@ public class S3MultipartUploadAbortRequestWithFSO
         omResponse, exception));
   }
 
+  @Override
   protected OMClientResponse getOmClientResponse(OzoneManager ozoneManager,
       OmMultipartKeyInfo multipartKeyInfo, String multipartKey,
-      OMResponse.Builder omResponse, OmBucketInfo omBucketInfo) {
+      String multipartOpenKey, OMResponse.Builder omResponse,
+      OmBucketInfo omBucketInfo) {
 
     OMClientResponse omClientResp = new S3MultipartUploadAbortResponseWithFSO(
         omResponse.setAbortMultiPartUploadResponse(
-            MultipartUploadAbortResponse.newBuilder()).build(),
-        multipartKey, multipartKeyInfo, ozoneManager.isRatisEnabled(),
+            MultipartUploadAbortResponse.newBuilder()).build(), multipartKey,
+        multipartOpenKey, multipartKeyInfo, ozoneManager.isRatisEnabled(),
         omBucketInfo.copyObject());
     return omClientResp;
   }
 
-  protected String getMultipartKey(String multipartUploadID, String volumeName,
-      String bucketName, String keyName, OMMetadataManager omMetadataManager)
-      throws IOException {
+  @Override
+  protected String getMultipartOpenKey(String multipartUploadID,
+      String volumeName, String bucketName, String keyName,
+      OMMetadataManager omMetadataManager) throws IOException {
 
     String fileName = OzoneFSUtils.getFileName(keyName);
     Iterator<Path> pathComponents = Paths.get(keyName).iterator();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
index aac03bb..616b951 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
@@ -148,7 +148,7 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
       String ozoneKey = omMetadataManager.getOzoneKey(
           volumeName, bucketName, keyName);
 
-      omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
+      omKeyInfo = getOmKeyInfo(omMetadataManager, openKey, keyName);
 
       if (omKeyInfo == null) {
         throw new OMException("Failed to commit Multipart Upload key, as " +
@@ -230,20 +230,19 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
       omResponse.setCommitMultiPartUploadResponse(
           MultipartCommitUploadPartResponse.newBuilder()
               .setPartName(partName));
-      omClientResponse = new S3MultipartUploadCommitPartResponse(
-          omResponse.build(), multipartKey, openKey,
-          multipartKeyInfo, oldPartKeyInfo, omKeyInfo,
-          ozoneManager.isRatisEnabled(),
-          omBucketInfo.copyObject());
+      omClientResponse =
+          getOmClientResponse(ozoneManager, oldPartKeyInfo, openKey,
+              omKeyInfo, multipartKey, multipartKeyInfo, omResponse.build(),
+              omBucketInfo.copyObject());
 
       result = Result.SUCCESS;
     } catch (IOException ex) {
       result = Result.FAILURE;
       exception = ex;
-      omClientResponse = new S3MultipartUploadCommitPartResponse(
-          createErrorOMResponse(omResponse, exception), multipartKey, openKey,
-          multipartKeyInfo, oldPartKeyInfo, omKeyInfo,
-          ozoneManager.isRatisEnabled(), copyBucketInfo);
+      omClientResponse =
+          getOmClientResponse(ozoneManager, oldPartKeyInfo, openKey,
+              omKeyInfo, multipartKey, multipartKeyInfo,
+              createErrorOMResponse(omResponse, exception), copyBucketInfo);
     } finally {
       addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
           omDoubleBufferHelper);
@@ -260,20 +259,35 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
     return omClientResponse;
   }
 
-  private String getOpenKey(String volumeName, String bucketName,
-      String keyName, OMMetadataManager omMetadataManager, long clientID) {
-    return omMetadataManager.getOpenKey(volumeName, bucketName,
-            keyName, clientID);
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  protected S3MultipartUploadCommitPartResponse getOmClientResponse(
+      OzoneManager ozoneManager,
+      OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo, String openKey,
+      OmKeyInfo omKeyInfo, String multipartKey,
+      OmMultipartKeyInfo multipartKeyInfo, OMResponse build,
+      OmBucketInfo omBucketInfo) {
+
+    return new S3MultipartUploadCommitPartResponse(build, multipartKey, openKey,
+        multipartKeyInfo, oldPartKeyInfo, omKeyInfo,
+        ozoneManager.isRatisEnabled(), omBucketInfo);
   }
 
-  private String getMultipartKey(String volumeName, String bucketName,
-      String keyName, OMMetadataManager omMetadataManager, String uploadID) {
-    return omMetadataManager.getMultipartKey(volumeName, bucketName,
-        keyName, uploadID);
+  protected OmKeyInfo getOmKeyInfo(OMMetadataManager omMetadataManager,
+      String openKey, String keyName) throws IOException {
+
+    return omMetadataManager.getOpenKeyTable().get(openKey);
+  }
+
+  protected String getOpenKey(String volumeName, String bucketName,
+      String keyName, OMMetadataManager omMetadataManager, long clientID)
+      throws IOException {
+
+    return omMetadataManager
+        .getOpenKey(volumeName, bucketName, keyName, clientID);
   }
 
   @SuppressWarnings("parameternumber")
-  protected void logResult(OzoneManager ozoneManager,
+  private void logResult(OzoneManager ozoneManager,
       MultipartCommitUploadPartRequest multipartCommitUploadPartRequest,
       KeyArgs keyArgs, Map<String, String> auditMap, String volumeName,
       String bucketName, String keyName, IOException exception,
@@ -305,5 +319,10 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
     }
   }
 
+  private String getMultipartKey(String volumeName, String bucketName,
+      String keyName, OMMetadataManager omMetadataManager, String uploadID) {
+    return omMetadataManager.getMultipartKey(volumeName, bucketName,
+        keyName, uploadID);
+  }
 }
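
The commit-part refactor above, together with the FSO variant in the next file, follows a
template-method shape: the shared validateAndUpdateCache flow stays in the base request, and the
FSO subclass only overrides how the open key is built, how the key info is read, and which
response type is returned. A simplified sketch of that pattern; the class names, the literal
parentId, and the key separators below are placeholders, not the actual Ozone request classes:

    import java.io.IOException;

    // Simplified placeholders, not the actual Ozone request classes.
    abstract class CommitPartFlowSketch {

      // Shared flow: resolve the open key through a hook, then do the common work.
      final void commitPart(String volume, String bucket, String key, long clientId)
          throws IOException {
        String openKey = getOpenKey(volume, bucket, key, clientId);
        System.out.println("commit part against open key: " + openKey);
        // ... read key info, update multipart info, build the response via further hooks ...
      }

      // Flat (non-FSO) layout: /volume/bucket/key/clientId.
      protected String getOpenKey(String volume, String bucket, String key,
          long clientId) throws IOException {
        return "/" + volume + "/" + bucket + "/" + key + "/" + clientId;
      }
    }

    class CommitPartFlowWithFsoSketch extends CommitPartFlowSketch {

      // FSO layout keys the open file by parent directory id and file name.
      @Override
      protected String getOpenKey(String volume, String bucket, String key,
          long clientId) {
        long parentId = 1025L;  // stand-in; the real code resolves this via OMFileRequest.getParentID
        String fileName = key.substring(key.lastIndexOf('/') + 1);
        return parentId + "/" + fileName + "/" + clientId;
      }
    }

The real classes pass the resolved key through the getOmKeyInfo and getOmClientResponse hooks in
the same way, which is what lets the FSO request class below drop its copy of
validateAndUpdateCache entirely.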
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequestWithFSO.java
index 13995b9..dacdb53 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequestWithFSO.java
@@ -18,38 +18,22 @@
 
 package org.apache.hadoop.ozone.om.request.s3.multipart;
 
-import com.google.common.base.Optional;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
-import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.s3.multipart.S3MultipartUploadCommitPartResponse;
 import org.apache.hadoop.ozone.om.response.s3.multipart.S3MultipartUploadCommitPartResponseWithFSO;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartCommitUploadPartRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartCommitUploadPartResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 
 import java.io.IOException;
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.util.Iterator;
-import java.util.Map;
-import java.util.stream.Collectors;
-
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
 
 /**
  * Handle Multipart upload commit upload part file.
@@ -62,197 +46,42 @@ public class S3MultipartUploadCommitPartRequestWithFSO
   }
 
   @Override
-  @SuppressWarnings("methodlength")
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
-    MultipartCommitUploadPartRequest multipartCommitUploadPartRequest =
-        getOmRequest().getCommitMultiPartUploadRequest();
-
-    KeyArgs keyArgs = multipartCommitUploadPartRequest.getKeyArgs();
-    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
-
-    String volumeName = keyArgs.getVolumeName();
-    String bucketName = keyArgs.getBucketName();
-    String keyName = keyArgs.getKeyName();
-
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-    ozoneManager.getMetrics().incNumCommitMultipartUploadParts();
-
-    boolean acquiredLock = false;
-
-    IOException exception = null;
-    String dbPartName;
-    String fullKeyPartName = null;
-    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
-        getOmRequest());
-    OMClientResponse omClientResponse = null;
-    OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo = null;
-    String openFileKey = null;
-    OmKeyInfo omKeyInfo = null;
-    String multipartKey = null;
-    OmMultipartKeyInfo multipartKeyInfo = null;
-    Result result;
-    OmBucketInfo omBucketInfo;
-    OmBucketInfo copyBucketInfo = null;
-    try {
-      keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
-      volumeName = keyArgs.getVolumeName();
-      bucketName = keyArgs.getBucketName();
-
-      // TODO to support S3 ACL later.
-      acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
-          volumeName, bucketName);
-
-      validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
-
-      String fileName = OzoneFSUtils.getFileName(keyName);
-      Iterator<Path> pathComponents = Paths.get(keyName).iterator();
-      String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
-      omBucketInfo = omMetadataManager.getBucketTable().get(bucketKey);
-      long bucketId = omBucketInfo.getObjectID();
-      long parentID = OMFileRequest.getParentID(bucketId, pathComponents,
-              keyName, omMetadataManager);
-
-      String uploadID = keyArgs.getMultipartUploadID();
-      multipartKey = omMetadataManager.getMultipartKey(parentID,
-          fileName, uploadID);
-
-      multipartKeyInfo = omMetadataManager.getMultipartInfoTable()
-          .get(multipartKey);
-
-      long clientID = multipartCommitUploadPartRequest.getClientID();
-
-      openFileKey = omMetadataManager.getOpenFileName(parentID, fileName,
-          clientID);
-
-      omKeyInfo = OMFileRequest.getOmKeyInfoFromFileTable(true,
-              omMetadataManager, openFileKey, keyName);
-
-      if (omKeyInfo == null) {
-        throw new OMException("Failed to commit Multipart Upload key, as " +
-            openFileKey + " entry is not found in the openFileTable",
-            KEY_NOT_FOUND);
-      }
-
-      // set the data size and location info list
-      omKeyInfo.setDataSize(keyArgs.getDataSize());
-      omKeyInfo.updateLocationInfoList(keyArgs.getKeyLocationsList().stream()
-          .map(OmKeyLocationInfo::getFromProtobuf)
-          .collect(Collectors.toList()), true);
-      // Set Modification time
-      omKeyInfo.setModificationTime(keyArgs.getModificationTime());
-      // Set the UpdateID to current transactionLogIndex
-      omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
-
-      /**
-       * Format of PartName stored into MultipartInfoTable is,
-       * "<parentID>/fileName + ClientID".
-       *
-       * Contract is that all part names present in a multipart info will
-       * have same key prefix path.
-       *
-       * For example:
-       *        /vol1/buck1/a/b/c/part-1, /vol1/buck1/a/b/c/part-2,
-       *        /vol1/buck1/a/b/c/part-n
-       */
-      String ozoneFileKey = omMetadataManager.getOzonePathKey(parentID,
-          fileName);
-      dbPartName = ozoneFileKey + clientID;
-
-      if (multipartKeyInfo == null) {
-        // This can occur when user started uploading part by the time commit
-        // of that part happens, in between the user might have requested
-        // abort multipart upload. If we just throw exception, then the data
-        // will not be garbage collected, so move this part to delete table
-        // and throw error
-        // Move this part to delete table.
-        throw new OMException("No such Multipart upload is with specified " +
-            "uploadId " + uploadID,
-            OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
-      }
-
-      int partNumber = keyArgs.getMultipartNumber();
-      oldPartKeyInfo = multipartKeyInfo.getPartKeyInfo(partNumber);
-
-      // Build this multipart upload part info.
-      OzoneManagerProtocolProtos.PartKeyInfo.Builder partKeyInfo =
-          OzoneManagerProtocolProtos.PartKeyInfo.newBuilder();
-      partKeyInfo.setPartName(dbPartName);
-      partKeyInfo.setPartNumber(partNumber);
-      partKeyInfo.setPartKeyInfo(omKeyInfo.getProtobuf(fileName,
-          getOmRequest().getVersion()));
-
-      // Add this part information in to multipartKeyInfo.
-      multipartKeyInfo.addPartKeyInfo(partNumber, partKeyInfo.build());
-
-      // Set the UpdateID to current transactionLogIndex
-      multipartKeyInfo.setUpdateID(trxnLogIndex,
-          ozoneManager.isRatisEnabled());
-
-      // OldPartKeyInfo will be deleted. Its updateID will be set in
-      // S3MultipartUplodaCommitPartResponse before being added to
-      // DeletedKeyTable.
-
-      // Delete from open key table and add it to multipart info table.
-      // No need to add cache entries to delete table, as no
-      // read/write requests that info for validation.
-      omMetadataManager.getMultipartInfoTable().addCacheEntry(
-          new CacheKey<>(multipartKey),
-          new CacheValue<>(Optional.of(multipartKeyInfo),
-              trxnLogIndex));
-
-      omMetadataManager.getOpenKeyTable().addCacheEntry(
-          new CacheKey<>(openFileKey),
-          new CacheValue<>(Optional.absent(), trxnLogIndex));
-
-      long scmBlockSize = ozoneManager.getScmBlockSize();
-      int factor = omKeyInfo.getFactor().getNumber();
-      omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
-      // Block was pre-requested and UsedBytes updated when createKey and
-      // AllocatedBlock. The space occupied by the Key shall be based on
-      // the actual Key size, and the total Block size applied before should
-      // be subtracted.
-      long correctedSpace = omKeyInfo.getDataSize() * factor -
-          keyArgs.getKeyLocationsList().size() * scmBlockSize * factor;
-      omBucketInfo.incrUsedBytes(correctedSpace);
-
-      // Prepare response. Sets user given full key part name in 'partName'
-      // attribute in response object.
-      String fullOzoneKeyName = omMetadataManager.getOzoneKey(
-              volumeName, bucketName, keyName);
-      fullKeyPartName = fullOzoneKeyName + clientID;
-      omResponse.setCommitMultiPartUploadResponse(
-          MultipartCommitUploadPartResponse.newBuilder()
-              .setPartName(fullKeyPartName));
-
-      omClientResponse = new S3MultipartUploadCommitPartResponseWithFSO(
-          omResponse.build(), multipartKey, openFileKey,
-          multipartKeyInfo, oldPartKeyInfo, omKeyInfo,
-          ozoneManager.isRatisEnabled(),
-          omBucketInfo.copyObject());
+  protected String getOpenKey(String volumeName, String bucketName,
+      String keyName, OMMetadataManager omMetadataManager, long clientID)
+      throws IOException {
+
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    Iterator<Path> pathComponents = Paths.get(keyName).iterator();
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+        omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketId = omBucketInfo.getObjectID();
+    long parentID = OMFileRequest
+        .getParentID(bucketId, pathComponents, keyName, omMetadataManager);
+
+    return omMetadataManager.getOpenFileName(parentID, fileName, clientID);
+  }
 
-      result = Result.SUCCESS;
-    } catch (IOException ex) {
-      result = Result.FAILURE;
-      exception = ex;
-      omClientResponse = new S3MultipartUploadCommitPartResponseWithFSO(
-          createErrorOMResponse(omResponse, exception), multipartKey,
-          openFileKey, multipartKeyInfo, oldPartKeyInfo, omKeyInfo,
-          ozoneManager.isRatisEnabled(), copyBucketInfo);
-    } finally {
-      addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
-          omDoubleBufferHelper);
-      if (acquiredLock) {
-        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK,
-            volumeName, bucketName);
-      }
-    }
+  @Override
+  protected OmKeyInfo getOmKeyInfo(OMMetadataManager omMetadataManager,
+      String openKey, String keyName) throws IOException {
 
-    logResult(ozoneManager, multipartCommitUploadPartRequest, keyArgs,
-            auditMap, volumeName, bucketName, keyName, exception,
-            fullKeyPartName, result);
+    return OMFileRequest.getOmKeyInfoFromFileTable(true,
+        omMetadataManager, openKey, keyName);
+  }
 
-    return omClientResponse;
+  @Override
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  protected S3MultipartUploadCommitPartResponse getOmClientResponse(
+      OzoneManager ozoneManager,
+      OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo, String openKey,
+      OmKeyInfo omKeyInfo, String multipartKey,
+      OmMultipartKeyInfo multipartKeyInfo,
+      OzoneManagerProtocolProtos.OMResponse build, OmBucketInfo omBucketInfo) {
+
+    return new S3MultipartUploadCommitPartResponseWithFSO(build, multipartKey,
+        openKey, multipartKeyInfo, oldPartKeyInfo, omKeyInfo,
+        ozoneManager.isRatisEnabled(), omBucketInfo);
   }
 
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
index dcd0b7c..061bb7b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
@@ -141,18 +141,19 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
       String ozoneKey = omMetadataManager.getOzoneKey(
           volumeName, bucketName, keyName);
 
+      String dbOzoneKey =
+          getDBOzoneKey(omMetadataManager, volumeName, bucketName, keyName);
+
+      String dbMultipartOpenKey =
+          getDBMultipartOpenKey(volumeName, bucketName, keyName, uploadID,
+              omMetadataManager);
+
       OmMultipartKeyInfo multipartKeyInfo = omMetadataManager
           .getMultipartInfoTable().get(multipartKey);
 
       // Check for directory exists with same name, if it exists throw error. 
-      if (ozoneManager.getEnableFileSystemPaths()) {
-        if (checkDirectoryAlreadyExists(volumeName, bucketName, keyName,
-                omMetadataManager)) {
-          throw new OMException("Can not Complete MPU for file: " + keyName +
-                  " as there is already directory in the given path",
-                  NOT_A_FILE);
-        }
-      }
+      checkDirectoryAlreadyExists(ozoneManager, volumeName, bucketName, keyName,
+          omMetadataManager);
 
       if (multipartKeyInfo == null) {
         throw new OMException(
@@ -184,10 +185,10 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
 
         // All parts have same replication information. Here getting from last
         // part.
-        OmKeyInfo omKeyInfo = getOmKeyInfo(ozoneManager, trxnLogIndex, keyArgs,
-                volumeName, bucketName, keyName, multipartKey,
-                omMetadataManager, ozoneKey, partKeyInfoMap, partLocationInfos,
-                dataSize);
+        OmKeyInfo omKeyInfo =
+            getOmKeyInfo(ozoneManager, trxnLogIndex, keyArgs, volumeName,
+                bucketName, keyName, dbMultipartOpenKey, omMetadataManager,
+                dbOzoneKey, partKeyInfoMap, partLocationInfos, dataSize);
 
         //Find all unused parts.
         List<OmKeyInfo> unUsedParts = new ArrayList<>();
@@ -199,8 +200,8 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
           }
         }
 
-        updateCache(omMetadataManager, ozoneKey, multipartKey, omKeyInfo,
-            trxnLogIndex);
+        updateCache(omMetadataManager, dbOzoneKey, dbMultipartOpenKey,
+            multipartKey, omKeyInfo, trxnLogIndex);
 
         omResponse.setCompleteMultiPartUploadResponse(
             MultipartUploadCompleteResponse.newBuilder()
@@ -209,8 +210,9 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
                 .setKey(keyName)
                 .setHash(DigestUtils.sha256Hex(keyName)));
 
-        omClientResponse = new S3MultipartUploadCompleteResponse(
-            omResponse.build(), multipartKey, omKeyInfo, unUsedParts);
+        omClientResponse =
+            getOmClientResponse(multipartKey, omResponse, dbMultipartOpenKey,
+                omKeyInfo, unUsedParts);
 
         result = Result.SUCCESS;
       } else {
@@ -223,8 +225,7 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
     } catch (IOException ex) {
       result = Result.FAILURE;
       exception = ex;
-      omClientResponse = new S3MultipartUploadCompleteResponse(
-          createErrorOMResponse(omResponse, exception));
+      omClientResponse = getOmClientResponse(omResponse, exception);
     } finally {
       addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
           omDoubleBufferHelper);
@@ -240,6 +241,33 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
     return omClientResponse;
   }
 
+  protected S3MultipartUploadCompleteResponse getOmClientResponse(
+      OMResponse.Builder omResponse, IOException exception) {
+    return new S3MultipartUploadCompleteResponse(
+        createErrorOMResponse(omResponse, exception));
+  }
+
+  protected OMClientResponse getOmClientResponse(String multipartKey,
+      OMResponse.Builder omResponse, String dbMultipartOpenKey,
+      OmKeyInfo omKeyInfo, List<OmKeyInfo> unUsedParts) {
+
+    return new S3MultipartUploadCompleteResponse(omResponse.build(),
+        multipartKey, dbMultipartOpenKey, omKeyInfo, unUsedParts);
+  }
+
+  protected void checkDirectoryAlreadyExists(OzoneManager ozoneManager,
+      String volumeName, String bucketName, String keyName,
+      OMMetadataManager omMetadataManager) throws IOException {
+    if (ozoneManager.getEnableFileSystemPaths()) {
+      if (checkDirectoryAlreadyExists(volumeName, bucketName, keyName,
+              omMetadataManager)) {
+        throw new OMException("Can not Complete MPU for file: " + keyName +
+                " as there is already directory in the given path",
+                NOT_A_FILE);
+      }
+    }
+  }
+
   @SuppressWarnings("checkstyle:ParameterNumber")
   protected void logResult(OzoneManager ozoneManager,
       MultipartUploadCompleteRequest multipartUploadCompleteRequest,
@@ -274,7 +302,7 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
   @SuppressWarnings("checkstyle:ParameterNumber")
   protected OmKeyInfo getOmKeyInfo(OzoneManager ozoneManager, long trxnLogIndex,
       KeyArgs keyArgs, String volumeName, String bucketName, String keyName,
-      String multipartKey, OMMetadataManager omMetadataManager,
+      String multipartOpenKey, OMMetadataManager omMetadataManager,
       String ozoneKey, TreeMap<Integer, PartKeyInfo> partKeyInfoMap,
       List<OmKeyLocationInfo> partLocationInfos, long dataSize)
           throws IOException {
@@ -291,7 +319,7 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
           OmKeyLocationInfoGroup(0, partLocationInfos, true);
 
       // Get the objectID of the key from OpenKeyTable
-      OmKeyInfo dbOpenKeyInfo = getOmKeyInfoFromOpenKeyTable(multipartKey,
+      OmKeyInfo dbOpenKeyInfo = getOmKeyInfoFromOpenKeyTable(multipartOpenKey,
               keyName, omMetadataManager);
 
       // A newly created key, this is the first version.
@@ -332,6 +360,18 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
     // FSO is disabled. Do nothing.
   }
 
+  protected String getDBOzoneKey(OMMetadataManager omMetadataManager,
+      String volumeName, String bucketName, String keyName) throws IOException {
+    return omMetadataManager.getOzoneKey(volumeName, bucketName, keyName);
+  }
+
+  protected String getDBMultipartOpenKey(String volumeName, String bucketName,
+      String keyName, String uploadID, OMMetadataManager omMetadataManager)
+      throws IOException {
+    return omMetadataManager
+        .getMultipartKey(volumeName, bucketName, keyName, uploadID);
+  }
+
   protected OmKeyInfo getOmKeyInfoFromKeyTable(String dbOzoneKey,
       String keyName, OMMetadataManager omMetadataManager) throws IOException {
     return omMetadataManager.getKeyTable().get(dbOzoneKey);
@@ -342,7 +382,15 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
     return omMetadataManager.getOpenKeyTable().get(dbMultipartKey);
   }
 
-  protected int getPartsListSize(String requestedVolume,
+  protected void addKeyTableCacheEntry(OMMetadataManager omMetadataManager,
+      String dbOzoneKey, OmKeyInfo omKeyInfo, long transactionLogIndex) {
+
+    // Add key entry to file table.
+    omMetadataManager.getKeyTable().addCacheEntry(new CacheKey<>(dbOzoneKey),
+        new CacheValue<>(Optional.of(omKeyInfo), transactionLogIndex));
+  }
+
+  private int getPartsListSize(String requestedVolume,
       String requestedBucket, String keyName, String ozoneKey,
       List<Integer> partNumbers,
       List<OzoneManagerProtocolProtos.Part> partsList) throws OMException {
@@ -368,7 +416,7 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
   }
 
   @SuppressWarnings("checkstyle:ParameterNumber")
-  protected long getMultipartDataSize(String requestedVolume,
+  private long getMultipartDataSize(String requestedVolume,
       String requestedBucket, String keyName, String ozoneKey,
       TreeMap<Integer, PartKeyInfo> partKeyInfoMap,
       int partsListSize, List<OmKeyLocationInfo> partLocationInfos,
@@ -386,8 +434,7 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
 
       String dbPartName = null;
       if (partKeyInfo != null) {
-        dbPartName = preparePartName(requestedVolume, requestedBucket, keyName,
-                partKeyInfo, ozoneManager.getMetadataManager());
+        dbPartName = partKeyInfo.getPartName();
       }
       if (!StringUtils.equals(partName, dbPartName)) {
         String omPartName = partKeyInfo == null ? null : dbPartName;
@@ -430,34 +477,26 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
     return dataSize;
   }
 
-  protected String preparePartName(String requestedVolume,
-      String requestedBucket, String keyName, PartKeyInfo partKeyInfo,
-      OMMetadataManager omMetadataManager) {
-
-    return partKeyInfo.getPartName();
-  }
-
   private static String failureMessage(String volume, String bucket,
       String keyName) {
     return "Complete Multipart Upload Failed: volume: " +
         volume + " bucket: " + bucket + " key: " + keyName;
   }
 
-  protected void updateCache(OMMetadataManager omMetadataManager,
-      String ozoneKey, String multipartKey, OmKeyInfo omKeyInfo,
-      long transactionLogIndex) {
+  private void updateCache(OMMetadataManager omMetadataManager,
+      String dbOzoneKey, String dbMultipartOpenKey, String dbMultipartKey,
+      OmKeyInfo omKeyInfo, long transactionLogIndex) {
     // Update cache.
     // 1. Add key entry to key table.
     // 2. Delete multipartKey entry from openKeyTable and multipartInfo table.
-    omMetadataManager.getKeyTable().addCacheEntry(
-        new CacheKey<>(ozoneKey),
-        new CacheValue<>(Optional.of(omKeyInfo), transactionLogIndex));
+    addKeyTableCacheEntry(omMetadataManager, dbOzoneKey, omKeyInfo,
+        transactionLogIndex);
 
     omMetadataManager.getOpenKeyTable().addCacheEntry(
-        new CacheKey<>(multipartKey),
+        new CacheKey<>(dbMultipartOpenKey),
         new CacheValue<>(Optional.absent(), transactionLogIndex));
     omMetadataManager.getMultipartInfoTable().addCacheEntry(
-        new CacheKey<>(multipartKey),
+        new CacheKey<>(dbMultipartKey),
         new CacheValue<>(Optional.absent(), transactionLogIndex));
   }
 }
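
[editor's note] The hunks above turn S3MultipartUploadCompleteRequest into a template: the shared complete-MPU flow stays in the base class, while DB-key construction (getDBOzoneKey, getDBMultipartOpenKey), the key-table cache update (addKeyTableCacheEntry) and response construction (getOmClientResponse) become protected hooks that the FSO request can override. Below is a minimal, self-contained sketch of that pattern only; the class names and key formats are illustrative assumptions, not the actual Ozone code.

    // Sketch: the base class owns the workflow, subclasses override only key formats.
    public class CompleteFlowSketch {

      // Object-store layout: full /volume/bucket/key path.
      protected String dbOzoneKey(String volume, String bucket, String key) {
        return "/" + volume + "/" + bucket + "/" + key;
      }

      // Multipart open key in the same flat format, suffixed with the upload id.
      protected String dbMultipartOpenKey(String volume, String bucket,
          String key, String uploadId) {
        return dbOzoneKey(volume, bucket, key) + "/" + uploadId;
      }

      // Shared flow: compute both keys via the hooks, then "commit" the upload.
      public final String complete(String volume, String bucket, String key,
          String uploadId) {
        return "put " + dbOzoneKey(volume, bucket, key)
            + ", delete " + dbMultipartOpenKey(volume, bucket, key, uploadId);
      }

      public static void main(String[] args) {
        System.out.println(new CompleteFlowSketch()
            .complete("vol1", "bucket1", "dir1/file1", "upload-1"));
        System.out.println(new FsoCompleteFlowSketch()
            .complete("vol1", "bucket1", "dir1/file1", "upload-1"));
      }
    }

    // Prefix (FSO) layout variant: keys are parentId/fileName based.
    class FsoCompleteFlowSketch extends CompleteFlowSketch {

      private String fileName(String key) {
        int idx = key.lastIndexOf('/');
        return idx < 0 ? key : key.substring(idx + 1);
      }

      @Override
      protected String dbOzoneKey(String volume, String bucket, String key) {
        long parentId = 512L; // stand-in for a directory-table lookup
        return parentId + "/" + fileName(key);
      }

      @Override
      protected String dbMultipartOpenKey(String volume, String bucket,
          String key, String uploadId) {
        return dbOzoneKey(volume, bucket, key) + "/" + uploadId;
      }
    }
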
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java
index fd644c4..12539dc 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java
@@ -18,42 +18,27 @@
 
 package org.apache.hadoop.ozone.om.request.s3.multipart;
 
-import com.google.common.base.Optional;
-import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
-import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.s3.multipart.S3MultipartUploadCompleteResponse;
 import org.apache.hadoop.ozone.om.response.s3.multipart.S3MultipartUploadCompleteResponseWithFSO;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadCompleteRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadCompleteResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.nio.file.Path;
 import java.nio.file.Paths;
-import java.util.ArrayList;
+import java.util.Iterator;
 import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
 import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS;
 
 /**
@@ -70,160 +55,26 @@ public class S3MultipartUploadCompleteRequestWithFSO
   }
 
   @Override
-  @SuppressWarnings("methodlength")
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
-    MultipartUploadCompleteRequest multipartUploadCompleteRequest =
-        getOmRequest().getCompleteMultiPartUploadRequest();
-
-    KeyArgs keyArgs = multipartUploadCompleteRequest.getKeyArgs();
-
-    List<OzoneManagerProtocolProtos.Part> partsList =
-        multipartUploadCompleteRequest.getPartsListList();
-    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
-
-    String volumeName = keyArgs.getVolumeName();
-    String bucketName = keyArgs.getBucketName();
-    final String requestedVolume = volumeName;
-    final String requestedBucket = bucketName;
-    String keyName = keyArgs.getKeyName();
-    String uploadID = keyArgs.getMultipartUploadID();
-    String dbMultipartKey;
-
-    ozoneManager.getMetrics().incNumCompleteMultipartUploads();
-
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-
-    boolean acquiredLock = false;
-    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
-        getOmRequest());
-    OMClientResponse omClientResponse = null;
-    IOException exception = null;
-    Result result;
-    try {
-      keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
-      volumeName = keyArgs.getVolumeName();
-      bucketName = keyArgs.getBucketName();
-
-      acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
-          volumeName, bucketName);
-
-      validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
-
-      String fileName = OzoneFSUtils.getFileName(keyName);
-      Path keyPath = Paths.get(keyName);
-      OMFileRequest.OMPathInfoWithFSO pathInfoFSO =
-              OMFileRequest.verifyDirectoryKeysInPath(omMetadataManager,
-                      volumeName, bucketName, keyName, keyPath);
-      long parentID = pathInfoFSO.getLastKnownParentId();
-
-      dbMultipartKey = omMetadataManager.getMultipartKey(parentID,
-              fileName, uploadID);
-
-      String dbOzoneKey = omMetadataManager.getOzonePathKey(parentID, fileName);
-
-      String ozoneKey = omMetadataManager.getOzoneKey(
-              volumeName, bucketName, keyName);
-
-      OmMultipartKeyInfo multipartKeyInfo =
-              omMetadataManager.getMultipartInfoTable().get(dbMultipartKey);
-
-      // Check for directory exists with same name, if it exists throw error.
-      if (pathInfoFSO.getDirectoryResult() == DIRECTORY_EXISTS) {
-        throw new OMException("Can not Complete MPU for file: " + keyName +
-                " as there is already directory in the given path",
-                NOT_A_FILE);
-      }
-
-      if (multipartKeyInfo == null) {
-        throw new OMException(
-            failureMessage(requestedVolume, requestedBucket, keyName),
-            OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
-      }
-      TreeMap<Integer, PartKeyInfo> partKeyInfoMap =
-          multipartKeyInfo.getPartKeyInfoMap();
-
-      if (partsList.size() > 0) {
-        if (partKeyInfoMap.size() == 0) {
-          LOG.error("Complete MultipartUpload failed for key {} , MPU Key has" +
-                  " no parts in OM, parts given to upload are {}", ozoneKey,
-              partsList);
-          throw new OMException(
-              failureMessage(requestedVolume, requestedBucket, keyName),
-              OMException.ResultCodes.INVALID_PART);
-        }
-
-        // First Check for Invalid Part Order.
-        List< Integer > partNumbers = new ArrayList<>();
-        int partsListSize = getPartsListSize(requestedVolume,
-                requestedBucket, keyName, ozoneKey, partNumbers, partsList);
-
-        List<OmKeyLocationInfo> partLocationInfos = new ArrayList<>();
-        long dataSize = getMultipartDataSize(requestedVolume, requestedBucket,
-                keyName, ozoneKey, partKeyInfoMap, partsListSize,
-                partLocationInfos, partsList, ozoneManager);
-
-        // All parts have same replication information. Here getting from last
-        // part.
-        OmKeyInfo omKeyInfo = getOmKeyInfo(ozoneManager, trxnLogIndex, keyArgs,
-                volumeName, bucketName, keyName, dbMultipartKey,
-                omMetadataManager, dbOzoneKey, partKeyInfoMap,
-                partLocationInfos, dataSize);
-
-        //Find all unused parts.
-        List< OmKeyInfo > unUsedParts = new ArrayList<>();
-        for (Map.Entry< Integer, PartKeyInfo > partKeyInfo :
-            partKeyInfoMap.entrySet()) {
-          if (!partNumbers.contains(partKeyInfo.getKey())) {
-            unUsedParts.add(OmKeyInfo
-                .getFromProtobuf(partKeyInfo.getValue().getPartKeyInfo()));
-          }
-        }
-
-        updateCache(omMetadataManager, dbOzoneKey, dbMultipartKey, omKeyInfo,
-            trxnLogIndex);
-
-        omResponse.setCompleteMultiPartUploadResponse(
-            MultipartUploadCompleteResponse.newBuilder()
-                .setVolume(requestedVolume)
-                .setBucket(requestedBucket)
-                .setKey(keyName)
-                .setHash(DigestUtils.sha256Hex(keyName)));
-
-        omClientResponse = new S3MultipartUploadCompleteResponseWithFSO(
-            omResponse.build(), dbMultipartKey, omKeyInfo, unUsedParts);
-
-        result = Result.SUCCESS;
-      } else {
-        throw new OMException(
-            failureMessage(requestedVolume, requestedBucket, keyName) +
-            " because of empty part list",
-            OMException.ResultCodes.INVALID_REQUEST);
-      }
-
-    } catch (IOException ex) {
-      result = Result.FAILURE;
-      exception = ex;
-      omClientResponse = new S3MultipartUploadCompleteResponseWithFSO(
-          createErrorOMResponse(omResponse, exception));
-    } finally {
-      addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
-          omDoubleBufferHelper);
-      if (acquiredLock) {
-        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK,
-            volumeName, bucketName);
-      }
+  protected void checkDirectoryAlreadyExists(OzoneManager ozoneManager,
+      String volumeName, String bucketName, String keyName,
+      OMMetadataManager omMetadataManager) throws IOException {
+
+    Path keyPath = Paths.get(keyName);
+    OMFileRequest.OMPathInfoWithFSO pathInfoFSO =
+        OMFileRequest.verifyDirectoryKeysInPath(omMetadataManager,
+            volumeName, bucketName, keyName, keyPath);
+    // Check for directory exists with same name, if it exists throw error.
+    if (pathInfoFSO.getDirectoryResult() == DIRECTORY_EXISTS) {
+      throw new OMException("Can not Complete MPU for file: " + keyName +
+          " as there is already directory in the given path",
+          NOT_A_FILE);
     }
-
-    logResult(ozoneManager, multipartUploadCompleteRequest, partsList,
-            auditMap, volumeName, bucketName, keyName, exception, result);
-
-    return omClientResponse;
   }
 
+  @Override
   protected OmKeyInfo getOmKeyInfoFromKeyTable(String dbOzoneFileKey,
       String keyName, OMMetadataManager omMetadataManager) throws IOException {
-    return OMFileRequest.getOmKeyInfoFromFileTable(true,
+    return OMFileRequest.getOmKeyInfoFromFileTable(false,
             omMetadataManager, dbOzoneFileKey, keyName);
   }
 
@@ -235,21 +86,12 @@ public class S3MultipartUploadCompleteRequestWithFSO
   }
 
   @Override
-  protected void updateCache(OMMetadataManager omMetadataManager,
-      String ozoneKey, String multipartKey, OmKeyInfo omKeyInfo,
-      long transactionLogIndex) {
-    // Update cache.
-    // 1. Add key entry to key table.
-    // 2. Delete multipartKey entry from openKeyTable and multipartInfo table.
-    OMFileRequest.addFileTableCacheEntry(omMetadataManager, ozoneKey,
-            omKeyInfo, omKeyInfo.getFileName(), transactionLogIndex);
+  protected void addKeyTableCacheEntry(OMMetadataManager omMetadataManager,
+      String ozoneKey, OmKeyInfo omKeyInfo, long transactionLogIndex) {
 
-    omMetadataManager.getOpenKeyTable().addCacheEntry(
-            new CacheKey<>(multipartKey),
-            new CacheValue<>(Optional.absent(), transactionLogIndex));
-    omMetadataManager.getMultipartInfoTable().addCacheEntry(
-            new CacheKey<>(multipartKey),
-            new CacheValue<>(Optional.absent(), transactionLogIndex));
+    // Add key entry to file table.
+    OMFileRequest.addFileTableCacheEntry(omMetadataManager, ozoneKey, omKeyInfo,
+        omKeyInfo.getFileName(), transactionLogIndex);
   }
 
   @Override
@@ -261,24 +103,67 @@ public class S3MultipartUploadCompleteRequestWithFSO
   }
 
   @Override
-  protected String preparePartName(String requestedVolume,
-      String requestedBucket, String keyName, PartKeyInfo partKeyInfo,
-      OMMetadataManager omMetadataManager) {
+  protected String getDBOzoneKey(OMMetadataManager omMetadataManager,
+      String volumeName, String bucketName, String keyName)throws IOException {
 
-    String parentPath = OzoneFSUtils.getParent(keyName);
-    StringBuffer keyPath = new StringBuffer(parentPath);
-    String partFileName = OzoneFSUtils.getFileName(partKeyInfo.getPartName());
-    keyPath.append(partFileName);
+    long parentId =
+        getParentId(omMetadataManager, volumeName, bucketName, keyName);
+
+    String fileName = keyName;
+    Path filePath = Paths.get(keyName).getFileName();
+    if (filePath != null) {
+      fileName = filePath.toString();
+    }
+
+    return omMetadataManager.getOzonePathKey(parentId, fileName);
+  }
+
+  @Override
+  protected String getDBMultipartOpenKey(String volumeName, String bucketName,
+      String keyName, String uploadID, OMMetadataManager omMetadataManager)
+      throws IOException {
+
+    long parentId =
+        getParentId(omMetadataManager, volumeName, bucketName, keyName);
+
+    String fileName = keyName;
+    Path filePath = Paths.get(keyName).getFileName();
+    if (filePath != null) {
+      fileName = filePath.toString();
+    }
+
+    return omMetadataManager.getMultipartKey(parentId, fileName, uploadID);
+  }
+
+  @Override
+  protected S3MultipartUploadCompleteResponse getOmClientResponse(
+      OzoneManagerProtocolProtos.OMResponse.Builder omResponse,
+      IOException exception) {
+
+    return new S3MultipartUploadCompleteResponseWithFSO(
+        createErrorOMResponse(omResponse, exception));
+  }
+
+  @Override
+  protected OMClientResponse getOmClientResponse(String multipartKey,
+      OzoneManagerProtocolProtos.OMResponse.Builder omResponse,
+      String dbMultipartOpenKey, OmKeyInfo omKeyInfo,
+      List<OmKeyInfo> unUsedParts) {
 
-    return omMetadataManager.getOzoneKey(requestedVolume,
-        requestedBucket, keyPath.toString());
+    return new S3MultipartUploadCompleteResponseWithFSO(omResponse.build(),
+        multipartKey, dbMultipartOpenKey, omKeyInfo, unUsedParts);
   }
 
+  private long getParentId(OMMetadataManager omMetadataManager,
+      String volumeName, String bucketName, String keyName) throws IOException {
 
-  private static String failureMessage(String volume, String bucket,
-                                       String keyName) {
-    return "Complete Multipart Upload Failed: volume: " +
-        volume + " bucket: " + bucket + " key: " + keyName;
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+        omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketId = omBucketInfo.getObjectID();
+    Iterator<Path> pathComponents = Paths.get(keyName).iterator();
+    return OMFileRequest
+        .getParentID(bucketId, pathComponents, keyName, omMetadataManager);
   }
 }
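
[editor's note] In the FSO overrides above, both DB keys are built by resolving the key's parent directory id (starting from the bucket objectID and walking each path component) and combining it with the trailing file name, plus the uploadID for the multipart open key. A rough standalone illustration of that resolution follows, using an in-memory map in place of the directory table; the map layout and helper names are assumptions made only for this sketch.

    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.HashMap;
    import java.util.Map;

    public class FsoKeyResolutionSketch {

      // Pretend directory table: "<parentId>/<dirName>" -> objectId of that directory.
      private static final Map<String, Long> DIR_TABLE = new HashMap<>();

      public static void main(String[] args) {
        long bucketId = 1001L;
        DIR_TABLE.put(bucketId + "/a", 2001L);
        DIR_TABLE.put(2001L + "/b", 3001L);

        String keyName = "a/b/file1";
        long parentId = resolveParentId(bucketId, keyName);

        Path file = Paths.get(keyName).getFileName();
        String fileName = file == null ? keyName : file.toString();

        // FSO file-table key and multipart open key formats (illustrative).
        System.out.println("file key:     " + parentId + "/" + fileName);
        System.out.println("mpu open key: " + parentId + "/" + fileName + "/upload-1");
      }

      // Walk every component except the last one, mapping directory names to ids.
      private static long resolveParentId(long bucketId, String keyName) {
        long current = bucketId;
        Path path = Paths.get(keyName);
        int dirCount = path.getNameCount() - 1;
        for (int i = 0; i < dirCount; i++) {
          String dirName = path.getName(i).toString();
          Long next = DIR_TABLE.get(current + "/" + dirName);
          if (next == null) {
            throw new IllegalStateException("missing directory: " + dirName);
          }
          current = next;
        }
        return current;
      }
    }
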
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponseWithFSO.java
index 69e0176..c655c47 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponseWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponseWithFSO.java
@@ -33,25 +33,27 @@ import java.io.IOException;
 import java.util.List;
 
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE;
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.MULTIPARTFILEINFO_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.MULTIPARTINFO_TABLE;
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
 
 /**
  * Response for S3 Initiate Multipart Upload request for prefix layout.
  */
 @CleanupTableInfo(cleanupTables = {DIRECTORY_TABLE, OPEN_FILE_TABLE,
-        MULTIPARTFILEINFO_TABLE})
+    MULTIPARTINFO_TABLE})
 public class S3InitiateMultipartUploadResponseWithFSO extends
         S3InitiateMultipartUploadResponse {
   private List<OmDirectoryInfo> parentDirInfos;
+  private String mpuDBKey;
 
   public S3InitiateMultipartUploadResponseWithFSO(
       @Nonnull OMResponse omResponse,
       @Nonnull OmMultipartKeyInfo omMultipartKeyInfo,
-      @Nonnull OmKeyInfo omKeyInfo,
+      @Nonnull OmKeyInfo omKeyInfo, @Nonnull String mpuDBKey,
       @Nonnull List<OmDirectoryInfo> parentDirInfos) {
     super(omResponse, omMultipartKeyInfo, omKeyInfo);
     this.parentDirInfos = parentDirInfos;
+    this.mpuDBKey = mpuDBKey;
   }
 
   /**
@@ -79,11 +81,10 @@ public class S3InitiateMultipartUploadResponseWithFSO extends
       }
     }
 
-    String multipartFileKey =
-            OMFileRequest.addToOpenFileTable(omMetadataManager, batchOperation,
-                    getOmKeyInfo(), getOmMultipartKeyInfo().getUploadID());
+    OMFileRequest.addToOpenFileTable(omMetadataManager, batchOperation,
+        getOmKeyInfo(), getOmMultipartKeyInfo().getUploadID());
 
     omMetadataManager.getMultipartInfoTable().putWithBatch(batchOperation,
-            multipartFileKey, getOmMultipartKeyInfo());
+        mpuDBKey, getOmMultipartKeyInfo());
   }
 }
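
[editor's note] With this change the initiate response keeps the open-file entry under the prefix-layout key (parentID/fileName/uploadID) but stores the multipartInfoTable entry under the ordinary ozone key format passed in as mpuDBKey, which is the point of HDDS-5201. A toy illustration with plain maps standing in for the two tables; all key strings and values here are placeholders.

    import java.util.HashMap;
    import java.util.Map;

    public class InitiateMpuKeysSketch {
      public static void main(String[] args) {
        Map<String, String> openFileTable = new HashMap<>();
        Map<String, String> multipartInfoTable = new HashMap<>();

        long parentId = 3001L;
        String fileName = "file1";
        String uploadId = "upload-1";

        // Open-file entry keeps the prefix-layout key.
        openFileTable.put(parentId + "/" + fileName + "/" + uploadId, "openKeyInfo");

        // MPU info entry now uses the ordinary ozone key format.
        multipartInfoTable.put("/vol1/bucket1/a/b/" + fileName + "/" + uploadId,
            "multipartKeyInfo");

        System.out.println(openFileTable);
        System.out.println(multipartInfoTable);
      }
    }
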
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java
index d641875..1e4d395 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java
@@ -49,15 +49,18 @@ import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE;
 public class S3MultipartUploadAbortResponse extends OMClientResponse {
 
   private String multipartKey;
+  private String multipartOpenKey;
   private OmMultipartKeyInfo omMultipartKeyInfo;
   private boolean isRatisEnabled;
   private OmBucketInfo omBucketInfo;
 
   public S3MultipartUploadAbortResponse(@Nonnull OMResponse omResponse,
-      String multipartKey, @Nonnull OmMultipartKeyInfo omMultipartKeyInfo,
-      boolean isRatisEnabled, @Nonnull OmBucketInfo omBucketInfo) {
+      String multipartKey, String multipartOpenKey,
+      @Nonnull OmMultipartKeyInfo omMultipartKeyInfo, boolean isRatisEnabled,
+      @Nonnull OmBucketInfo omBucketInfo) {
     super(omResponse);
     this.multipartKey = multipartKey;
+    this.multipartOpenKey = multipartOpenKey;
     this.omMultipartKeyInfo = omMultipartKeyInfo;
     this.isRatisEnabled = isRatisEnabled;
     this.omBucketInfo = omBucketInfo;
@@ -78,7 +81,7 @@ public class S3MultipartUploadAbortResponse extends OMClientResponse {
 
     // Delete from openKey table and multipart info table.
     omMetadataManager.getOpenKeyTable().deleteWithBatch(batchOperation,
-        multipartKey);
+        multipartOpenKey);
     omMetadataManager.getMultipartInfoTable().deleteWithBatch(batchOperation,
         multipartKey);
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponseWithFSO.java
index 295373b..9e43dea 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponseWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponseWithFSO.java
@@ -26,23 +26,24 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRespo
 import javax.annotation.Nonnull;
 
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE;
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.MULTIPARTFILEINFO_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.MULTIPARTINFO_TABLE;
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
 
 /**
  * Response for Multipart Abort Request - prefix layout.
  */
 @CleanupTableInfo(cleanupTables = {OPEN_FILE_TABLE, DELETED_TABLE,
-    MULTIPARTFILEINFO_TABLE})
+    MULTIPARTINFO_TABLE})
 public class S3MultipartUploadAbortResponseWithFSO
     extends S3MultipartUploadAbortResponse {
 
   public S3MultipartUploadAbortResponseWithFSO(@Nonnull OMResponse omResponse,
-      String multipartKey, @Nonnull OmMultipartKeyInfo omMultipartKeyInfo,
-      boolean isRatisEnabled, @Nonnull OmBucketInfo omBucketInfo) {
+      String multipartKey, String multipartOpenKey,
+      @Nonnull OmMultipartKeyInfo omMultipartKeyInfo, boolean isRatisEnabled,
+      @Nonnull OmBucketInfo omBucketInfo) {
 
-    super(omResponse, multipartKey, omMultipartKeyInfo, isRatisEnabled,
-        omBucketInfo);
+    super(omResponse, multipartKey, multipartOpenKey, omMultipartKeyInfo,
+        isRatisEnabled, omBucketInfo);
   }
 
   /**
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java
index c2b119b..bcfff8b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java
@@ -69,6 +69,9 @@ public class S3MultipartUploadCommitPartResponse extends OMClientResponse {
    * @param openKey
    * @param omMultipartKeyInfo
    * @param oldPartKeyInfo
+   * @param openPartKeyInfoToBeDeleted
+   * @param isRatisEnabled
+   * @param omBucketInfo
    */
   @SuppressWarnings("checkstyle:ParameterNumber")
   public S3MultipartUploadCommitPartResponse(@Nonnull OMResponse omResponse,
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponseWithFSO.java
index 746dad0..6f60498 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponseWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponseWithFSO.java
@@ -30,13 +30,13 @@ import javax.annotation.Nullable;
 
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE;
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.MULTIPARTFILEINFO_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.MULTIPARTINFO_TABLE;
 
 /**
- * Response for S3MultipartUploadCommitPart request.
+ * Response for S3MultipartUploadCommitPartWithFSO request.
  */
 @CleanupTableInfo(cleanupTables = {OPEN_FILE_TABLE, DELETED_TABLE,
-    MULTIPARTFILEINFO_TABLE})
+    MULTIPARTINFO_TABLE})
 public class S3MultipartUploadCommitPartResponseWithFSO
         extends S3MultipartUploadCommitPartResponse {
 
@@ -50,6 +50,9 @@ public class S3MultipartUploadCommitPartResponseWithFSO
    * @param openKey
    * @param omMultipartKeyInfo
    * @param oldPartKeyInfo
+   * @param openPartKeyInfoToBeDeleted
+   * @param isRatisEnabled
+   * @param omBucketInfo
    */
   @SuppressWarnings("checkstyle:ParameterNumber")
   public S3MultipartUploadCommitPartResponseWithFSO(
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java
index f593885..f89fea9 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java
@@ -39,22 +39,30 @@ import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE;
 
 /**
  * Response for Multipart Upload Complete request.
+ *
+ * This performs:
+ * 1) Delete multipart key from OpenKeyTable, MPUTable,
+ * 2) Add key to KeyTable,
+ * 3) Delete unused parts.
  */
 @CleanupTableInfo(cleanupTables = {OPEN_KEY_TABLE, KEY_TABLE, DELETED_TABLE,
     MULTIPARTINFO_TABLE})
 public class S3MultipartUploadCompleteResponse extends OMClientResponse {
   private String multipartKey;
+  private String multipartOpenKey;
   private OmKeyInfo omKeyInfo;
   private List<OmKeyInfo> partsUnusedList;
 
   public S3MultipartUploadCompleteResponse(
       @Nonnull OMResponse omResponse,
       @Nonnull String multipartKey,
+      @Nonnull String multipartOpenKey,
       @Nonnull OmKeyInfo omKeyInfo,
       @Nonnull List<OmKeyInfo> unUsedParts) {
     super(omResponse);
     this.partsUnusedList = unUsedParts;
     this.multipartKey = multipartKey;
+    this.multipartOpenKey = multipartOpenKey;
     this.omKeyInfo = omKeyInfo;
   }
 
@@ -71,16 +79,16 @@ public class S3MultipartUploadCompleteResponse extends OMClientResponse {
   public void addToDBBatch(OMMetadataManager omMetadataManager,
       BatchOperation batchOperation) throws IOException {
 
+    // 1. Delete multipart key from OpenKeyTable, MPUTable
     omMetadataManager.getOpenKeyTable().deleteWithBatch(batchOperation,
-        multipartKey);
+        multipartOpenKey);
     omMetadataManager.getMultipartInfoTable().deleteWithBatch(batchOperation,
         multipartKey);
 
-    String ozoneKey = omMetadataManager.getOzoneKey(omKeyInfo.getVolumeName(),
-        omKeyInfo.getBucketName(), omKeyInfo.getKeyName());
-    omMetadataManager.getKeyTable().putWithBatch(batchOperation, ozoneKey,
-        omKeyInfo);
+    // 2. Add key to KeyTable
+    String ozoneKey = addToKeyTable(omMetadataManager, batchOperation);
 
+    // 3. Delete unused parts
     if (!partsUnusedList.isEmpty()) {
       // Add unused parts to deleted key table.
       RepeatedOmKeyInfo repeatedOmKeyInfo = omMetadataManager.getDeletedTable()
@@ -96,6 +104,16 @@ public class S3MultipartUploadCompleteResponse extends OMClientResponse {
     }
   }
 
+  protected String addToKeyTable(OMMetadataManager omMetadataManager,
+      BatchOperation batchOperation) throws IOException {
+
+    String ozoneKey = omMetadataManager.getOzoneKey(omKeyInfo.getVolumeName(),
+        omKeyInfo.getBucketName(), omKeyInfo.getKeyName());
+    omMetadataManager.getKeyTable().putWithBatch(batchOperation, ozoneKey,
+        omKeyInfo);
+    return ozoneKey;
+  }
+
   protected String getMultipartKey() {
     return multipartKey;
   }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java
index 42c5b8b..6776165 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.ozone.om.response.s3.multipart;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
@@ -32,23 +31,29 @@ import java.util.List;
 
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE;
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE;
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.MULTIPARTFILEINFO_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.MULTIPARTINFO_TABLE;
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
 
 /**
  * Response for Multipart Upload Complete request.
+ *
+ * This performs:
+ * 1) Delete multipart key from OpenFileTable, MPUTable,
+ * 2) Add file to FileTable,
+ * 3) Delete unused parts.
  */
 @CleanupTableInfo(cleanupTables = {OPEN_FILE_TABLE, FILE_TABLE, DELETED_TABLE,
-    MULTIPARTFILEINFO_TABLE})
+    MULTIPARTINFO_TABLE})
 public class S3MultipartUploadCompleteResponseWithFSO
         extends S3MultipartUploadCompleteResponse {
 
   public S3MultipartUploadCompleteResponseWithFSO(
       @Nonnull OMResponse omResponse,
       @Nonnull String multipartKey,
+      @Nonnull String multipartOpenKey,
       @Nonnull OmKeyInfo omKeyInfo,
       @Nonnull List<OmKeyInfo> unUsedParts) {
-    super(omResponse, multipartKey, omKeyInfo, unUsedParts);
+    super(omResponse, multipartKey, multipartOpenKey, omKeyInfo, unUsedParts);
   }
 
   /**
@@ -61,32 +66,20 @@ public class S3MultipartUploadCompleteResponseWithFSO
     checkStatusNotOK();
   }
 
-
   @Override
-  public void addToDBBatch(OMMetadataManager omMetadataManager,
+  protected String addToKeyTable(OMMetadataManager omMetadataManager,
       BatchOperation batchOperation) throws IOException {
 
-    omMetadataManager.getOpenKeyTable().deleteWithBatch(batchOperation,
-            getMultipartKey());
-    omMetadataManager.getMultipartInfoTable().deleteWithBatch(batchOperation,
-            getMultipartKey());
+    String ozoneKey = omMetadataManager
+        .getOzoneKey(getOmKeyInfo().getVolumeName(),
+            getOmKeyInfo().getBucketName(), getOmKeyInfo().getKeyName());
 
-    String dbFileKey = OMFileRequest.addToFileTable(omMetadataManager,
-            batchOperation, getOmKeyInfo());
+    OMFileRequest
+        .addToFileTable(omMetadataManager, batchOperation, getOmKeyInfo());
 
-    if (!getPartsUnusedList().isEmpty()) {
-      // Add unused parts to deleted key table.
-      RepeatedOmKeyInfo repeatedOmKeyInfo = omMetadataManager.getDeletedTable()
-              .get(dbFileKey);
-      if (repeatedOmKeyInfo == null) {
-        repeatedOmKeyInfo = new RepeatedOmKeyInfo(getPartsUnusedList());
-      } else {
-        repeatedOmKeyInfo.addOmKeyInfo(getOmKeyInfo());
-      }
+    return ozoneKey;
 
-      omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
-              dbFileKey, repeatedOmKeyInfo);
-    }
   }
+
 }
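
[editor's note] In this FSO override the file-table row is written under the prefix-layout key, while the method returns the flat ozone key so the parent class can group unused parts in the deleted table under it. A few lines to make that split concrete, again with placeholder maps and names.

    import java.util.HashMap;
    import java.util.Map;

    public class FsoAddToKeyTableSketch {

      static String addToKeyTable(Map<String, String> fileTable, long parentId,
          String fileName, String volume, String bucket, String keyName) {
        // Write under the prefix-layout key used by the file table.
        fileTable.put(parentId + "/" + fileName, "completedKeyInfo");
        // Return the flat key, used later as the deleted-table key for unused parts.
        return "/" + volume + "/" + bucket + "/" + keyName;
      }

      public static void main(String[] args) {
        Map<String, String> fileTable = new HashMap<>();
        String deletedTableKey = addToKeyTable(fileTable, 3001L, "file1",
            "vol1", "bucket1", "a/b/file1");
        System.out.println(fileTable + " -> deleted-table key: " + deletedTableKey);
      }
    }
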
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java
index ddafb38..858a075 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java
@@ -124,6 +124,7 @@ public class TestOMFileCreateRequestWithFSO extends TestOMFileCreateRequest {
     testNonRecursivePath(key, false, false, true);
   }
 
+  @Override
   protected OmKeyInfo verifyPathInOpenKeyTable(String key, long id,
                                              boolean doAssert)
           throws Exception {
@@ -189,6 +190,7 @@ public class TestOMFileCreateRequestWithFSO extends TestOMFileCreateRequest {
     return config;
   }
 
+  @Override
   protected OMFileCreateRequest getOMFileCreateRequest(OMRequest omRequest) {
     return new OMFileCreateRequestWithFSO(omRequest);
   }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java
index ca9ccbe..6b34088 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java
@@ -50,6 +50,7 @@ public class TestOMAllocateBlockRequestWithFSO
     return config;
   }
 
+  @Override
   protected String addKeyToOpenKeyTable(String volumeName, String bucketName)
           throws Exception {
     // need to initialize parentID
@@ -77,6 +78,7 @@ public class TestOMAllocateBlockRequestWithFSO
   }
 
   @NotNull
+  @Override
   protected OMAllocateBlockRequest getOmAllocateBlockRequest(
           OzoneManagerProtocolProtos.OMRequest modifiedOmRequest) {
     return new OMAllocateBlockRequestWithFSO(modifiedOmRequest);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java
index 13ad623..f257cc9 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java
@@ -102,6 +102,7 @@ public class TestOMKeyCommitRequestWithFSO extends TestOMKeyCommitRequest {
     return new OMKeyCommitRequestWithFSO(omRequest);
   }
 
+  @Override
   protected void verifyKeyName(OmKeyInfo omKeyInfo) {
     // prefix layout format - stores fileName in the keyName DB field.
     String fileName = OzoneFSUtils.getFileName(keyName);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java
index 27e9c4f..c72bd76 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java
@@ -53,6 +53,7 @@ public class TestOMKeyCreateRequestWithFSO extends TestOMKeyCreateRequest {
     return config;
   }
 
+  @Override
   protected void addToKeyTable(String keyName) throws Exception {
     Path keyPath = Paths.get(keyName);
     long parentId = checkIntermediatePaths(keyPath);
@@ -67,6 +68,7 @@ public class TestOMKeyCreateRequestWithFSO extends TestOMKeyCreateRequest {
             fileName, omKeyInfo, -1, 50, omMetadataManager);
   }
 
+  @Override
   protected void checkCreatedPaths(OMKeyCreateRequest omKeyCreateRequest,
       OMRequest omRequest, String keyName) throws Exception {
     keyName = omKeyCreateRequest.validateAndNormalizeKey(true, keyName);
@@ -84,6 +86,7 @@ public class TestOMKeyCreateRequestWithFSO extends TestOMKeyCreateRequest {
     Assert.assertNotNull(omKeyInfo);
   }
 
+  @Override
   protected long checkIntermediatePaths(Path keyPath) throws Exception {
     // Check intermediate paths are created
     keyPath = keyPath.getParent(); // skip the file name
@@ -112,6 +115,7 @@ public class TestOMKeyCreateRequestWithFSO extends TestOMKeyCreateRequest {
     return lastKnownParentId;
   }
 
+  @Override
   protected String getOpenKey(long id) throws IOException {
     String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
     OmBucketInfo omBucketInfo =
@@ -124,6 +128,7 @@ public class TestOMKeyCreateRequestWithFSO extends TestOMKeyCreateRequest {
     }
   }
 
+  @Override
   protected OMKeyCreateRequest getOMKeyCreateRequest(OMRequest omRequest) {
     return new OMKeyCreateRequestWithFSO(omRequest);
   }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java
index 3c9c5c9..3686b6a 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java
@@ -41,11 +41,13 @@ import java.util.NoSuchElementException;
  */
 public class TestOMKeyDeleteRequestWithFSO extends TestOMKeyDeleteRequest {
 
+  @Override
   protected OMKeyDeleteRequest getOmKeyDeleteRequest(
       OMRequest modifiedOmRequest) {
     return new OMKeyDeleteRequestWithFSO(modifiedOmRequest);
   }
 
+  @Override
   protected String addKeyToTable() throws Exception {
     String parentDir = "c/d/e";
     String fileName = "file1";
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java
index c39d679..0e41e7c 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java
@@ -78,12 +78,17 @@ public class TestS3InitiateMultipartUploadRequestWithFSO
 
     long parentID = verifyDirectoriesInDB(dirs, bucketID);
 
-    String multipartFileKey = omMetadataManager.getMultipartKey(parentID,
+    String multipartFileKey = omMetadataManager
+        .getMultipartKey(volumeName, bucketName, keyName,
+            modifiedRequest.getInitiateMultiPartUploadRequest().getKeyArgs()
+                .getMultipartUploadID());
+
+    String multipartOpenFileKey = omMetadataManager.getMultipartKey(parentID,
             fileName, modifiedRequest.getInitiateMultiPartUploadRequest()
                     .getKeyArgs().getMultipartUploadID());
 
     OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable()
-            .get(multipartFileKey);
+            .get(multipartOpenFileKey);
     Assert.assertNotNull("Failed to find the fileInfo", omKeyInfo);
     Assert.assertEquals("FileName mismatches!", fileName,
             omKeyInfo.getKeyName());
@@ -132,6 +137,7 @@ public class TestS3InitiateMultipartUploadRequestWithFSO
     return parentID;
   }
 
+  @Override
   protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq(
       OMRequest initiateMPURequest) {
     return new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequest.java
index 9bff636..d6ded0a 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequest.java
@@ -81,16 +81,19 @@ public class TestS3MultipartUploadAbortRequest extends TestS3MultipartRequest {
         s3MultipartUploadAbortRequest.validateAndUpdateCache(ozoneManager, 2L,
             ozoneManagerDoubleBufferHelper);
 
+    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
+        bucketName, keyName, multipartUploadID);
 
-    String multipartKey = getMultipartKey(volumeName, bucketName, keyName,
-        multipartUploadID);
+    String multipartOpenKey = getMultipartOpenKey(volumeName, bucketName,
+        keyName, multipartUploadID);
 
     // Check table and response.
     Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
         omClientResponse.getOMResponse().getStatus());
     Assert.assertNull(
         omMetadataManager.getMultipartInfoTable().get(multipartKey));
-    Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
+    Assert
+        .assertNull(omMetadataManager.getOpenKeyTable().get(multipartOpenKey));
 
   }
 
@@ -189,7 +192,7 @@ public class TestS3MultipartUploadAbortRequest extends TestS3MultipartRequest {
     // no parent hierarchy
   }
 
-  protected String getMultipartKey(String volumeName, String bucketName,
+  protected String getMultipartOpenKey(String volumeName, String bucketName,
       String keyName, String multipartUploadID) {
     return omMetadataManager.getMultipartKey(volumeName,
         bucketName, keyName, multipartUploadID);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequestWithFSO.java
index 7db5fd6..044f8a6 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequestWithFSO.java
@@ -60,7 +60,7 @@ public class TestS3MultipartUploadAbortRequestWithFSO
   }
 
   @Override
-  protected String getMultipartKey(String volumeName, String bucketName,
+  protected String getMultipartOpenKey(String volumeName, String bucketName,
       String keyName, String multipartUploadID) {
     String fileName = StringUtils.substringAfter(keyName, dirName);
     return omMetadataManager.getMultipartKey(parentID, fileName,
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java
index 6c8beb0..a285ba7 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java
@@ -89,17 +89,21 @@ public class TestS3MultipartUploadCommitPartRequest
     Assert.assertTrue(omClientResponse.getOMResponse().getStatus()
         == OzoneManagerProtocolProtos.Status.OK);
 
-    String multipartKey = getMultipartKey(volumeName, bucketName, keyName,
-            multipartUploadID);
+    String multipartOpenKey = getMultipartOpenKey(volumeName, bucketName,
+        keyName, multipartUploadID);
+
+    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
+        bucketName, keyName, multipartUploadID);
 
     Assert.assertNotNull(
         omMetadataManager.getMultipartInfoTable().get(multipartKey));
     Assert.assertTrue(omMetadataManager.getMultipartInfoTable()
         .get(multipartKey).getPartKeyInfoMap().size() == 1);
-    Assert.assertNull(omMetadataManager.getOpenKeyTable()
-        .get(omMetadataManager.getOpenKey(volumeName, bucketName, keyName,
-            clientID)));
+    Assert.assertNotNull(omMetadataManager.getOpenKeyTable()
+        .get(multipartOpenKey));
 
+    String partKey = getOpenKey(volumeName, bucketName, keyName, clientID);
+    Assert.assertNull(omMetadataManager.getOpenKeyTable().get(partKey));
   }
 
   @Test
@@ -133,8 +137,8 @@ public class TestS3MultipartUploadCommitPartRequest
     Assert.assertTrue(omClientResponse.getOMResponse().getStatus()
         == OzoneManagerProtocolProtos.Status.NO_SUCH_MULTIPART_UPLOAD_ERROR);
 
-    String multipartKey = getMultipartKey(volumeName, bucketName, keyName,
-            multipartUploadID);
+    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
+        bucketName, keyName, multipartUploadID);
 
     Assert.assertNull(
         omMetadataManager.getMultipartInfoTable().get(multipartKey));
@@ -216,10 +220,16 @@ public class TestS3MultipartUploadCommitPartRequest
     return UUID.randomUUID().toString();
   }
 
-  protected String getMultipartKey(String volumeName, String bucketName,
+  protected String getMultipartOpenKey(String volumeName, String bucketName,
       String keyName, String multipartUploadID) {
-    return omMetadataManager.getMultipartKey(volumeName,
-            bucketName, keyName, multipartUploadID);
+    return omMetadataManager
+        .getMultipartKey(volumeName, bucketName, keyName, multipartUploadID);
+  }
+
+  protected String getOpenKey(String volumeName, String bucketName,
+      String keyName, long clientID) {
+    return omMetadataManager.getOpenKey(volumeName, bucketName,
+        keyName, clientID);
   }
 
   protected void createParentPath(String volumeName, String bucketName)
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java
index b6e5bcb..bd7f431 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java
@@ -40,20 +40,24 @@ public class TestS3MultipartUploadCommitPartRequestWithFSO
 
   private long parentID;
 
+  @Override
   protected S3MultipartUploadCommitPartRequest getS3MultipartUploadCommitReq(
           OMRequest omRequest) {
     return new S3MultipartUploadCommitPartRequestWithFSO(omRequest);
   }
 
+  @Override
   protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq(
           OMRequest initiateMPURequest) {
     return new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest);
   }
 
+  @Override
   protected String getKeyName() {
     return dirName + UUID.randomUUID().toString();
   }
 
+  @Override
   protected void addKeyToOpenKeyTable(String volumeName, String bucketName,
       String keyName, long clientID) throws Exception {
     long txnLogId = 10000;
@@ -66,13 +70,22 @@ public class TestS3MultipartUploadCommitPartRequestWithFSO
             fileName, omKeyInfo, clientID, txnLogId, omMetadataManager);
   }
 
-  protected String getMultipartKey(String volumeName, String bucketName,
+  @Override
+  protected String getMultipartOpenKey(String volumeName, String bucketName,
       String keyName, String multipartUploadID) {
     String fileName = StringUtils.substringAfter(keyName, dirName);
     return omMetadataManager.getMultipartKey(parentID, fileName,
             multipartUploadID);
   }
 
+  @Override
+  protected String getOpenKey(String volumeName, String bucketName,
+      String keyName, long clientID) {
+    String fileName = StringUtils.substringAfter(keyName, dirName);
+    return omMetadataManager.getOpenFileName(parentID, fileName, clientID);
+  }
+
+  @Override
   protected OMRequest doPreExecuteInitiateMPU(String volumeName,
       String bucketName, String keyName) throws Exception {
     OMRequest omRequest =
@@ -95,6 +108,7 @@ public class TestS3MultipartUploadCommitPartRequestWithFSO
     return modifiedRequest;
   }
 
+  @Override
   protected void createParentPath(String volumeName, String bucketName)
       throws Exception {
     // Create parent dirs for the path
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java
index 972de95..17b4d9d 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java
@@ -49,6 +49,7 @@ public class TestS3MultipartUploadCompleteRequestWithFSO
     OzoneManagerRatisUtils.setBucketFSOptimized(true);
   }
 
+  @Override
   protected String getKeyName() {
     String parentDir = UUID.randomUUID().toString() + "/a/b/c";
     String fileName = "file1";
@@ -56,6 +57,7 @@ public class TestS3MultipartUploadCompleteRequestWithFSO
     return keyName;
   }
 
+  @Override
   protected void addKeyToTable(String volumeName, String bucketName,
       String keyName, long clientID) throws Exception {
     // need to initialize parentID
@@ -81,6 +83,7 @@ public class TestS3MultipartUploadCompleteRequestWithFSO
             omMetadataManager);
   }
 
+  @Override
   protected String getMultipartKey(String volumeName, String bucketName,
       String keyName, String multipartUploadID) throws IOException {
     OzoneFileStatus keyStatus = OMFileRequest.getOMKeyInfoIfExists(
@@ -106,6 +109,7 @@ public class TestS3MultipartUploadCompleteRequestWithFSO
             elements, keyName, omMetadataManager);
   }
 
+  @Override
   protected String getOzoneDBKey(String volumeName, String bucketName,
                                  String keyName) throws IOException {
     long parentID = getParentID(volumeName, bucketName, keyName);
@@ -113,16 +117,19 @@ public class TestS3MultipartUploadCompleteRequestWithFSO
     return omMetadataManager.getOzonePathKey(parentID, fileName);
   }
 
+  @Override
   protected S3MultipartUploadCompleteRequest getS3MultipartUploadCompleteReq(
           OMRequest omRequest) {
     return new S3MultipartUploadCompleteRequestWithFSO(omRequest);
   }
 
+  @Override
   protected S3MultipartUploadCommitPartRequest getS3MultipartUploadCommitReq(
           OMRequest omRequest) {
     return new S3MultipartUploadCommitPartRequestWithFSO(omRequest);
   }
 
+  @Override
   protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq(
           OMRequest initiateMPURequest) {
     return new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java
index 97e1b90..4b99d76 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java
@@ -39,6 +39,7 @@ public class TestOMAllocateBlockResponseWithFSO
   private long parentID = 10;
   private String fileName = "file1";
 
+  @Override
   protected OmKeyInfo createOmKeyInfo() throws Exception {
     // need to initialize parentID
     String parentDir = keyName;
@@ -55,12 +56,14 @@ public class TestOMAllocateBlockResponseWithFSO
     return omKeyInfoFSO;
   }
 
+  @Override
   protected String getOpenKey() throws Exception {
     return omMetadataManager.getOpenFileName(
             parentID, fileName, clientID);
   }
 
   @NotNull
+  @Override
   protected OMAllocateBlockResponse getOmAllocateBlockResponse(
           OmKeyInfo omKeyInfo, OmBucketInfo omBucketInfo,
           OMResponse omResponse) {
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java
index 4eabb3f..3069e2f 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.ozone.om.response.key;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
@@ -36,10 +35,10 @@ import org.junit.Assert;
 public class TestOMKeyCommitResponseWithFSO extends TestOMKeyCommitResponse {
 
   @NotNull
-  protected OMKeyCommitResponse getOmKeyCommitResponse(
-          OmVolumeArgs omVolumeArgs, OmKeyInfo omKeyInfo,
-          OzoneManagerProtocolProtos.OMResponse omResponse, String openKey,
-          String ozoneKey) {
+  @Override
+  protected OMKeyCommitResponse getOmKeyCommitResponse(OmKeyInfo omKeyInfo,
+      OzoneManagerProtocolProtos.OMResponse omResponse, String openKey,
+      String ozoneKey) {
     Assert.assertNotNull(omBucketInfo);
     return new OMKeyCommitResponseWithFSO(omResponse, omKeyInfo, ozoneKey,
         openKey, omBucketInfo);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java
index f816d17..59cdcd3 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java
@@ -66,6 +66,7 @@ public class TestOMKeyCreateResponseWithFSO extends TestOMKeyCreateResponse {
   }
 
   @NotNull
+  @Override
   protected OMKeyCreateResponse getOmKeyCreateResponse(OmKeyInfo keyInfo,
       OmBucketInfo bucketInfo, OMResponse response) {
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponseWithFSO.java
index b29fa2a..6ef79d4 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponseWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponseWithFSO.java
@@ -58,10 +58,14 @@ public class TestS3InitiateMultipartUploadResponseWithFSO
     // Do manual commit and see whether addToBatch is successful or not.
     omMetadataManager.getStore().commitBatchOperation(batchOperation);
 
-    String multipartKey = omMetadataManager.getMultipartKey(parentID, fileName,
-            multipartUploadID);
+    String multipartKey = omMetadataManager
+        .getMultipartKey(volumeName, bucketName, keyName, multipartUploadID);
 
-    OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(multipartKey);
+    String multipartOpenKey = omMetadataManager
+        .getMultipartKey(parentID, fileName, multipartUploadID);
+
+    OmKeyInfo omKeyInfo =
+        omMetadataManager.getOpenKeyTable().get(multipartOpenKey);
     Assert.assertNotNull("Failed to find the fileInfo", omKeyInfo);
     Assert.assertEquals("FileName mismatches!", fileName,
             omKeyInfo.getKeyName());
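
The assertions above capture the core of HDDS-5201 for FSO buckets: the multipartInfoTable entry keeps the existing volume/bucket/key/uploadID key format, while the openKeyTable entry for the in-flight MPU file stays keyed by parentID/fileName/uploadID. A minimal sketch of the two lookups, using only the OMMetadataManager calls from the test; the helper class name is hypothetical and not part of the commit:

import java.io.IOException;

import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;

/**
 * Sketch only (not part of this commit): the two DB keys that track a single
 * FSO multipart upload after HDDS-5201.
 */
final class FsoMpuLookupSketch {
  private FsoMpuLookupSketch() { }

  /** multipartInfoTable keeps the existing ozone key format. */
  static OmMultipartKeyInfo mpuInfo(OMMetadataManager om, String volume,
      String bucket, String key, String uploadID) throws IOException {
    String multipartKey = om.getMultipartKey(volume, bucket, key, uploadID);
    return om.getMultipartInfoTable().get(multipartKey);
  }

  /** openKeyTable keeps the FSO format keyed by the parent's object id. */
  static OmKeyInfo mpuOpenFile(OMMetadataManager om, long parentID,
      String fileName, String uploadID) throws IOException {
    String multipartOpenKey = om.getMultipartKey(parentID, fileName, uploadID);
    return om.getOpenKeyTable().get(multipartOpenKey);
  }
}
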
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
index cef6009..3e9d789 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
@@ -121,8 +121,8 @@ public class TestS3MultipartResponse {
   }
 
   public S3MultipartUploadAbortResponse createS3AbortMPUResponse(
-      String multipartKey, OmMultipartKeyInfo omMultipartKeyInfo,
-      OmBucketInfo omBucketInfo) {
+      String multipartKey, String multipartOpenKey,
+      OmMultipartKeyInfo omMultipartKeyInfo, OmBucketInfo omBucketInfo) {
     OMResponse omResponse = OMResponse.newBuilder()
         .setCmdType(OzoneManagerProtocolProtos.Type.AbortMultiPartUpload)
         .setStatus(OzoneManagerProtocolProtos.Status.OK)
@@ -131,7 +131,7 @@ public class TestS3MultipartResponse {
             MultipartUploadAbortResponse.newBuilder().build()).build();
 
     return getS3MultipartUploadAbortResp(multipartKey,
-        omMultipartKeyInfo, omBucketInfo, omResponse);
+        multipartOpenKey, omMultipartKeyInfo, omBucketInfo, omResponse);
   }
 
   public void addPart(int partNumber, PartKeyInfo partKeyInfo,
@@ -212,8 +212,12 @@ public class TestS3MultipartResponse {
                             .setKeyName(keyName)
                             .setMultipartUploadID(multipartUploadID)).build();
 
+    String mpuKey = omMetadataManager.getMultipartKey(
+        omKeyInfo.getVolumeName(), omKeyInfo.getBucketName(),
+        keyName, multipartUploadID);
+
     return new S3InitiateMultipartUploadResponseWithFSO(omResponse,
-        multipartKeyInfo, omKeyInfo, parentDirInfos);
+        multipartKeyInfo, omKeyInfo, mpuKey, parentDirInfos);
   }
 
   @SuppressWarnings("checkstyle:ParameterNumber")
@@ -236,7 +240,9 @@ public class TestS3MultipartResponse {
 
     String fileName = OzoneFSUtils.getFileName(keyName);
 
-    String multipartKey = getMultipartKey(parentID, keyName, multipartUploadID);
+    String multipartKey = omMetadataManager
+        .getMultipartKey(volumeName, bucketName, keyName, multipartUploadID);
+
     boolean isRatisEnabled = true;
     String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
     OmBucketInfo omBucketInfo =
@@ -274,7 +280,12 @@ public class TestS3MultipartResponse {
           OzoneManagerProtocolProtos.Status status,
           List<OmKeyInfo> unUsedParts) {
 
-    String multipartKey = getMultipartKey(parentID, keyName, multipartUploadID);
+
+    String multipartKey = omMetadataManager
+        .getMultipartKey(volumeName, bucketName, keyName, multipartUploadID);
+
+    String multipartOpenKey = getMultipartKey(parentID, keyName,
+        multipartUploadID);
 
     OMResponse omResponse = OMResponse.newBuilder()
             .setCmdType(OzoneManagerProtocolProtos.Type.CompleteMultiPartUpload)
@@ -285,7 +296,7 @@ public class TestS3MultipartResponse {
                             .setVolume(volumeName).setKey(keyName)).build();
 
     return new S3MultipartUploadCompleteResponseWithFSO(omResponse,
-        multipartKey, omKeyInfo, unUsedParts);
+        multipartKey, multipartOpenKey, omKeyInfo, unUsedParts);
   }
 
   private String getMultipartKey(long parentID, String keyName,
@@ -303,9 +314,10 @@ public class TestS3MultipartResponse {
   }
 
   protected S3MultipartUploadAbortResponse getS3MultipartUploadAbortResp(
-      String multipartKey, OmMultipartKeyInfo omMultipartKeyInfo,
-      OmBucketInfo omBucketInfo, OMResponse omResponse) {
+      String multipartKey, String multipartOpenKey,
+      OmMultipartKeyInfo omMultipartKeyInfo, OmBucketInfo omBucketInfo,
+      OMResponse omResponse) {
     return new S3MultipartUploadAbortResponse(omResponse, multipartKey,
-        omMultipartKeyInfo, true, omBucketInfo);
+        multipartOpenKey, omMultipartKeyInfo, true, omBucketInfo);
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java
index ae8650e..a568f90 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java
@@ -45,8 +45,11 @@ public class TestS3MultipartUploadAbortResponse
     String bucketName = UUID.randomUUID().toString();
     String keyName = getKeyName();
     String multipartUploadID = UUID.randomUUID().toString();
-    String multipartKey = getMultipartKey(volumeName, bucketName, keyName,
-        multipartUploadID);
+    String multipartOpenKey = getMultipartOpenKey(volumeName, bucketName,
+        keyName, multipartUploadID);
+
+    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
+        bucketName, keyName, multipartUploadID);
 
     OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
         .setVolumeName(volumeName).setBucketName(bucketName)
@@ -59,7 +62,7 @@ public class TestS3MultipartUploadAbortResponse
         batchOperation);
 
     S3MultipartUploadAbortResponse s3MultipartUploadAbortResponse =
-        createS3AbortMPUResponse(multipartKey,
+        createS3AbortMPUResponse(multipartKey, multipartOpenKey,
             s3InitiateMultipartUploadResponse.getOmMultipartKeyInfo(),
             omBucketInfo);
 
@@ -84,8 +87,10 @@ public class TestS3MultipartUploadAbortResponse
     String bucketName = UUID.randomUUID().toString();
     String keyName = getKeyName();
     String multipartUploadID = UUID.randomUUID().toString();
-    String multipartKey = getMultipartKey(volumeName, bucketName, keyName,
-        multipartUploadID);
+    String multipartOpenKey = getMultipartOpenKey(volumeName, bucketName,
+        keyName, multipartUploadID);
+    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
+        bucketName, keyName, multipartUploadID);
 
     OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
         .setVolumeName(volumeName).setBucketName(bucketName)
@@ -116,7 +121,7 @@ public class TestS3MultipartUploadAbortResponse
 
 
     S3MultipartUploadAbortResponse s3MultipartUploadAbortResponse =
-        createS3AbortMPUResponse(multipartKey,
+        createS3AbortMPUResponse(multipartKey, multipartOpenKey,
             s3InitiateMultipartUploadResponse.getOmMultipartKeyInfo(),
             omBucketInfo);
 
@@ -158,7 +163,7 @@ public class TestS3MultipartUploadAbortResponse
     return UUID.randomUUID().toString();
   }
 
-  protected String getMultipartKey(String volumeName, String bucketName,
+  protected String getMultipartOpenKey(String volumeName, String bucketName,
       String keyName, String multipartUploadID) {
     return omMetadataManager.getMultipartKey(volumeName,
         bucketName, keyName, multipartUploadID);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponseWithFSO.java
index 8ce9170..41a089e 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponseWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponseWithFSO.java
@@ -38,30 +38,40 @@ public class TestS3MultipartUploadAbortResponseWithFSO
 
   private long parentID = 1027;
 
+  @Override
   protected String getKeyName() {
     return dirName + UUID.randomUUID().toString();
   }
 
-  protected String getMultipartKey(String volumeName, String bucketName,
+  @Override
+  protected String getMultipartOpenKey(String volumeName, String bucketName,
       String keyName, String multipartUploadID) {
     String fileName = StringUtils.substringAfter(keyName, dirName);
     return omMetadataManager.getMultipartKey(parentID, fileName,
         multipartUploadID);
   }
 
+  @Override
   protected S3InitiateMultipartUploadResponse getS3InitiateMultipartUploadResp(
       OmMultipartKeyInfo multipartKeyInfo, OmKeyInfo omKeyInfo,
       OzoneManagerProtocolProtos.OMResponse omResponse) {
+
+    String mpuDBKey =
+        omMetadataManager.getMultipartKey(omKeyInfo.getVolumeName(),
+        omKeyInfo.getBucketName(), omKeyInfo.getKeyName(),
+        multipartKeyInfo.getUploadID());
+
     return new S3InitiateMultipartUploadResponseWithFSO(omResponse,
-        multipartKeyInfo, omKeyInfo, new ArrayList<>());
+        multipartKeyInfo, omKeyInfo, mpuDBKey, new ArrayList<>());
   }
 
+  @Override
   protected S3MultipartUploadAbortResponse getS3MultipartUploadAbortResp(
-      String multipartKey, OmMultipartKeyInfo omMultipartKeyInfo,
-      OmBucketInfo omBucketInfo,
+      String multipartKey, String multipartOpenKey,
+      OmMultipartKeyInfo omMultipartKeyInfo, OmBucketInfo omBucketInfo,
       OzoneManagerProtocolProtos.OMResponse omResponse) {
     return new S3MultipartUploadAbortResponseWithFSO(omResponse, multipartKey,
-        omMultipartKeyInfo, true, omBucketInfo);
+        multipartOpenKey, omMultipartKeyInfo, true, omBucketInfo);
   }
 
   @Override
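
With both keys now threaded through the abort response, the cleanup these tests drive toward removes both bookkeeping entries. A hypothetical assertion helper (not part of the commit) condensing that expected post-abort state, using the same table accessors the tests use:

import java.io.IOException;

import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.junit.Assert;

/**
 * Sketch only (hypothetical helper, not in this commit): post-abort DB state
 * implied by passing both the multipartKey and the multipartOpenKey into the
 * abort response.
 */
final class AbortStateSketch {
  private AbortStateSketch() { }

  static void assertAborted(OMMetadataManager om, String multipartKey,
      String multipartOpenKey) throws IOException {
    // Upload bookkeeping should be gone from the MPU info table.
    Assert.assertNull(om.getMultipartInfoTable().get(multipartKey));
    // The in-flight FSO open-file entry should be gone as well.
    Assert.assertNull(om.getOpenKeyTable().get(multipartOpenKey));
  }
}
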
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCommitPartResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCommitPartResponseWithFSO.java
index 0ac6584..17a8e60 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCommitPartResponseWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCommitPartResponseWithFSO.java
@@ -54,8 +54,8 @@ public class TestS3MultipartUploadCommitPartResponseWithFSO
 
     createParentPath(volumeName, bucketName);
     String fileName = OzoneFSUtils.getFileName(keyName);
-    String multipartKey = omMetadataManager.getMultipartKey(parentID, fileName,
-            multipartUploadID);
+    String multipartKey = omMetadataManager
+        .getMultipartKey(volumeName, bucketName, keyName, multipartUploadID);
     long clientId = Time.now();
     String openKey = omMetadataManager.getOpenFileName(parentID, fileName,
             clientId);
@@ -70,7 +70,7 @@ public class TestS3MultipartUploadCommitPartResponseWithFSO
 
     omMetadataManager.getStore().commitBatchOperation(batchOperation);
 
-    Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
+    Assert.assertNull(omMetadataManager.getOpenKeyTable().get(openKey));
     Assert.assertNotNull(
         omMetadataManager.getMultipartInfoTable().get(multipartKey));
 
@@ -93,8 +93,8 @@ public class TestS3MultipartUploadCommitPartResponseWithFSO
     String multipartUploadID = UUID.randomUUID().toString();
 
     String fileName = OzoneFSUtils.getFileName(keyName);
-    String multipartKey = omMetadataManager.getMultipartKey(parentID, fileName,
-            multipartUploadID);
+    String multipartKey = omMetadataManager
+        .getMultipartKey(volumeName, bucketName, keyName, multipartUploadID);
 
     S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponseFSO =
             createS3InitiateMPUResponseFSO(volumeName, bucketName, parentID,
@@ -128,7 +128,7 @@ public class TestS3MultipartUploadCommitPartResponseWithFSO
     s3MultipartUploadCommitPartResponse.checkAndUpdateDB(omMetadataManager,
             batchOperation);
 
-    Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
+    Assert.assertNull(omMetadataManager.getOpenKeyTable().get(openKey));
     Assert.assertNull(
         omMetadataManager.getMultipartInfoTable().get(multipartKey));
 
@@ -199,7 +199,7 @@ public class TestS3MultipartUploadCommitPartResponseWithFSO
     s3MultipartUploadCommitPartResponse.checkAndUpdateDB(omMetadataManager,
             batchOperation);
 
-    Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
+    Assert.assertNull(omMetadataManager.getOpenKeyTable().get(openKey));
     Assert.assertNull(
             omMetadataManager.getMultipartInfoTable().get(multipartKey));
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java
index ed66ba6..624c2fb 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.ozone.om.response.s3.multipart;
 
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
@@ -58,9 +59,23 @@ public class TestS3MultipartUploadCompleteResponseWithFSO
     long txnId = 50;
     long objectId = parentID + 1;
     String fileName = OzoneFSUtils.getFileName(keyName);
-    String dbMultipartKey = omMetadataManager.getMultipartKey(parentID,
+    String dbMultipartKey = omMetadataManager.getMultipartKey(volumeName,
+            bucketName, keyName, multipartUploadID);
+    String dbMultipartOpenKey = omMetadataManager.getMultipartKey(parentID,
             fileName, multipartUploadID);
     long clientId = Time.now();
+
+    // add MPU entry to OpenFileTable
+    List<OmDirectoryInfo> parentDirInfos = new ArrayList<>();
+    S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponseFSO =
+        createS3InitiateMPUResponseFSO(volumeName, bucketName, parentID,
+            keyName, multipartUploadID, parentDirInfos);
+
+    s3InitiateMultipartUploadResponseFSO.addToDBBatch(omMetadataManager,
+        batchOperation);
+
+    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+
     String dbOpenKey = omMetadataManager.getOpenFileName(parentID, fileName,
             clientId);
     String dbKey = omMetadataManager.getOzonePathKey(parentID, fileName);
@@ -79,6 +94,11 @@ public class TestS3MultipartUploadCompleteResponseWithFSO
     addS3MultipartUploadCommitPartResponseFSO(volumeName, bucketName, keyName,
             multipartUploadID, dbOpenKey);
 
+    Assert.assertNotNull(
+        omMetadataManager.getMultipartInfoTable().get(dbMultipartKey));
+    Assert.assertNotNull(
+        omMetadataManager.getOpenKeyTable().get(dbMultipartOpenKey));
+
     List<OmKeyInfo> unUsedParts = new ArrayList<>();
     S3MultipartUploadCompleteResponse s3MultipartUploadCompleteResponse =
             createS3CompleteMPUResponseFSO(volumeName, bucketName, parentID,
@@ -94,7 +114,7 @@ public class TestS3MultipartUploadCompleteResponseWithFSO
     Assert.assertNull(
         omMetadataManager.getMultipartInfoTable().get(dbMultipartKey));
     Assert.assertNull(
-            omMetadataManager.getOpenKeyTable().get(dbMultipartKey));
+            omMetadataManager.getOpenKeyTable().get(dbMultipartOpenKey));
 
     // As no parts are created, so no entries should be there in delete table.
     Assert.assertEquals(0, omMetadataManager.countRowsInTable(
@@ -117,8 +137,10 @@ public class TestS3MultipartUploadCompleteResponseWithFSO
     int deleteEntryCount = 0;
 
     String fileName = OzoneFSUtils.getFileName(keyName);
-    String dbMultipartKey = omMetadataManager.getMultipartKey(parentID,
-            fileName, multipartUploadID);
+    String dbMultipartKey = omMetadataManager.getMultipartKey(volumeName,
+        bucketName, keyName, multipartUploadID);
+    String dbMultipartOpenKey = omMetadataManager.getMultipartKey(parentID,
+        fileName, multipartUploadID);
 
     S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponseFSO =
             addS3InitiateMultipartUpload(volumeName, bucketName, keyName,
@@ -159,7 +181,7 @@ public class TestS3MultipartUploadCompleteResponseWithFSO
     Assert.assertNull(
             omMetadataManager.getMultipartInfoTable().get(dbMultipartKey));
     Assert.assertNull(
-            omMetadataManager.getOpenKeyTable().get(dbMultipartKey));
+            omMetadataManager.getOpenKeyTable().get(dbMultipartOpenKey));
 
     // As 1 unused parts exists, so 1 unused entry should be there in delete
     // table.
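
For the complete-MPU path, the tests above verify that both bookkeeping keys are removed and that the finished file is addressed in the key table under its FSO path key (dbKey above). A hypothetical helper (not part of the commit) condensing that end-state check, built from the metadata-manager calls already used in these tests:

import java.io.IOException;

import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.junit.Assert;

/**
 * Sketch only (hypothetical helper, not in this commit): DB end-state after a
 * completed FSO multipart upload, as exercised by the tests above.
 */
final class CompleteStateSketch {
  private CompleteStateSketch() { }

  static void assertCompleted(OMMetadataManager om, String dbMultipartKey,
      String dbMultipartOpenKey, long parentID, String fileName)
      throws IOException {
    // Both MPU bookkeeping entries are cleaned up.
    Assert.assertNull(om.getMultipartInfoTable().get(dbMultipartKey));
    Assert.assertNull(om.getOpenKeyTable().get(dbMultipartOpenKey));
    // The completed file is addressed by parent object id + file name.
    OmKeyInfo committed =
        om.getKeyTable().get(om.getOzonePathKey(parentID, fileName));
    Assert.assertNotNull(committed);
  }
}
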
