Posted to commits@ozone.apache.org by ra...@apache.org on 2021/02/25 06:20:20 UTC

[ozone] branch HDDS-2939 updated (818e5ef -> 3230b0f)

This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a change to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git.


    omit 818e5ef  HDDS-4513.[FSO]OzoneContract unit test case fixes (#1945)
    omit 1de4738  HDDS-4813. [FSO]S3Multipart: Implement UploadCompleteRequest (#1923)
    omit 40cfa43  HDDS-4742. Make trash work with FS Optimised Buckets. (#1915)
    omit 630f69e  HDDS-4781. [FSO]S3MultiPart: Implement create and commit upload part file (#1897)
    omit 5be097d  HDDS-4805. [FSO]Fix findbugs issues after HDDS-2195 (#1906)
    omit 93ba7e5  HDDS-4771. [FSO]S3MultiPart: Implement InitiateMultiPartUpload (#1877)
    omit a5dc2d8  HDDS-4486. Feature Config: Make proper enableFSPaths and OFS optimized flag combinations (#1848)
    omit 9de173f  HDDS-4720. RenameKey : add unit test to verify bucket#renameKey (#1847)
    omit 09fe3f4  HDDS-4717. Fix TestOzoneFileSystemV1 and TestObjectStoreV1 cases (#1815)
    omit a37cc34  HDDS-4658. LookupKey: do lookup in dir and file tables (#1775)
    omit f5ed089  HDDS-2942. Putkey : create key table entries for intermediate directories in the key path (#1764)
    omit fd94a0e  HDDS-4514. AllocateBlock : lookup and update open file table for the given path (#1679)
    omit 566c1a3  HDDS-4596. Directory table, fileTable and openFile Table is missing from the OM DB Definition. (#1724)
    omit a76ba9c  HDDS-4321. Fix compilation errors : merge HDDS-4308 and HDDS-4473 changes into the branch (#1668)
    omit 0a12595  HDDS-4358: Delete : make delete an atomic operation (#1607)
    omit 272d10c  HDDS-4357: Rename : make rename an atomic ops by updating key path entry in dir/file table (#1557)
    omit 11590ab  HDDS-4332: ListFileStatus - do lookup in directory and file tables (#1503)
    omit 102961f  HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable (#1473)
    omit 3c6affa  HDDS-2949: mkdir : store directory entries in a separate table (#1404)
     add d96f154  HDDS-4834. Replication failure in secure environment (#1931)
     add afcb289  HDDS-4482. SCM should be able to persist CRL (#1874)
     add ac8aee7  HDDS-4844. Fixing build issue for some platforms. (#1935)
     add 96e27a5  HDDS-4845. Update NodeStatus OperationalState for Datanodes in Recon (#1939)
     add 2c53c82  HDDS-4851. More compatibility problem with DatanodeDetails.Port.Name.REPLICATION (#1946)
     add 2784612  HDDS-4848. Commit key audit log has inaccurate factor and type. (#1943)
     add 4f5ab87  HDDS-4849. Improve Ozone admin shell decommission/recommission/maintenance commands user experience (#1944)
     add f4411cc  HDDS-4846. Add line break when node has no pipelines for `ozone admin datanode list` command (#1938)
     add dd61b17  HDDS-4148. Add servlet to return SCM DB checkpoint. (#1353)
     add d0496ce  HDDS-4808. Add Genesis benchmark for various CRC implementations (#1910)
     add bf2c8a6  HDDS-4850. Intermittent failure in ozonesecure due to unable to allocate block (#1948)
     add 1cf9015  HDDS-4857. Format ReplicationType.java which indentation are confusion (#1952)
     add fb5b1f2  HDDS-4853. libexec/entrypoint.sh might copy from wrong path (#1951)
     add a7db06d  HDDS-4653. Support TDE for MPU Keys on Encrypted Buckets (#1766)
     add df38294  HDDS-4832. Show Datanode OperationalState in Recon (#1937)
     new 31b6673  HDDS-2949: mkdir : store directory entries in a separate table (#1404)
     new 4d2d64c  HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable (#1473)
     new 78605dd  HDDS-4332: ListFileStatus - do lookup in directory and file tables (#1503)
     new aa70ea5  HDDS-4357: Rename : make rename an atomic ops by updating key path entry in dir/file table (#1557)
     new 0cae5cc  HDDS-4358: Delete : make delete an atomic operation (#1607)
     new d7a38b0  HDDS-4321. Fix compilation errors : merge HDDS-4308 and HDDS-4473 changes into the branch (#1668)
     new f1fc060  HDDS-4596. Directory table, fileTable and openFile Table is missing from the OM DB Definition. (#1724)
     new 90e8362  HDDS-4514. AllocateBlock : lookup and update open file table for the given path (#1679)
     new 6145c24  HDDS-2942. Putkey : create key table entries for intermediate directories in the key path (#1764)
     new 6c9b41f  HDDS-4658. LookupKey: do lookup in dir and file tables (#1775)
     new ea6dc1c  HDDS-4717. Fix TestOzoneFileSystemV1 and TestObjectStoreV1 cases (#1815)
     new 9abd402  HDDS-4720. RenameKey : add unit test to verify bucket#renameKey (#1847)
     new 91733a9  HDDS-4486. Feature Config: Make proper enableFSPaths and OFS optimized flag combinations (#1848)
     new 11af770  HDDS-4771. [FSO]S3MultiPart: Implement InitiateMultiPartUpload (#1877)
     new db4f084  HDDS-4805. [FSO]Fix findbugs issues after HDDS-2195 (#1906)
     new ee4fd7d  HDDS-4781. [FSO]S3MultiPart: Implement create and commit upload part file (#1897)
     new 3812776  HDDS-4742. Make trash work with FS Optimised Buckets. (#1915)
     new fd393da  HDDS-4813. [FSO]S3Multipart: Implement UploadCompleteRequest (#1923)
     new 3230b0f  HDDS-4513.[FSO]OzoneContract unit test case fixes (#1945)

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (818e5ef)
            \
             N -- N -- N   refs/heads/HDDS-2939 (3230b0f)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 19 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../apache/hadoop/hdds/client/ReplicationType.java |   4 +-
 .../hadoop/hdds/protocol/DatanodeDetails.java      |   3 +
 .../hadoop/hdds/scm/container/ContainerInfo.java   |   1 +
 .../hadoop/hdds/scm/pipeline/PipelineID.java       |   2 +
 .../java/org/apache/hadoop/ozone/OzoneConsts.java  |   6 +
 .../ozone/common/ChecksumByteBufferImpl.java       |  98 ++++++
 .../hadoop/ozone/common/NativeCheckSumCRC32.java   |  85 +++++
 .../ozone/common/PureJavaCrc32ByteBuffer.java      |   3 +-
 .../ozone/common/PureJavaCrc32CByteBuffer.java     |   3 +-
 .../org/apache/hadoop/util/NativeCRC32Wrapper.java |  72 ++++
 .../hadoop/{hdds/fs => util}/package-info.java     |   5 +-
 .../common/TestChecksumImplsComputeSameValues.java |  99 ++++++
 .../common/statemachine/DatanodeStateMachine.java  |   3 +-
 .../replication/GrpcReplicationClient.java         |  18 +-
 .../replication/SimpleContainerDownloader.java     |  10 +-
 .../x509/certificate/authority/CRLApprover.java}   |  25 +-
 .../certificate/authority/CertificateServer.java   |  31 +-
 .../certificate/authority/CertificateStore.java    |  34 +-
 .../certificate/authority/DefaultCAServer.java     |  39 ++-
 .../certificate/authority/DefaultCRLApprover.java} |  46 +--
 .../hadoop/hdds/security/x509/crl/CRLInfo.java     | 168 +++++++++
 .../hdds/security/x509/crl}/package-info.java      |   6 +-
 .../hadoop/hdds/utils/DBCheckpointMetrics.java     |  95 ++++++
 .../hadoop/hdds/utils/DBCheckpointServlet.java     | 113 +++----
 .../hadoop/hdds/utils/db/RDBCheckpointManager.java |   2 +-
 .../org/apache/hadoop/hdds/utils/db/RDBStore.java  |   2 +-
 .../x509/certificate/authority/MockCAStore.java    |  14 +-
 .../certificate/authority/TestDefaultCAServer.java |  68 +++-
 .../interface-client/src/main/proto/hdds.proto     |   8 +
 .../container/placement/metrics/SCMMetrics.java    |   8 +
 .../{PipelineCodec.java => CRLInfoCodec.java}      |  41 +--
 .../hadoop/hdds/scm/metadata/SCMDBDefinition.java  |  23 +-
 .../hadoop/hdds/scm/metadata/SCMMetadataStore.java |  16 +
 .../hdds/scm/metadata/SCMMetadataStoreImpl.java    |  32 ++
 .../hdds/scm/node/NodeDecommissionManager.java     |  29 +-
 .../hadoop/hdds/scm/node/SCMNodeManager.java       |  18 +-
 .../hadoop/hdds/scm/server/SCMCertStore.java       | 101 ++++--
 .../hdds/scm/server/SCMDBCheckpointServlet.java    |  64 ++++
 .../hdds/scm/server/StorageContainerManager.java   |  46 ++-
 .../server/StorageContainerManagerHttpServer.java  |   8 +-
 .../scm/TestStorageContainerManagerHttpServer.java |   2 +-
 ...TestSCMStoreImplWithOldPipelineIDKeyFormat.java |  11 +
 .../hadoop/hdds/scm/server/TestSCMCertStore.java   | 269 +++++++++++++++
 .../hdds/scm/cli/container/InfoSubcommand.java     |  39 ++-
 .../scm/cli/datanode/DecommissionSubCommand.java   |  14 +-
 .../hdds/scm/cli/datanode/ListInfoSubcommand.java  |   4 +-
 .../scm/cli/datanode/MaintenanceSubCommand.java    |  16 +-
 .../scm/cli/datanode/RecommissionSubCommand.java   |  17 +-
 .../hadoop/ozone/client/io/KeyInputStream.java     |  43 +++
 .../client/io/MultipartCryptoKeyInputStream.java   | 375 +++++++++++++++++++++
 .../ozone/client/io/OzoneCryptoInputStream.java    |  57 ++++
 .../hadoop/ozone/client/io/OzoneInputStream.java   |   3 +
 .../hadoop/ozone/client/io/OzoneOutputStream.java  |   7 +
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |  73 ++--
 .../apache/hadoop/ozone/om/helpers/OmKeyInfo.java  |   7 +-
 .../hadoop/ozone/om/helpers/OmKeyLocationInfo.java |  35 +-
 .../ozone/om/helpers/OmKeyLocationInfoGroup.java   |  29 +-
 .../hadoop/ozone/om/helpers/OzoneFileStatus.java   |   6 +-
 ...OzoneManagerProtocolClientSideTranslatorPB.java |   2 +
 .../src/main/compose/ozonesecure/docker-config     |   5 +
 .../dist/src/main/compose/ozonesecure/test.sh      |   7 +-
 .../dist/src/main/compose/xcompat/clients.yaml     |   9 -
 hadoop-ozone/dist/src/main/compose/xcompat/test.sh |   2 +-
 .../dist/src/main/dockerlibexec/entrypoint.sh      |   2 +-
 .../smoketest/admincli/replicationmanager.robot    |  10 +-
 .../src/main/smoketest/compatibility/read.robot    |  11 +-
 .../src/main/smoketest/compatibility/write.robot   |  10 +-
 .../dist/src/main/smoketest/recon/recon-api.robot  |   8 +-
 .../readdata.robot => replication/wait.robot}      |  15 +-
 .../scm/TestSCMDbCheckpointServlet.java}           | 144 ++------
 .../client/rpc/TestOzoneAtRestEncryption.java      | 198 ++++++++++-
 .../client/rpc/TestOzoneRpcClientAbstract.java     |  17 +-
 .../hadoop/ozone/om/TestOMDbCheckpointServlet.java |  54 +--
 .../hadoop/ozone/recon/TestReconAsPassiveScm.java  |   2 +
 .../src/main/proto/OmClientProtocol.proto          |   3 +
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |   4 +-
 .../hadoop/ozone/om/OMDBCheckpointServlet.java     | 226 +------------
 .../java/org/apache/hadoop/ozone/om/OMMetrics.java |  48 +--
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |  11 +
 .../ozone/om/request/key/OMKeyCommitRequest.java   |   2 +-
 .../ozone/om/request/key/OMKeyCommitRequestV1.java |   2 +-
 .../ozone/om/request/key/OMKeyCreateRequest.java   |   6 +-
 .../hadoop/ozone/om/request/key/OMKeyRequest.java  |  31 ++
 .../S3InitiateMultipartUploadRequest.java          |  18 +-
 .../S3MultipartUploadCommitPartRequest.java        |   2 +-
 .../S3MultipartUploadCommitPartRequestV1.java      |   2 +-
 .../S3MultipartUploadCompleteRequest.java          |  69 ++--
 .../S3MultipartUploadCompleteRequestV1.java        |   2 +-
 .../protocolPB/OzoneManagerRequestHandler.java     |  14 +-
 .../TestS3InitiateMultipartUploadRequest.java      |  60 ----
 .../hadoop/ozone/recon/api/NodeEndpoint.java       |   3 +
 .../ozone/recon/api/types/DatanodeMetadata.java    |  15 +
 .../hadoop/ozone/recon/scm/ReconNodeManager.java   |  20 +-
 .../webapps/recon/ozone-recon-web/api/db.json      | 286 +++++++++++++++-
 .../ozone-recon-web/src/types/datanode.types.tsx   |  12 +-
 .../src/views/datanodes/datanodes.tsx              |  59 +++-
 .../hadoop/ozone/recon/api/TestEndpoints.java      |  47 +++
 .../ozone/recon/scm/TestReconNodeManager.java      |  33 +-
 .../hadoop/ozone/genesis/BenchMarkCRCBatch.java    | 138 ++++++++
 .../ozone/genesis/BenchMarkCRCStreaming.java       | 169 ++++++++++
 .../org/apache/hadoop/ozone/genesis/Genesis.java   |   2 +-
 tools/fault-injection-service/CMakeLists.txt       | 229 ++++++-------
 tools/fault-injection-service/README.md            |  37 +-
 103 files changed, 3500 insertions(+), 1002 deletions(-)
 create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferImpl.java
 create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/NativeCheckSumCRC32.java
 create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/util/NativeCRC32Wrapper.java
 copy hadoop-hdds/common/src/main/java/org/apache/hadoop/{hdds/fs => util}/package-info.java (87%)
 create mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumImplsComputeSameValues.java
 copy hadoop-hdds/framework/src/{test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/package-info.java => main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CRLApprover.java} (60%)
 copy hadoop-hdds/framework/src/{test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockApprover.java => main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCRLApprover.java} (51%)
 create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfo.java
 copy {hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api => hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl}/package-info.java (84%)
 create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointMetrics.java
 copy hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java => hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointServlet.java (68%)
 copy hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/{PipelineCodec.java => CRLInfoCodec.java} (56%)
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDBCheckpointServlet.java
 create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMCertStore.java
 create mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/MultipartCryptoKeyInputStream.java
 create mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneCryptoInputStream.java
 copy hadoop-ozone/dist/src/main/smoketest/{topology/readdata.robot => replication/wait.robot} (70%)
 copy hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/{ozone/om/TestOMDbCheckpointServlet.java => hdds/scm/TestSCMDbCheckpointServlet.java} (55%)
 create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkCRCBatch.java
 create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkCRCStreaming.java


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 19/19: HDDS-4513.[FSO]OzoneContract unit test case fixes (#1945)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 3230b0f5c0471012bc042b2b14e382b4bb973ef4
Author: Sadanand Shenoy <sa...@gmail.com>
AuthorDate: Thu Feb 25 10:09:42 2021 +0530

    HDDS-4513.[FSO]OzoneContract unit test case fixes (#1945)
---
 .../hadoop/fs/ozone/TestOzoneFileSystemV1.java     | 61 +++-------------------
 .../ozone/contract/ITestOzoneContractCreate.java   | 26 +++++++--
 .../ozone/contract/ITestOzoneContractDelete.java   | 26 +++++++--
 .../contract/ITestOzoneContractGetFileStatus.java  | 27 ++++++++--
 .../fs/ozone/contract/ITestOzoneContractMkdir.java | 26 +++++++--
 .../fs/ozone/contract/ITestOzoneContractOpen.java  | 27 ++++++++--
 .../ozone/contract/ITestOzoneContractRename.java   | 26 +++++++--
 .../ozone/contract/ITestOzoneContractRootDir.java  | 26 +++++++--
 .../fs/ozone/contract/ITestOzoneContractSeek.java  | 27 ++++++++--
 .../ozone/contract/ITestOzoneContractUnbuffer.java | 26 +++++++--
 .../fs/ozone/contract/ITestOzoneContractUtils.java | 60 +++++++++++++++++++++
 .../hadoop/fs/ozone/contract/OzoneContract.java    | 13 +++++
 .../hadoop/fs/ozone/BasicOzoneFileSystem.java      | 11 ++--
 13 files changed, 288 insertions(+), 94 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
index eb7eaca..ed62990 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
@@ -24,7 +24,6 @@ import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.junit.Assert;
 import org.junit.After;
 import org.junit.BeforeClass;
@@ -278,36 +277,6 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
   }
 
   /**
-   * Case-1) fromKeyName should exist, otw throws exception.
-   */
-  @Test
-  public void testRenameWithNonExistentSource() throws Exception {
-    // Skip as this will run only in new layout
-    if (!isEnabledFileSystemPaths()) {
-      return;
-    }
-
-    final String root = "/root";
-    final String dir1 = root + "/dir1";
-    final String dir2 = root + "/dir2";
-    final Path source = new Path(getFs().getUri().toString() + dir1);
-    final Path destin = new Path(getFs().getUri().toString() + dir2);
-
-    // creates destin
-    getFs().mkdirs(destin);
-    LOG.info("Created destin dir: {}", destin);
-
-    LOG.info("Rename op-> source:{} to destin:{}}", source, destin);
-    try {
-      getFs().rename(source, destin);
-      Assert.fail("Should throw exception : Source doesn't exist!");
-    } catch (OMException ome) {
-      // expected
-      assertEquals(ome.getResult(), OMException.ResultCodes.KEY_NOT_FOUND);
-    }
-  }
-
-  /**
    * Case-2) Cannot rename a directory to its own subdirectory.
    */
   @Test
@@ -327,14 +296,8 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
 
     final Path sourceRoot = new Path(getFs().getUri().toString() + root);
     LOG.info("Rename op-> source:{} to destin:{}", sourceRoot, subDir1);
-    try {
-      getFs().rename(sourceRoot, subDir1);
-      Assert.fail("Should throw exception : Cannot rename a directory to" +
-              " its own subdirectory");
-    } catch (OMException ome) {
-      // expected
-      assertEquals(ome.getResult(), OMException.ResultCodes.KEY_RENAME_ERROR);
-    }
+    //  rename should fail and return false
+    Assert.assertFalse(getFs().rename(sourceRoot, subDir1));
   }
 
   /**
@@ -354,30 +317,18 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
     final String dir2 = dir1 + "/dir2";
     final Path dir2SourcePath = new Path(getFs().getUri().toString() + dir2);
     getFs().mkdirs(dir2SourcePath);
-
     // (a) parent of dst does not exist.  /root_dir/b/c
     final Path destinPath = new Path(getFs().getUri().toString()
             + root + "/b/c");
-    try {
-      getFs().rename(dir2SourcePath, destinPath);
-      Assert.fail("Should fail as parent of dst does not exist!");
-    } catch (OMException ome) {
-      // expected
-      assertEquals(ome.getResult(), OMException.ResultCodes.KEY_RENAME_ERROR);
-    }
 
+    // rename should fail and return false
+    Assert.assertFalse(getFs().rename(dir2SourcePath, destinPath));
     // (b) parent of dst is a file. /root_dir/file1/c
     Path filePath = new Path(getFs().getUri().toString() + root + "/file1");
     ContractTestUtils.touch(getFs(), filePath);
-
     Path newDestinPath = new Path(filePath, "c");
-    try {
-      getFs().rename(dir2SourcePath, newDestinPath);
-      Assert.fail("Should fail as parent of dst is a file!");
-    } catch (OMException ome) {
-      // expected
-      assertEquals(ome.getResult(), OMException.ResultCodes.KEY_RENAME_ERROR);
-    }
+    // rename should fail and return false
+    Assert.assertFalse(getFs().rename(dir2SourcePath, newDestinPath));
   }
 
   @Override
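
The tests removed above expected an OMException from FileSystem#rename; the replacement assertions rely on rename reporting failure through its boolean return value, which matches the generic Hadoop FileSystem contract. A minimal sketch of that contract as the updated tests exercise it (the class and helper names here are illustrative, not from the patch):

    import java.io.IOException;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.junit.Assert;

    public final class RenameContractSketch {
      // Hypothetical helper: an invalid rename (missing source, or a
      // directory moved into its own subdirectory) should return false
      // rather than surface an OMException to the caller.
      static void assertRenameFailsQuietly(FileSystem fs, Path src, Path dst)
          throws IOException {
        Assert.assertFalse(fs.rename(src, dst));
      }

      private RenameContractSketch() { }
    }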
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java
index 19ff428..034cf1e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java
@@ -19,22 +19,35 @@
 package org.apache.hadoop.fs.ozone.contract;
 
 import java.io.IOException;
+import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 
 import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 /**
  * Ozone contract tests creating files.
  */
+@RunWith(Parameterized.class)
 public class ITestOzoneContractCreate extends AbstractContractCreateTest {
 
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
+  private static boolean fsOptimizedServer;
+
+  public ITestOzoneContractCreate(boolean fsoServer)
+      throws IOException {
+    if (fsOptimizedServer != fsoServer) {
+      setFsOptimizedServer(fsoServer);
+      ITestOzoneContractUtils.restartCluster(
+          fsOptimizedServer);
+    }
+  }
+
+  public static void setFsOptimizedServer(boolean fsOptimizedServer) {
+    ITestOzoneContractCreate.fsOptimizedServer = fsOptimizedServer;
   }
 
   @AfterClass
@@ -46,4 +59,9 @@ public class ITestOzoneContractCreate extends AbstractContractCreateTest {
   protected AbstractFSContract createContract(Configuration conf) {
     return new OzoneContract(conf);
   }
+
+  @Parameterized.Parameters
+  public static Collection data() {
+    return ITestOzoneContractUtils.getFsoCombinations();
+  }
 }
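
The same parameterization idiom is applied to every contract test below: the Parameterized runner constructs the test class once per value returned by the @Parameterized.Parameters method, and the constructor restarts the mini cluster only when the requested layout differs from the current one. A self-contained sketch of the bare JUnit 4 mechanics (class and method names are illustrative):

    import java.util.Arrays;
    import java.util.Collection;

    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized;

    @RunWith(Parameterized.class)
    public class LayoutParamSketch {
      private final boolean fsoServer;

      // JUnit instantiates the class once per parameter, so every @Test
      // method runs twice: once with true, once with false.
      public LayoutParamSketch(boolean fsoServer) {
        this.fsoServer = fsoServer;
      }

      @Parameterized.Parameters
      public static Collection<Object> data() {
        return Arrays.asList(true, false);
      }

      @Test
      public void runsOncePerLayout() {
        System.out.println("fsoServer=" + fsoServer);
      }
    }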
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java
index 33e6260..1381a2c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java
@@ -19,22 +19,35 @@
 package org.apache.hadoop.fs.ozone.contract;
 
 import java.io.IOException;
+import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 
 import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 /**
  * Ozone contract tests covering deletes.
  */
+@RunWith(Parameterized.class)
 public class ITestOzoneContractDelete extends AbstractContractDeleteTest {
 
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
+  private static boolean fsOptimizedServer;
+
+  public ITestOzoneContractDelete(boolean fsoServer)
+      throws IOException {
+    if (fsOptimizedServer != fsoServer) {
+      setFsOptimizedServer(fsoServer);
+      ITestOzoneContractUtils.restartCluster(
+          fsOptimizedServer);
+    }
+  }
+
+  public static void setFsOptimizedServer(boolean fsOptimizedServer) {
+    ITestOzoneContractDelete.fsOptimizedServer = fsOptimizedServer;
   }
 
   @AfterClass
@@ -46,4 +59,9 @@ public class ITestOzoneContractDelete extends AbstractContractDeleteTest {
   protected AbstractFSContract createContract(Configuration conf) {
     return new OzoneContract(conf);
   }
+
+  @Parameterized.Parameters
+  public static Collection data() {
+    return ITestOzoneContractUtils.getFsoCombinations();
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java
index 9d9aa56..04a3fb5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java
@@ -19,28 +19,42 @@
 package org.apache.hadoop.fs.ozone.contract;
 
 import java.io.IOException;
+import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 
 import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
  * Ozone contract tests covering getFileStatus.
  */
+@RunWith(Parameterized.class)
 public class ITestOzoneContractGetFileStatus
     extends AbstractContractGetFileStatusTest {
 
+  private static boolean fsOptimizedServer;
+
+  public ITestOzoneContractGetFileStatus(boolean fsoServer)
+      throws IOException {
+    if (fsOptimizedServer != fsoServer) {
+      setFsOptimizedServer(fsoServer);
+      ITestOzoneContractUtils.restartCluster(
+          fsOptimizedServer);
+    }
+  }
+
   private static final Logger LOG =
       LoggerFactory.getLogger(ITestOzoneContractGetFileStatus.class);
 
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
+
+  public static void setFsOptimizedServer(boolean fsOptimizedServer) {
+    ITestOzoneContractGetFileStatus.fsOptimizedServer = fsOptimizedServer;
   }
 
   @AfterClass
@@ -63,4 +77,9 @@ public class ITestOzoneContractGetFileStatus
   protected Configuration createConfiguration() {
     return super.createConfiguration();
   }
+
+  @Parameterized.Parameters
+  public static Collection data() {
+    return ITestOzoneContractUtils.getFsoCombinations();
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java
index 305164c..862b2b9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java
@@ -19,22 +19,35 @@
 package org.apache.hadoop.fs.ozone.contract;
 
 import java.io.IOException;
+import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 
 import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 /**
  * Test dir operations on Ozone.
  */
+@RunWith(Parameterized.class)
 public class ITestOzoneContractMkdir extends AbstractContractMkdirTest {
 
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
+  private static boolean fsOptimizedServer;
+
+  public ITestOzoneContractMkdir(boolean fsoServer)
+      throws IOException {
+    if (fsOptimizedServer != fsoServer) {
+      setFsOptimizedServer(fsoServer);
+      ITestOzoneContractUtils.restartCluster(
+          fsOptimizedServer);
+    }
+  }
+
+  public static void setFsOptimizedServer(boolean fsOptimizedServer) {
+    ITestOzoneContractMkdir.fsOptimizedServer = fsOptimizedServer;
   }
 
   @AfterClass
@@ -46,4 +59,9 @@ public class ITestOzoneContractMkdir extends AbstractContractMkdirTest {
   protected AbstractFSContract createContract(Configuration conf) {
     return new OzoneContract(conf);
   }
+
+  @Parameterized.Parameters
+  public static Collection data() {
+    return ITestOzoneContractUtils.getFsoCombinations();
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java
index aa81965..83a6306 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java
@@ -19,21 +19,35 @@
 package org.apache.hadoop.fs.ozone.contract;
 
 import java.io.IOException;
+import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 
 import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 /**
  * Ozone contract tests opening files.
  */
+@RunWith(Parameterized.class)
 public class ITestOzoneContractOpen extends AbstractContractOpenTest {
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
+
+  private static boolean fsOptimizedServer;
+
+  public ITestOzoneContractOpen(boolean fsoServer)
+      throws IOException {
+    if (fsOptimizedServer != fsoServer) {
+      setFsOptimizedServer(fsoServer);
+      ITestOzoneContractUtils.restartCluster(
+          fsOptimizedServer);
+    }
+  }
+
+  public static void setFsOptimizedServer(boolean fsOptimizedServer) {
+    ITestOzoneContractOpen.fsOptimizedServer = fsOptimizedServer;
   }
 
   @AfterClass
@@ -45,4 +59,9 @@ public class ITestOzoneContractOpen extends AbstractContractOpenTest {
   protected AbstractFSContract createContract(Configuration conf) {
     return new OzoneContract(conf);
   }
+
+  @Parameterized.Parameters
+  public static Collection data() {
+    return ITestOzoneContractUtils.getFsoCombinations();
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java
index 3660d81..2fa1c64 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java
@@ -19,22 +19,35 @@
 package org.apache.hadoop.fs.ozone.contract;
 
 import java.io.IOException;
+import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 
 import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 /**
  * Ozone contract tests covering rename.
  */
+@RunWith(Parameterized.class)
 public class ITestOzoneContractRename extends AbstractContractRenameTest {
 
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
+  private static boolean fsOptimizedServer;
+
+  public ITestOzoneContractRename(boolean fsoServer)
+      throws IOException {
+    if (fsOptimizedServer != fsoServer) {
+      setFsOptimizedServer(fsoServer);
+      ITestOzoneContractUtils.restartCluster(
+          fsOptimizedServer);
+    }
+  }
+
+  public static void setFsOptimizedServer(boolean fsOptimizedServer) {
+    ITestOzoneContractRename.fsOptimizedServer = fsOptimizedServer;
   }
 
   @AfterClass
@@ -47,4 +60,9 @@ public class ITestOzoneContractRename extends AbstractContractRenameTest {
     return new OzoneContract(conf);
   }
 
+
+  @Parameterized.Parameters
+  public static Collection data() {
+    return ITestOzoneContractUtils.getFsoCombinations();
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java
index c64dafa..5ca5bc3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java
@@ -19,23 +19,36 @@
 package org.apache.hadoop.fs.ozone.contract;
 
 import java.io.IOException;
+import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 
 import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 /**
  * Ozone contract test for ROOT directory operations.
  */
+@RunWith(Parameterized.class)
 public class ITestOzoneContractRootDir extends
     AbstractContractRootDirectoryTest {
 
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
+  private static boolean fsOptimizedServer;
+
+  public ITestOzoneContractRootDir(boolean fsoServer)
+      throws IOException {
+    if (fsOptimizedServer != fsoServer) {
+      setFsOptimizedServer(fsoServer);
+      ITestOzoneContractUtils.restartCluster(
+          fsOptimizedServer);
+    }
+  }
+
+  public static void setFsOptimizedServer(boolean fsOptimizedServer) {
+    ITestOzoneContractRootDir.fsOptimizedServer = fsOptimizedServer;
   }
 
   @AfterClass
@@ -48,4 +61,9 @@ public class ITestOzoneContractRootDir extends
     return new OzoneContract(conf);
   }
 
+  @Parameterized.Parameters
+  public static Collection data() {
+    return ITestOzoneContractUtils.getFsoCombinations();
+  }
+
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java
index 2f22025..9457bb8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java
@@ -19,21 +19,35 @@
 package org.apache.hadoop.fs.ozone.contract;
 
 import java.io.IOException;
+import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 
 import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 /**
  * Ozone contract tests covering file seek.
  */
+@RunWith(Parameterized.class)
 public class ITestOzoneContractSeek extends AbstractContractSeekTest {
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
+
+  private static boolean fsOptimizedServer;
+
+  public ITestOzoneContractSeek(boolean fsoServer)
+      throws IOException {
+    if (fsOptimizedServer != fsoServer) {
+      setFsOptimizedServer(fsoServer);
+      ITestOzoneContractUtils.restartCluster(
+          fsOptimizedServer);
+    }
+  }
+
+  public static void setFsOptimizedServer(boolean fsOptimizedServer) {
+    ITestOzoneContractSeek.fsOptimizedServer = fsOptimizedServer;
   }
 
   @AfterClass
@@ -45,4 +59,9 @@ public class ITestOzoneContractSeek extends AbstractContractSeekTest {
   protected AbstractFSContract createContract(Configuration conf) {
     return new OzoneContract(conf);
   }
+
+  @Parameterized.Parameters
+  public static Collection data() {
+    return ITestOzoneContractUtils.getFsoCombinations();
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUnbuffer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUnbuffer.java
index e40b22e..7f55774 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUnbuffer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUnbuffer.java
@@ -21,18 +21,31 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractUnbufferTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 import java.io.IOException;
+import java.util.Collection;
 
 /**
  * Ozone contract tests for {@link org.apache.hadoop.fs.CanUnbuffer#unbuffer}.
  */
+@RunWith(Parameterized.class)
 public class ITestOzoneContractUnbuffer extends AbstractContractUnbufferTest {
 
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
+  private static boolean fsOptimizedServer;
+
+  public ITestOzoneContractUnbuffer(boolean fsoServer)
+      throws IOException {
+    if (fsOptimizedServer != fsoServer) {
+      setFsOptimizedServer(fsoServer);
+      ITestOzoneContractUtils.restartCluster(
+          fsOptimizedServer);
+    }
+  }
+
+  public static void setFsOptimizedServer(boolean fsOptimizedServer) {
+    ITestOzoneContractUnbuffer.fsOptimizedServer = fsOptimizedServer;
   }
 
   @AfterClass
@@ -44,4 +57,9 @@ public class ITestOzoneContractUnbuffer extends AbstractContractUnbufferTest {
   protected AbstractFSContract createContract(Configuration conf) {
     return new OzoneContract(conf);
   }
+
+  @Parameterized.Parameters
+  public static Collection data() {
+    return ITestOzoneContractUtils.getFsoCombinations();
+  }
 }
\ No newline at end of file
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUtils.java
new file mode 100644
index 0000000..1926bd2
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUtils.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.fs.ozone.contract;
+
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Utility class for Ozone-contract tests.
+ */
+public final class ITestOzoneContractUtils {
+
+  private ITestOzoneContractUtils(){}
+
+  private static List<Object> fsoCombinations = Arrays.asList(new Object[] {
+      // FSO configuration is a cluster level server side configuration.
+      // If the cluster is configured with V0 layout version,
+      // a V0 bucket will be created.
+      // If the cluster is configured with V1 layout version,
+      // V1 bucket will be created.
+      // Presently, OzoneClient checks bucketMetadata then invokes V1 or V0
+      // specific code and it makes no sense to add client side configs now.
+      // Once the specific client API to set FSO or non-FSO bucket is provided
+      // the contract test can be refactored to include another parameter
+      // (fsoClient) which sets/unsets the client side configs.
+      true, // Server is configured with new layout (V1)
+      // and new buckets will be operated on
+      false // Server is configured with old layout (V0)
+      // and old buckets will be operated on
+  });
+
+  static List<Object> getFsoCombinations(){
+    return fsoCombinations;
+  }
+
+  public static void restartCluster(boolean fsOptimizedServer)
+      throws IOException {
+    OzoneContract.destroyCluster();
+    OzoneContract.initOzoneConfiguration(
+        fsOptimizedServer);
+    OzoneContract.createCluster();
+  }
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
index 104b10c..f401c06 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
@@ -46,6 +46,8 @@ class OzoneContract extends AbstractFSContract {
   private static MiniOzoneCluster cluster;
   private static final String CONTRACT_XML = "contract/ozone.xml";
 
+  private static boolean fsOptimizedServer;
+
   OzoneContract(Configuration conf) {
     super(conf);
     //insert the base features
@@ -63,6 +65,10 @@ class OzoneContract extends AbstractFSContract {
     return path;
   }
 
+  public static void initOzoneConfiguration(boolean fsoServer){
+    fsOptimizedServer = fsoServer;
+  }
+
   public static void createCluster() throws IOException {
     OzoneConfiguration conf = new OzoneConfiguration();
     DatanodeRatisServerConfig ratisServerConfig =
@@ -79,6 +85,13 @@ class OzoneContract extends AbstractFSContract {
 
     conf.addResource(CONTRACT_XML);
 
+    if (fsOptimizedServer){
+      conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+          true);
+      conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION,
+          OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1);
+    }
+
     cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build();
     try {
       cluster.waitForClusterToBeReady();
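
For reference, the two server-side settings this hunk toggles can be set programmatically as below. The key constants are the ones used in the diff above; note that OZONE_OM_LAYOUT_VERSION_V1 is specific to this feature branch (a sketch, not an excerpt from the patch):

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.om.OMConfigKeys;

    public final class FsoConfSketch {
      // Build a configuration with the FSO ("V1") layout enabled, mirroring
      // what OzoneContract.createCluster() does when fsOptimizedServer is set.
      static OzoneConfiguration fsoEnabled() {
        OzoneConfiguration conf = new OzoneConfiguration();
        conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true);
        conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION,
            OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1);
        return conf;
      }

      private FsoConfSketch() { }
    }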
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index d2b9a6d..1214a5c 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -414,9 +414,11 @@ public class BasicOzoneFileSystem extends FileSystem {
     try {
       adapter.renameKey(srcPath, dstPath);
     } catch (OMException ome) {
-      LOG.error("rename key failed: {}. source:{}, destin:{}",
-              ome.getMessage(), srcPath, dstPath);
-      if (OMException.ResultCodes.KEY_ALREADY_EXISTS == ome.getResult()) {
+      LOG.error("rename key failed: {}. Error code: {} source:{}, destin:{}",
+              ome.getMessage(), ome.getResult(), srcPath, dstPath);
+      if (OMException.ResultCodes.KEY_ALREADY_EXISTS == ome.getResult() ||
+          OMException.ResultCodes.KEY_RENAME_ERROR  == ome.getResult() ||
+          OMException.ResultCodes.KEY_NOT_FOUND == ome.getResult()) {
         return false;
       } else {
         throw ome;
@@ -508,6 +510,9 @@ public class BasicOzoneFileSystem extends FileSystem {
 
     if (adapter.isFSOptimizedBucket()) {
       if (f.isRoot()) {
+        if (!recursive && listStatus(f).length!=0){
+          throw new PathIsNotEmptyDirectoryException(f.toString());
+        }
         LOG.warn("Cannot delete root directory.");
         return false;
       }
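
The last hunk changes delete semantics on an FSO bucket root: a non-recursive delete of a non-empty root now raises PathIsNotEmptyDirectoryException instead of only logging a warning and returning false. A hedged sketch of what a caller can now expect (the wrapper name is illustrative):

    import java.io.IOException;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;

    public final class RootDeleteSketch {
      // Non-recursive delete of a non-empty root now fails fast with an
      // exception; a recursive delete of root still returns false.
      static boolean tryDeleteRoot(FileSystem fs, boolean recursive)
          throws IOException {
        try {
          return fs.delete(new Path("/"), recursive);
        } catch (PathIsNotEmptyDirectoryException e) {
          return false; // root was not empty and recursive == false
        }
      }

      private RootDeleteSketch() { }
    }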




[ozone] 08/19: HDDS-4514. AllocateBlock : lookup and update open file table for the given path (#1679)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 90e8362d5c71c0c21ca53ef29ebd626cd3f4278f
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Tue Dec 22 09:07:25 2020 +0530

    HDDS-4514. AllocateBlock : lookup and update open file table for the given path (#1679)
---
 .../hadoop/fs/ozone/TestOzoneFileSystem.java       |  74 +++++++
 .../hadoop/fs/ozone/TestOzoneFileSystemV1.java     |  25 ---
 .../om/ratis/utils/OzoneManagerRatisUtils.java     |   4 +
 .../om/request/file/OMFileCreateRequestV1.java     |   3 +
 .../ozone/om/request/file/OMFileRequest.java       |  52 +++++
 .../om/request/key/OMAllocateBlockRequestV1.java   | 227 +++++++++++++++++++++
 .../ozone/om/request/key/OMKeyCommitRequestV1.java |  76 +------
 .../om/response/key/OMAllocateBlockResponse.java   |  12 ++
 ...esponse.java => OMAllocateBlockResponseV1.java} |  49 ++---
 .../om/request/file/TestOMFileCreateRequest.java   |  22 --
 .../om/request/file/TestOMFileCreateRequestV1.java |   5 +
 .../om/request/key/TestOMAllocateBlockRequest.java |  44 ++--
 .../request/key/TestOMAllocateBlockRequestV1.java  | 119 +++++++++++
 .../om/request/key/TestOMKeyCommitRequestV1.java   |   5 +
 .../om/request/key/TestOMKeyDeleteRequestV1.java   |  14 ++
 .../ozone/om/request/key/TestOMKeyRequest.java     |  25 +++
 .../response/file/TestOMFileCreateResponseV1.java  |   5 +
 .../response/key/TestOMAllocateBlockResponse.java  |  37 ++--
 ...eV1.java => TestOMAllocateBlockResponseV1.java} |  76 +++----
 .../om/response/key/TestOMKeyCommitResponseV1.java |   5 +
 .../om/response/key/TestOMKeyDeleteResponseV1.java |  16 ++
 21 files changed, 667 insertions(+), 228 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
index 60a28ce..44ea7b9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.fs.TrashPolicy;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -582,6 +583,7 @@ public class TestOzoneFileSystem {
   @Test
   public void testListStatusOnLargeDirectory() throws Exception {
     Path root = new Path("/");
+    deleteRootDir(); // cleanup
     Set<String> paths = new TreeSet<>();
     int numDirs = LISTING_PAGE_SIZE + LISTING_PAGE_SIZE / 2;
     for(int i = 0; i < numDirs; i++) {
@@ -591,6 +593,21 @@ public class TestOzoneFileSystem {
     }
 
     FileStatus[] fileStatuses = o3fs.listStatus(root);
+    // Added logs for debugging failures, to check any sub-path mismatches.
+    Set<String> actualPaths = new TreeSet<>();
+    ArrayList<String> actualPathList = new ArrayList<>();
+    if (rootItemCount != fileStatuses.length) {
+      for (int i = 0; i < fileStatuses.length; i++) {
+        actualPaths.add(fileStatuses[i].getPath().getName());
+        actualPathList.add(fileStatuses[i].getPath().getName());
+      }
+      if (rootItemCount != actualPathList.size()) {
+        actualPaths.removeAll(paths);
+        actualPathList.removeAll(paths);
+        LOG.info("actualPaths: {}", actualPaths);
+        LOG.info("actualPathList: {}", actualPathList);
+      }
+    }
     assertEquals(
         "Total directories listed do not match the existing directories",
         numDirs, fileStatuses.length);
@@ -601,6 +618,31 @@ public class TestOzoneFileSystem {
   }
 
   /**
+   * Cleanup files and directories.
+   *
+   * @throws IOException DB failure
+   */
+  protected void deleteRootDir() throws IOException {
+    Path root = new Path("/");
+    FileStatus[] fileStatuses = fs.listStatus(root);
+
+    rootItemCount = 0; // reset to zero
+
+    if (fileStatuses == null) {
+      return;
+    }
+
+    for (FileStatus fStatus : fileStatuses) {
+      fs.delete(fStatus.getPath(), true);
+    }
+
+    fileStatuses = fs.listStatus(root);
+    if (fileStatuses != null) {
+      Assert.assertEquals("Delete root failed!", 0, fileStatuses.length);
+    }
+  }
+
+  /**
    * Tests listStatus on a path with subdirs.
    */
   @Test
@@ -661,6 +703,38 @@ public class TestOzoneFileSystem {
   }
 
   @Test
+  public void testAllocateMoreThanOneBlock() throws IOException {
+    Path file = new Path("/file");
+    String str = "TestOzoneFileSystemV1.testSeekOnFileLength";
+    byte[] strBytes = str.getBytes();
+    long numBlockAllocationsOrg =
+            cluster.getOzoneManager().getMetrics().getNumBlockAllocates();
+
+    try (FSDataOutputStream out1 = fs.create(file, FsPermission.getDefault(),
+            true, 8, (short) 3, 1, null)) {
+      for (int i = 0; i < 100000; i++) {
+        out1.write(strBytes);
+      }
+    }
+
+    try (FSDataInputStream stream = fs.open(file)) {
+      FileStatus fileStatus = fs.getFileStatus(file);
+      long blkSize = fileStatus.getBlockSize();
+      long fileLength = fileStatus.getLen();
+      Assert.assertTrue("Block allocation should happen",
+              fileLength > blkSize);
+
+      long newNumBlockAllocations =
+              cluster.getOzoneManager().getMetrics().getNumBlockAllocates();
+
+      Assert.assertTrue("Block allocation should happen",
+              (newNumBlockAllocations > numBlockAllocationsOrg));
+
+      stream.seek(fileLength);
+      assertEquals(-1, stream.read());
+    }
+  }
+
   public void testDeleteRoot() throws IOException {
     Path dir = new Path("/dir");
     fs.mkdirs(dir);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
index 212080b..2938714 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
@@ -407,31 +407,6 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
     }
   }
 
-  /**
-   * Cleanup files and directories.
-   *
-   * @throws IOException DB failure
-   */
-  protected void deleteRootDir() throws IOException {
-    Path root = new Path("/");
-    FileStatus[] fileStatuses = fs.listStatus(root);
-
-    rootItemCount = 0; // reset to zero
-
-    if (fileStatuses == null) {
-      return;
-    }
-
-    for (FileStatus fStatus : fileStatuses) {
-      fs.delete(fStatus.getPath(), true);
-    }
-
-    fileStatuses = fs.listStatus(root);
-    if (fileStatuses != null) {
-      Assert.assertEquals("Delete root failed!", 0, fileStatuses.length);
-    }
-  }
-
   @Override
   @Test
   @Ignore("TODO:HDDS-2939")
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index 7b5988c..31120f1 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.ozone.om.request.file.OMFileCreateRequest;
 import org.apache.hadoop.ozone.om.request.file.OMFileCreateRequestV1;
 import org.apache.hadoop.ozone.om.request.key.OMKeysDeleteRequest;
 import org.apache.hadoop.ozone.om.request.key.OMAllocateBlockRequest;
+import org.apache.hadoop.ozone.om.request.key.OMAllocateBlockRequestV1;
 import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequestV1;
 import org.apache.hadoop.ozone.om.request.key.OMKeyCreateRequest;
@@ -138,6 +139,9 @@ public final class OzoneManagerRatisUtils {
     case SetBucketProperty:
       return new OMBucketSetPropertyRequest(omRequest);
     case AllocateBlock:
+      if (omLayoutVersionV1) {
+        return new OMAllocateBlockRequestV1(omRequest);
+      }
       return new OMAllocateBlockRequest(omRequest);
     case CreateKey:
       return new OMKeyCreateRequest(omRequest);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
index 606e15b..e38908a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
@@ -139,6 +139,9 @@ public class OMFileCreateRequestV1 extends OMFileCreateRequest {
                 pathInfoV1.getLeafNodeName());
         dbFileInfo = OMFileRequest.getOmKeyInfoFromFileTable(false,
                 omMetadataManager, dbFileKey, keyName);
+        if (dbFileInfo != null) {
+          ozoneManager.getKeyManager().refresh(dbFileInfo);
+        }
       }
 
       // check if the file or directory already existed in OM
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index fc9bab0..aadc126 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -52,6 +52,8 @@ import org.slf4j.LoggerFactory;
 
 import javax.annotation.Nonnull;
 
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
 
 /**
  * Base class for file requests.
@@ -851,4 +853,54 @@ public final class OMFileRequest {
   public static boolean isImmediateChild(long parentId, long ancestorId) {
     return parentId == ancestorId;
   }
+
+  /**
+   * Get parent id for the user given path.
+   *
+   * @param bucketId       bucket id
+   * @param pathComponents file path elements
+   * @param keyName        user given key name
+   * @param omMetadataManager   om metadata manager
+   * @return lastKnownParentID
+   * @throws IOException DB failure, or parent does not exist in DirectoryTable
+   */
+  public static long getParentID(long bucketId, Iterator<Path> pathComponents,
+      String keyName, OMMetadataManager omMetadataManager) throws IOException {
+
+    long lastKnownParentId = bucketId;
+
+    // If no sub-dirs then bucketID is the root/parent.
+    if(!pathComponents.hasNext()){
+      return bucketId;
+    }
+
+    OmDirectoryInfo omDirectoryInfo;
+    while (pathComponents.hasNext()) {
+      String nodeName = pathComponents.next().toString();
+      boolean reachedLastPathComponent = !pathComponents.hasNext();
+      String dbNodeName =
+              omMetadataManager.getOzonePathKey(lastKnownParentId, nodeName);
+
+      omDirectoryInfo = omMetadataManager.
+              getDirectoryTable().get(dbNodeName);
+      if (omDirectoryInfo != null) {
+        if (reachedLastPathComponent) {
+          throw new OMException("Can not create file: " + keyName +
+                  " as there is already directory in the given path",
+                  NOT_A_FILE);
+        }
+        lastKnownParentId = omDirectoryInfo.getObjectID();
+      } else {
+        // One of the sub-directories doesn't exist in the DB. The immediate
+        // parent must exist to commit the key; otherwise the operation fails.
+        if (!reachedLastPathComponent) {
+          throw new OMException("Failed to find parent directory of "
+                  + keyName + " in DirectoryTable", KEY_NOT_FOUND);
+        }
+        break;
+      }
+    }
+
+    return lastKnownParentId;
+  }
 }
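
To make the new helper concrete: getParentID resolves one path component at
a time against the directory table, whose keys combine the parent object ID
with the child name (getOzonePathKey). A self-contained sketch of the same
walk, with a plain Map standing in for the DirectoryTable (illustrative
names only, not Ozone APIs):

    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.HashMap;
    import java.util.Iterator;
    import java.util.Map;

    public class ParentIdWalk {
      public static void main(String[] args) {
        // dirTable maps "parentId/name" -> objectId, mimicking
        // getOzonePathKey(parentId, name).
        Map<String, Long> dirTable = new HashMap<>();
        long bucketId = 1L;
        dirTable.put(bucketId + "/a", 100L);
        dirTable.put(100L + "/b", 200L);

        long parentId = bucketId;
        Iterator<Path> components = Paths.get("a/b/file1").iterator();
        while (components.hasNext()) {
          String name = components.next().toString();
          boolean last = !components.hasNext();
          Long objectId = dirTable.get(parentId + "/" + name);
          if (objectId != null) {
            if (last) {
              // mirrors the NOT_A_FILE case above
              throw new IllegalStateException("directory exists: " + name);
            }
            parentId = objectId;   // descend one level
          } else if (!last) {
            // mirrors the KEY_NOT_FOUND case above
            throw new IllegalStateException("missing parent dir: " + name);
          }
          // else: last component is the file name; parentId is its parent.
        }
        System.out.println("parentID = " + parentId);   // prints 200
      }
    }
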
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestV1.java
new file mode 100644
index 0000000..a6a2558
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestV1.java
@@ -0,0 +1,227 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.key;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.audit.AuditLogger;
+import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OMMetrics;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.key.OMAllocateBlockResponse;
+import org.apache.hadoop.ozone.om.response.key.OMAllocateBlockResponseV1;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AllocateBlockRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AllocateBlockResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.jetbrains.annotations.NotNull;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+
+/**
+ * Handles allocate block requests for layout version V1.
+ */
+public class OMAllocateBlockRequestV1 extends OMAllocateBlockRequest {
+
+  private static final Logger LOG =
+          LoggerFactory.getLogger(OMAllocateBlockRequestV1.class);
+
+  public OMAllocateBlockRequestV1(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+
+    AllocateBlockRequest allocateBlockRequest =
+            getOmRequest().getAllocateBlockRequest();
+
+    KeyArgs keyArgs =
+            allocateBlockRequest.getKeyArgs();
+
+    OzoneManagerProtocolProtos.KeyLocation blockLocation =
+            allocateBlockRequest.getKeyLocation();
+    Preconditions.checkNotNull(blockLocation);
+
+    String volumeName = keyArgs.getVolumeName();
+    String bucketName = keyArgs.getBucketName();
+    String keyName = keyArgs.getKeyName();
+    long clientID = allocateBlockRequest.getClientID();
+
+    OMMetrics omMetrics = ozoneManager.getMetrics();
+    omMetrics.incNumBlockAllocateCalls();
+
+    AuditLogger auditLogger = ozoneManager.getAuditLogger();
+
+    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
+    auditMap.put(OzoneConsts.CLIENT_ID, String.valueOf(clientID));
+
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+    String openKeyName = null;
+
+    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
+            getOmRequest());
+    OMClientResponse omClientResponse = null;
+
+    OmKeyInfo openKeyInfo = null;
+    IOException exception = null;
+    OmBucketInfo omBucketInfo = null;
+    boolean acquiredLock = false;
+
+    try {
+      keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
+      volumeName = keyArgs.getVolumeName();
+      bucketName = keyArgs.getBucketName();
+
+      // check Acl
+      checkKeyAclsInOpenKeyTable(ozoneManager, volumeName, bucketName, keyName,
+          IAccessAuthorizer.ACLType.WRITE, allocateBlockRequest.getClientID());
+
+      validateBucketAndVolume(omMetadataManager, volumeName,
+          bucketName);
+
+      // Here we don't acquire the bucket/volume lock because, for a single
+      // client, allocateBlock is called serially. With this approach a racing
+      // delete/rename won't fail fast here; the request is expected to fail
+      // later, at the key commit operation.
+      openKeyName = getOpenKeyName(volumeName, bucketName, keyName, clientID,
+              ozoneManager);
+      openKeyInfo = getOpenKeyInfo(omMetadataManager, openKeyName, keyName);
+      if (openKeyInfo == null) {
+        throw new OMException("Open Key not found " + openKeyName,
+                KEY_NOT_FOUND);
+      }
+
+      List<OmKeyLocationInfo> newLocationList = Collections.singletonList(
+              OmKeyLocationInfo.getFromProtobuf(blockLocation));
+
+      acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
+              volumeName, bucketName);
+      omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
+      // check bucket and volume quota
+      long preAllocatedSpace = newLocationList.size()
+              * ozoneManager.getScmBlockSize()
+              * openKeyInfo.getFactor().getNumber();
+      checkBucketQuotaInBytes(omBucketInfo, preAllocatedSpace);
+      // Append new block
+      openKeyInfo.appendNewBlocks(newLocationList, false);
+
+      // Set modification time.
+      openKeyInfo.setModificationTime(keyArgs.getModificationTime());
+
+      // Set the UpdateID to current transactionLogIndex
+      openKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
+
+      // Add to cache.
+      addOpenTableCacheEntry(trxnLogIndex, omMetadataManager, openKeyName,
+              openKeyInfo);
+      omBucketInfo.incrUsedBytes(preAllocatedSpace);
+
+      omResponse.setAllocateBlockResponse(AllocateBlockResponse.newBuilder()
+              .setKeyLocation(blockLocation).build());
+      omClientResponse = getOmClientResponse(clientID, omResponse,
+              openKeyInfo, omBucketInfo.copyObject());
+      LOG.debug("Allocated block for Volume:{}, Bucket:{}, OpenKey:{}",
+              volumeName, bucketName, openKeyName);
+    } catch (IOException ex) {
+      omMetrics.incNumBlockAllocateCallFails();
+      exception = ex;
+      omClientResponse = new OMAllocateBlockResponse(createErrorOMResponse(
+              omResponse, exception));
+      LOG.error("Allocate Block failed. Volume:{}, Bucket:{}, OpenKey:{}. " +
+              "Exception:{}", volumeName, bucketName, openKeyName, exception);
+    } finally {
+      addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
+              omDoubleBufferHelper);
+      if (acquiredLock) {
+        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
+                bucketName);
+      }
+    }
+
+    auditLog(auditLogger, buildAuditMessage(OMAction.ALLOCATE_BLOCK, auditMap,
+            exception, getOmRequest().getUserInfo()));
+
+    return omClientResponse;
+  }
+
+  private OmKeyInfo getOpenKeyInfo(OMMetadataManager omMetadataManager,
+      String openKeyName, String keyName) throws IOException {
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    return OMFileRequest.getOmKeyInfoFromFileTable(true,
+            omMetadataManager, openKeyName, fileName);
+  }
+
+  private String getOpenKeyName(String volumeName, String bucketName,
+      String keyName, long clientID, OzoneManager ozoneManager)
+          throws IOException {
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketId = omBucketInfo.getObjectID();
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    Iterator<Path> pathComponents = Paths.get(keyName).iterator();
+    long parentID = OMFileRequest.getParentID(bucketId, pathComponents,
+            keyName, omMetadataManager);
+    return omMetadataManager.getOpenFileName(parentID, fileName,
+            clientID);
+  }
+
+  private void addOpenTableCacheEntry(long trxnLogIndex,
+      OMMetadataManager omMetadataManager, String openKeyName,
+      OmKeyInfo openKeyInfo) {
+    String fileName = openKeyInfo.getFileName();
+    OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager, openKeyName,
+            openKeyInfo, fileName, trxnLogIndex);
+  }
+
+  @NotNull
+  private OMClientResponse getOmClientResponse(long clientID,
+      OMResponse.Builder omResponse, OmKeyInfo openKeyInfo,
+      OmBucketInfo omBucketInfo) {
+    return new OMAllocateBlockResponseV1(omResponse.build(), openKeyInfo,
+            clientID, omBucketInfo);
+  }
+}
\ No newline at end of file
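
One detail worth calling out in validateAndUpdateCache above: the bucket
quota is charged up front for the worst case, before any data is written.
The formula is the one in the hunk (number of new blocks * SCM block size *
replication factor); a worked example under assumed values:

    // Assumed values for illustration: 256 MB SCM block size, factor THREE.
    long scmBlockSize = 256L * 1024 * 1024;
    int newBlockCount = 1;            // one KeyLocation per AllocateBlock
    int replicationFactor = 3;
    long preAllocatedSpace =
        (long) newBlockCount * scmBlockSize * replicationFactor;
    // preAllocatedSpace == 805306368 bytes (768 MB), checked against the
    // bucket quota and, on success, added to the bucket's usedBytes.
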
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java
index 66b28ae..1ea36cc 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.ozone.om.OMMetrics;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
@@ -53,7 +52,6 @@ import java.util.List;
 import java.util.Map;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
 
 /**
@@ -130,8 +128,8 @@ public class OMKeyCommitRequestV1 extends OMKeyCommitRequest {
       String fileName = OzoneFSUtils.getFileName(keyName);
       omBucketInfo = omMetadataManager.getBucketTable().get(bucketKey);
       long bucketId = omBucketInfo.getObjectID();
-      long parentID = getParentID(bucketId, pathComponents, keyName,
-              omMetadataManager, ozoneManager);
+      long parentID = OMFileRequest.getParentID(bucketId, pathComponents,
+              keyName, omMetadataManager);
       String dbFileKey = omMetadataManager.getOzonePathKey(parentID, fileName);
       dbOpenFileKey = omMetadataManager.getOpenFileName(parentID, fileName,
               commitKeyRequest.getClientID());
@@ -197,74 +195,4 @@ public class OMKeyCommitRequestV1 extends OMKeyCommitRequest {
 
     return omClientResponse;
   }
-
-
-  /**
-   * Check for directory exists with same name, if it exists throw error.
-   *
-   * @param keyName                  key name
-   * @param ozoneManager             Ozone Manager
-   * @param reachedLastPathComponent true if the path component is a fileName
-   * @throws IOException if directory exists with same name
-   */
-  private void checkDirectoryAlreadyExists(String keyName,
-                                           OzoneManager ozoneManager,
-                                           boolean reachedLastPathComponent)
-          throws IOException {
-    // Reached last component, which would be a file. Returns its parentID.
-    if (reachedLastPathComponent && ozoneManager.getEnableFileSystemPaths()) {
-      throw new OMException("Can not create file: " + keyName +
-              " as there is already directory in the given path", NOT_A_FILE);
-    }
-  }
-
-  /**
-   * Get parent id for the user given path.
-   *
-   * @param bucketId          bucket id
-   * @param pathComponents    fie path elements
-   * @param keyName           user given key name
-   * @param omMetadataManager metadata manager
-   * @return lastKnownParentID
-   * @throws IOException DB failure or parent not exists in DirectoryTable
-   */
-  private long getParentID(long bucketId, Iterator<Path> pathComponents,
-                           String keyName, OMMetadataManager omMetadataManager,
-                           OzoneManager ozoneManager)
-          throws IOException {
-
-    long lastKnownParentId = bucketId;
-
-    // If no sub-dirs then bucketID is the root/parent.
-    if(!pathComponents.hasNext()){
-      return bucketId;
-    }
-
-    OmDirectoryInfo omDirectoryInfo;
-    while (pathComponents.hasNext()) {
-      String nodeName = pathComponents.next().toString();
-      boolean reachedLastPathComponent = !pathComponents.hasNext();
-      String dbNodeName =
-              omMetadataManager.getOzonePathKey(lastKnownParentId, nodeName);
-
-      omDirectoryInfo = omMetadataManager.
-              getDirectoryTable().get(dbNodeName);
-      if (omDirectoryInfo != null) {
-        checkDirectoryAlreadyExists(keyName, ozoneManager,
-                reachedLastPathComponent);
-        lastKnownParentId = omDirectoryInfo.getObjectID();
-      } else {
-        // One of the sub-dir doesn't exists in DB. Immediate parent should
-        // exists for committing the key, otherwise will fail the operation.
-        if (!reachedLastPathComponent) {
-          throw new OMException("Failed to commit key, as parent directory of "
-                  + keyName + " entry is not found in DirectoryTable",
-                  KEY_NOT_FOUND);
-        }
-        break;
-      }
-    }
-
-    return lastKnownParentId;
-  }
 }
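
With the helper hoisted into OMFileRequest, the commit path above and the
new allocate-block request resolve parents identically. The shared call
shape, as it appears in both hunks:

    // Shared V1 parent resolution (taken from the hunks above).
    Iterator<Path> pathComponents = Paths.get(keyName).iterator();
    long parentID = OMFileRequest.getParentID(bucketId, pathComponents,
        keyName, omMetadataManager);
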
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java
index 4b20853..c97d702 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java
@@ -74,4 +74,16 @@ public class OMAllocateBlockResponse extends OMClientResponse {
         omMetadataManager.getBucketKey(omKeyInfo.getVolumeName(),
             omKeyInfo.getBucketName()), omBucketInfo);
   }
+
+  protected OmKeyInfo getOmKeyInfo() {
+    return omKeyInfo;
+  }
+
+  protected OmBucketInfo getOmBucketInfo() {
+    return omBucketInfo;
+  }
+
+  protected long getClientID() {
+    return clientID;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponseV1.java
similarity index 59%
copy from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java
copy to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponseV1.java
index 4b20853..ef8b639 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponseV1.java
@@ -18,60 +18,43 @@
 
 package org.apache.hadoop.ozone.om.response.key;
 
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 
-import java.io.IOException;
 import javax.annotation.Nonnull;
+import java.io.IOException;
 
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
 
 /**
- * Response for AllocateBlock request.
+ * Response for AllocateBlock requests, layout version V1.
  */
-@CleanupTableInfo(cleanupTables = {OPEN_KEY_TABLE})
-public class OMAllocateBlockResponse extends OMClientResponse {
+@CleanupTableInfo(cleanupTables = {OPEN_FILE_TABLE})
+public class OMAllocateBlockResponseV1 extends OMAllocateBlockResponse {
 
-  private OmKeyInfo omKeyInfo;
-  private long clientID;
-  private OmBucketInfo omBucketInfo;
-
-  public OMAllocateBlockResponse(@Nonnull OMResponse omResponse,
+  public OMAllocateBlockResponseV1(@Nonnull OMResponse omResponse,
       @Nonnull OmKeyInfo omKeyInfo, long clientID,
       @Nonnull OmBucketInfo omBucketInfo) {
-    super(omResponse);
-    this.omKeyInfo = omKeyInfo;
-    this.clientID = clientID;
-    this.omBucketInfo = omBucketInfo;
-  }
-
-  /**
-   * For when the request is not successful.
-   * For a successful request, the other constructor should be used.
-   */
-  public OMAllocateBlockResponse(@Nonnull OMResponse omResponse) {
-    super(omResponse);
-    checkStatusNotOK();
+    super(omResponse, omKeyInfo, clientID, omBucketInfo);
   }
 
   @Override
   public void addToDBBatch(OMMetadataManager omMetadataManager,
       BatchOperation batchOperation) throws IOException {
 
-    String openKey = omMetadataManager.getOpenKey(omKeyInfo.getVolumeName(),
-        omKeyInfo.getBucketName(), omKeyInfo.getKeyName(), clientID);
-    omMetadataManager.getOpenKeyTable().putWithBatch(batchOperation, openKey,
-        omKeyInfo);
+    OMFileRequest.addToOpenFileTable(omMetadataManager, batchOperation,
+            getOmKeyInfo(), getClientID());
 
     // update bucket usedBytes.
     omMetadataManager.getBucketTable().putWithBatch(batchOperation,
-        omMetadataManager.getBucketKey(omKeyInfo.getVolumeName(),
-            omKeyInfo.getBucketName()), omBucketInfo);
+            omMetadataManager.getBucketKey(getOmKeyInfo().getVolumeName(),
+                    getOmKeyInfo().getBucketName()), getOmBucketInfo());
   }
 }
+
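
Note the @CleanupTableInfo switch from OPEN_KEY_TABLE to OPEN_FILE_TABLE:
the V1 response writes through the open file table rather than the open key
table. For orientation, a sketch of how such a response is applied during a
double-buffer flush, assuming the usual hdds DBStore batch API
(initBatchOperation/commitBatchOperation); the double-buffer plumbing
itself is elided:

    // Sketch only; assumes the standard DBStore batch API.
    try (BatchOperation batch =
             omMetadataManager.getStore().initBatchOperation()) {
      omAllocateBlockResponseV1.addToDBBatch(omMetadataManager, batch);
      omMetadataManager.getStore().commitBatchOperation(batch);
    }
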
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
index a963f88..0a76589 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
@@ -398,28 +398,6 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
   }
 
   /**
-   * Verify path in open key table. Also, it returns OMKeyInfo for the given
-   * key path.
-   *
-   * @param key      key name
-   * @param id       client id
-   * @param doAssert if true then do assertion, otherwise it just skip.
-   * @return om key info for the given key path.
-   * @throws Exception DB failure
-   */
-  protected OmKeyInfo verifyPathInOpenKeyTable(String key, long id,
-                                               boolean doAssert)
-          throws Exception {
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-            key, id);
-    OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
-    if (doAssert) {
-      Assert.assertNotNull("Failed to find key in OpenKeyTable", omKeyInfo);
-    }
-    return omKeyInfo;
-  }
-
-  /**
    * Gets OMFileCreateRequest reference.
    *
    * @param omRequest om request
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestV1.java
index 7ded386..046ac90 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestV1.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.util.StringUtils;
@@ -183,6 +184,10 @@ public class TestOMFileCreateRequestV1 extends TestOMFileCreateRequest {
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration config = super.getOzoneConfiguration();
     config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    // The omLayoutVersionV1 flag is normally set when OzoneManager#start()
+    // is invoked, which doesn't happen in this test. Hence the flag is set
+    // explicitly here so that the prefix tables are populated.
+    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
     return config;
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java
index 4b3d38f..9d26d0d 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java
@@ -24,6 +24,7 @@ import java.util.List;
 import java.util.UUID;
 
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -57,21 +58,19 @@ public class TestOMAllocateBlockRequest extends TestOMKeyRequest {
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
 
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
+    addKeyToOpenKeyTable(volumeName, bucketName);
 
     OMRequest modifiedOmRequest =
         doPreExecute(createAllocateBlockRequest());
 
     OMAllocateBlockRequest omAllocateBlockRequest =
-        new OMAllocateBlockRequest(modifiedOmRequest);
+            getOmAllocateBlockRequest(modifiedOmRequest);
 
     // Check before calling validateAndUpdateCache. As adding DB entry has
     // not added any blocks, so size should be zero.
 
-    OmKeyInfo omKeyInfo =
-        omMetadataManager.getOpenKeyTable().get(omMetadataManager.getOpenKey(
-            volumeName, bucketName, keyName, clientID));
+    OmKeyInfo omKeyInfo = verifyPathInOpenKeyTable(keyName, clientID,
+            true);
 
     List<OmKeyLocationInfo> omKeyLocationInfo =
         omKeyInfo.getLatestVersionLocations().getLocationList();
@@ -87,10 +86,8 @@ public class TestOMAllocateBlockRequest extends TestOMKeyRequest {
 
     // Check open table whether new block is added or not.
 
-    omKeyInfo =
-        omMetadataManager.getOpenKeyTable().get(omMetadataManager.getOpenKey(
-            volumeName, bucketName, keyName, clientID));
-
+    omKeyInfo = verifyPathInOpenKeyTable(keyName, clientID,
+            true);
 
     // Check modification time
     Assert.assertEquals(modifiedOmRequest.getAllocateBlockRequest()
@@ -119,6 +116,12 @@ public class TestOMAllocateBlockRequest extends TestOMKeyRequest {
 
   }
 
+  @NotNull
+  protected OMAllocateBlockRequest getOmAllocateBlockRequest(
+          OMRequest modifiedOmRequest) {
+    return new OMAllocateBlockRequest(modifiedOmRequest);
+  }
+
   @Test
   public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception {
 
@@ -126,7 +129,7 @@ public class TestOMAllocateBlockRequest extends TestOMKeyRequest {
         doPreExecute(createAllocateBlockRequest());
 
     OMAllocateBlockRequest omAllocateBlockRequest =
-        new OMAllocateBlockRequest(modifiedOmRequest);
+            getOmAllocateBlockRequest(modifiedOmRequest);
 
 
     OMClientResponse omAllocateBlockResponse =
@@ -145,7 +148,7 @@ public class TestOMAllocateBlockRequest extends TestOMKeyRequest {
         doPreExecute(createAllocateBlockRequest());
 
     OMAllocateBlockRequest omAllocateBlockRequest =
-        new OMAllocateBlockRequest(modifiedOmRequest);
+            getOmAllocateBlockRequest(modifiedOmRequest);
 
 
     // Added only volume to DB.
@@ -168,7 +171,7 @@ public class TestOMAllocateBlockRequest extends TestOMKeyRequest {
         doPreExecute(createAllocateBlockRequest());
 
     OMAllocateBlockRequest omAllocateBlockRequest =
-        new OMAllocateBlockRequest(modifiedOmRequest);
+            getOmAllocateBlockRequest(modifiedOmRequest);
 
     // Add volume, bucket entries to DB.
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
@@ -190,10 +193,11 @@ public class TestOMAllocateBlockRequest extends TestOMKeyRequest {
    * @return OMRequest - modified request returned from preExecute.
    * @throws Exception
    */
-  private OMRequest doPreExecute(OMRequest originalOMRequest) throws Exception {
+  protected OMRequest doPreExecute(OMRequest originalOMRequest)
+      throws Exception {
 
     OMAllocateBlockRequest omAllocateBlockRequest =
-        new OMAllocateBlockRequest(originalOMRequest);
+            getOmAllocateBlockRequest(originalOMRequest);
 
     OMRequest modifiedOmRequest =
         omAllocateBlockRequest.preExecute(ozoneManager);
@@ -228,7 +232,7 @@ public class TestOMAllocateBlockRequest extends TestOMKeyRequest {
   }
 
 
-  private OMRequest createAllocateBlockRequest() {
+  protected OMRequest createAllocateBlockRequest() {
 
     KeyArgs keyArgs = KeyArgs.newBuilder()
         .setVolumeName(volumeName).setBucketName(bucketName)
@@ -246,4 +250,12 @@ public class TestOMAllocateBlockRequest extends TestOMKeyRequest {
         .setAllocateBlockRequest(allocateBlockRequest).build();
 
   }
+
+  protected String addKeyToOpenKeyTable(String volumeName, String bucketName)
+          throws Exception {
+    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName,
+            keyName, clientID, replicationType, replicationFactor,
+            omMetadataManager);
+    return "";
+  }
 }
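
A note on the refactor above: request construction and key seeding are now
overridable factory methods (getOmAllocateBlockRequest, addKeyToOpenKeyTable,
verifyPathInOpenKeyTable), so the V1 test that follows reuses every @Test
as-is and only swaps the layout-specific pieces. The base
addKeyToOpenKeyTable returns an empty string because flat-layout callers
don't use the returned DB key; the V1 override returns the ozone path key
instead. The pattern in miniature (hypothetical names):

    // Hypothetical miniature of the factory-method refactor.
    class BaseAllocateBlockTest {
      protected String seedOpenKey() { return ""; }      // flat layout
      void testAllocate() {
        String dbKey = seedOpenKey();
        // ... assertions shared by both layouts run here ...
      }
    }

    class AllocateBlockTestV1 extends BaseAllocateBlockTest {
      @Override
      protected String seedOpenKey() {
        return "parentId/file1";                         // prefix layout
      }
    }
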
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestV1.java
new file mode 100644
index 0000000..4e74979
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestV1.java
@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.apache.hadoop.ozone.om.request.key;
+
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Assert;
+
+/**
+ * Tests the OMAllocateBlockRequest class for layout version V1.
+ */
+public class TestOMAllocateBlockRequestV1 extends TestOMAllocateBlockRequest {
+
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    // The omLayoutVersionV1 flag is normally set when OzoneManager#start()
+    // is invoked, which doesn't happen in this test. Hence the flag is set
+    // explicitly here so that the prefix tables are populated.
+    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
+    return config;
+  }
+
+  protected String addKeyToOpenKeyTable(String volumeName, String bucketName)
+          throws Exception {
+    // Build the key under a parent directory so that parentID is initialized.
+    String parentDir = keyName;
+    String fileName = "file1";
+    keyName = parentDir + OzoneConsts.OM_KEY_PREFIX + fileName;
+
+    // add parentDir to dirTable
+    long parentID = TestOMRequestUtils.addParentsToDirTable(volumeName,
+            bucketName, parentDir, omMetadataManager);
+    long txnId = 50;
+    long objectId = parentID + 1;
+
+    OmKeyInfo omKeyInfoV1 =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId,
+                    Time.now());
+
+    // add key to openFileTable
+    TestOMRequestUtils.addFileToKeyTable(true, false,
+            fileName, omKeyInfoV1, clientID, txnLogId, omMetadataManager);
+
+    return omMetadataManager.getOzonePathKey(parentID, fileName);
+  }
+
+  @NotNull
+  protected OMAllocateBlockRequest getOmAllocateBlockRequest(
+          OzoneManagerProtocolProtos.OMRequest modifiedOmRequest) {
+    return new OMAllocateBlockRequestV1(modifiedOmRequest);
+  }
+
+  @Override
+  protected OmKeyInfo verifyPathInOpenKeyTable(String key, long id,
+      boolean doAssert) throws Exception {
+    long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName,
+            omMetadataManager);
+    String[] pathComponents = StringUtils.split(key, '/');
+    long parentId = bucketId;
+    for (int indx = 0; indx < pathComponents.length; indx++) {
+      String pathElement = pathComponents[indx];
+      // Reached the last component, which is the file name.
+      if (indx == pathComponents.length - 1) {
+        String dbOpenFileName = omMetadataManager.getOpenFileName(
+                parentId, pathElement, id);
+        OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable()
+                .get(dbOpenFileName);
+        if (doAssert) {
+          Assert.assertNotNull("Invalid key!", omKeyInfo);
+        }
+        return omKeyInfo;
+      } else {
+        // directory
+        String dbKey = omMetadataManager.getOzonePathKey(parentId,
+                pathElement);
+        OmDirectoryInfo dirInfo =
+                omMetadataManager.getDirectoryTable().get(dbKey);
+        parentId = dirInfo.getObjectID();
+      }
+    }
+    if (doAssert) {
+      Assert.fail("Invalid key!");
+    }
+    return null;
+  }
+}
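
The override above descends the directory table level by level before
looking up the open file, which is the essential difference from the flat
layout. Roughly, the two DB key shapes being compared (illustrative only;
the canonical formatting lives in OMMetadataManager#getOpenKey and
#getOpenFileName):

    // Flat layout (base test), built from names:
    String flatOpenKey = "/" + volumeName + "/" + bucketName + "/"
        + keyName + "/" + clientID;
    // Prefix layout V1 (this test), built from the parent's object ID:
    String prefixOpenKey = parentId + "/" + fileName + "/" + clientID;
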
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestV1.java
index f5168e1..ed1e2bd 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestV1.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.util.Time;
@@ -87,6 +88,10 @@ public class TestOMKeyCommitRequestV1 extends TestOMKeyCommitRequest {
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration config = super.getOzoneConfiguration();
     config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    // The omLayoutVersionV1 flag is normally set when OzoneManager#start()
+    // is invoked, which doesn't happen in this test. Hence the flag is set
+    // explicitly here so that the prefix tables are populated.
+    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
     return config;
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestV1.java
index dbba143..7527e78 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestV1.java
@@ -18,8 +18,11 @@
 
 package org.apache.hadoop.ozone.om.request.key;
 
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.util.Time;
@@ -54,4 +57,15 @@ public class TestOMKeyDeleteRequestV1 extends TestOMKeyDeleteRequest {
             fileName, omKeyInfo, -1, 50, omMetadataManager);
     return omKeyInfo.getPath();
   }
+
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    // The omLayoutVersionV1 flag is normally set when OzoneManager#start()
+    // is invoked, which doesn't happen in this test. Hence the flag is set
+    // explicitly here so that the prefix tables are populated.
+    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
+    return config;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
index 4bf66bb..33f58bb 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
@@ -27,11 +27,13 @@ import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.ozone.om.ResolvedBucket;
 import org.apache.hadoop.ozone.om.KeyManager;
 import org.apache.hadoop.ozone.om.KeyManagerImpl;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
 import org.jetbrains.annotations.NotNull;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.rules.TemporaryFolder;
@@ -179,6 +181,29 @@ public class TestOMKeyRequest {
     return new OzoneConfiguration();
   }
 
+
+  /**
+   * Verifies the path in the open key table and returns the OMKeyInfo
+   * for the given key path.
+   *
+   * @param key      key name
+   * @param id       client id
+   * @param doAssert if true, assert that the key exists; otherwise skip it.
+   * @return om key info for the given key path.
+   * @throws Exception DB failure
+   */
+  protected OmKeyInfo verifyPathInOpenKeyTable(String key, long id,
+                                               boolean doAssert)
+          throws Exception {
+    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
+            key, id);
+    OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
+    if (doAssert) {
+      Assert.assertNotNull("Failed to find key in OpenKeyTable", omKeyInfo);
+    }
+    return omKeyInfo;
+  }
+
   @After
   public void stop() {
     omMetrics.unRegister();
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java
index 19a1bb9..bc4345e 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse;
 import org.apache.hadoop.ozone.om.response.key.TestOMKeyCreateResponse;
@@ -68,6 +69,10 @@ public class TestOMFileCreateResponseV1 extends TestOMKeyCreateResponse {
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration config = super.getOzoneConfiguration();
     config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    // The omLayoutVersionV1 flag is normally set when OzoneManager#start()
+    // is invoked, which doesn't happen in this test. Hence the flag is set
+    // explicitly here so that the prefix tables are populated.
+    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
     return config;
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java
index 602ec99..33c16ae 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.om.response.key;
 
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -37,8 +38,7 @@ public class TestOMAllocateBlockResponse extends TestOMKeyResponse {
   @Test
   public void testAddToDBBatch() throws Exception {
 
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
+    OmKeyInfo omKeyInfo = createOmKeyInfo();
     OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
         .setVolumeName(volumeName).setBucketName(bucketName)
         .setCreationTime(Time.now()).build();
@@ -50,11 +50,9 @@ public class TestOMAllocateBlockResponse extends TestOMKeyResponse {
         .setCmdType(OzoneManagerProtocolProtos.Type.AllocateBlock)
         .build();
     OMAllocateBlockResponse omAllocateBlockResponse =
-        new OMAllocateBlockResponse(omResponse, omKeyInfo, clientID,
-            omBucketInfo);
+            getOmAllocateBlockResponse(omKeyInfo, omBucketInfo, omResponse);
 
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, clientID);
+    String openKey = getOpenKey();
 
     // Not adding key entry before to test whether commit is successful or not.
     Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
@@ -68,8 +66,7 @@ public class TestOMAllocateBlockResponse extends TestOMKeyResponse {
 
   @Test
   public void testAddToDBBatchWithErrorResponse() throws Exception {
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
+    OmKeyInfo omKeyInfo = createOmKeyInfo();
     OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
         .setVolumeName(volumeName).setBucketName(bucketName)
         .setCreationTime(Time.now()).build();
@@ -81,12 +78,10 @@ public class TestOMAllocateBlockResponse extends TestOMKeyResponse {
         .setCmdType(OzoneManagerProtocolProtos.Type.AllocateBlock)
         .build();
     OMAllocateBlockResponse omAllocateBlockResponse =
-        new OMAllocateBlockResponse(omResponse, omKeyInfo, clientID,
-            omBucketInfo);
+            getOmAllocateBlockResponse(omKeyInfo, omBucketInfo, omResponse);
 
     // Before calling addToDBBatch
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, clientID);
+    String openKey = getOpenKey();
     Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
 
     omAllocateBlockResponse.checkAndUpdateDB(omMetadataManager, batchOperation);
@@ -98,4 +93,22 @@ public class TestOMAllocateBlockResponse extends TestOMKeyResponse {
     Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
 
   }
+
+  protected OmKeyInfo createOmKeyInfo() throws Exception {
+    return TestOMRequestUtils.createOmKeyInfo(volumeName,
+            bucketName, keyName, replicationType, replicationFactor);
+  }
+
+  protected String getOpenKey() throws Exception {
+    return omMetadataManager.getOpenKey(volumeName, bucketName,
+            keyName, clientID);
+  }
+
+  @NotNull
+  protected OMAllocateBlockResponse getOmAllocateBlockResponse(
+          OmKeyInfo omKeyInfo, OmBucketInfo omBucketInfo,
+          OMResponse omResponse) {
+    return new OMAllocateBlockResponse(omResponse, omKeyInfo, clientID,
+            omBucketInfo);
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseV1.java
similarity index 53%
copy from hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
copy to hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseV1.java
index 369faa9..e105a37 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseV1.java
@@ -20,75 +20,54 @@ package org.apache.hadoop.ozone.om.response.key;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.apache.hadoop.util.Time;
 import org.jetbrains.annotations.NotNull;
-import org.junit.Assert;
 
 /**
- * Tests OMKeyCommitResponse layout version V1.
+ * Tests OMAllocateBlockResponse for layout version V1.
  */
-public class TestOMKeyCommitResponseV1 extends TestOMKeyCommitResponse {
+public class TestOMAllocateBlockResponseV1
+        extends TestOMAllocateBlockResponse {
 
-  @NotNull
-  protected OMKeyCommitResponse getOmKeyCommitResponse(
-          OmVolumeArgs omVolumeArgs, OmKeyInfo omKeyInfo,
-          OzoneManagerProtocolProtos.OMResponse omResponse, String openKey,
-          String ozoneKey) {
-    Assert.assertNotNull(omBucketInfo);
-    return new OMKeyCommitResponseV1(
-            omResponse, omKeyInfo, ozoneKey, openKey, omVolumeArgs,
-            omBucketInfo);
-  }
+  // a synthetic parent ID; it doesn't actually exist in the dirTable
+  private long parentID = 10;
+  private String fileName = "file1";
 
-  @NotNull
-  @Override
-  protected OmKeyInfo getOmKeyInfo() {
-    Assert.assertNotNull(omBucketInfo);
-    return TestOMRequestUtils.createOmKeyInfo(volumeName,
-            omBucketInfo.getBucketName(), keyName, replicationType,
-            replicationFactor,
-            omBucketInfo.getObjectID() + 1,
-            omBucketInfo.getObjectID(), 100, Time.now());
-  }
+  protected OmKeyInfo createOmKeyInfo() throws Exception {
+    // Build the key under a parent directory so that parentID is initialized.
+    String parentDir = keyName;
+    keyName = parentDir + OzoneConsts.OM_KEY_PREFIX + fileName;
 
-  @NotNull
-  @Override
-  protected void addKeyToOpenKeyTable() throws Exception {
-    Assert.assertNotNull(omBucketInfo);
-    long parentID = omBucketInfo.getObjectID();
-    long objectId = parentID + 10;
+    long txnId = 50;
+    long objectId = parentID + 1;
 
     OmKeyInfo omKeyInfoV1 =
             TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
                     HddsProtos.ReplicationType.RATIS,
-                    HddsProtos.ReplicationFactor.ONE, objectId, parentID, 100,
+                    HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId,
                     Time.now());
-
-    String fileName = OzoneFSUtils.getFileName(keyName);
-    TestOMRequestUtils.addFileToKeyTable(true, false,
-            fileName, omKeyInfoV1, clientID, txnLogId, omMetadataManager);
+    return omKeyInfoV1;
   }
 
-  @NotNull
-  @Override
-  protected String getOpenKeyName() {
-    Assert.assertNotNull(omBucketInfo);
+  protected String getOpenKey() throws Exception {
     return omMetadataManager.getOpenFileName(
-            omBucketInfo.getObjectID(), keyName, clientID);
+            parentID, fileName, clientID);
   }
 
   @NotNull
-  @Override
-  protected String getOzoneKey() {
-    Assert.assertNotNull(omBucketInfo);
-    return omMetadataManager.getOzonePathKey(omBucketInfo.getObjectID(),
-            keyName);
+  protected OMAllocateBlockResponse getOmAllocateBlockResponse(
+          OmKeyInfo omKeyInfo, OmVolumeArgs omVolumeArgs,
+          OmBucketInfo omBucketInfo, OMResponse omResponse) {
+    return new OMAllocateBlockResponseV1(omResponse, omKeyInfo, clientID,
+            omBucketInfo);
   }
 
   @NotNull
@@ -96,6 +75,11 @@ public class TestOMKeyCommitResponseV1 extends TestOMKeyCommitResponse {
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration config = super.getOzoneConfiguration();
     config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    // The omLayoutVersionV1 flag is normally set when OzoneManager#start()
+    // is invoked, which doesn't happen in this test. Hence the flag is set
+    // explicitly here so that the prefix tables are populated.
+    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
     return config;
   }
+
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
index 369faa9..1e59ce8 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.util.Time;
@@ -96,6 +97,10 @@ public class TestOMKeyCommitResponseV1 extends TestOMKeyCommitResponse {
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration config = super.getOzoneConfiguration();
     config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    // The omLayoutVersionV1 flag is normally set when OzoneManager#start()
+    // is invoked, which doesn't happen in this test. Hence the flag is set
+    // explicitly here so that the prefix tables are populated.
+    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
     return config;
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseV1.java
index 3cfec38..d35c79e 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseV1.java
@@ -18,11 +18,15 @@
 
 package org.apache.hadoop.ozone.om.response.key;
 
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
 
 /**
@@ -67,4 +71,16 @@ public class TestOMKeyDeleteResponseV1 extends TestOMKeyDeleteResponse {
             getOmBucketInfo().getObjectID() + 1,
             getOmBucketInfo().getObjectID(), 100, Time.now());
   }
+
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    // The omLayoutVersionV1 flag is normally set when OzoneManager#start()
+    // is invoked, which doesn't happen in this test. Hence the flag is set
+    // explicitly here so that the prefix tables are populated.
+    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
+    return config;
+  }
 }




[ozone] 12/19: HDDS-4720. RenameKey : add unit test to verify bucket#renameKey (#1847)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 9abd402ae5dc8b59eebe8f46fa1a666e5cabfcd6
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Wed Jan 27 22:44:09 2021 +0530

    HDDS-4720. RenameKey : add unit test to verify bucket#renameKey (#1847)
---
 .../apache/hadoop/ozone/om/TestObjectStoreV1.java  | 112 +++++++++++++++++++++
 1 file changed, 112 insertions(+)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
index b88bbc3..d343e2c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.ozone.TestDataUtil;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneKey;
 import org.apache.hadoop.ozone.client.OzoneKeyDetails;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.KeyOutputStream;
@@ -52,8 +53,12 @@ import java.io.IOException;
 import java.util.HashMap;
 import java.util.UUID;
 
+import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
+import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_ALREADY_EXISTS;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
@@ -264,6 +269,113 @@ public class TestObjectStoreV1 {
             dirPathC.getObjectID(), true);
   }
 
+  @Test
+  public void testRenameKey() throws IOException {
+    String fromKeyName = UUID.randomUUID().toString();
+    String value = "sample value";
+    OzoneClient client = cluster.getClient();
+
+    ObjectStore objectStore = client.getObjectStore();
+    OzoneVolume volume = objectStore.getVolume(volumeName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+    createTestKey(bucket, fromKeyName, value);
+
+    // Rename to empty string should fail.
+    String toKeyName = "";
+    try {
+      bucket.renameKey(fromKeyName, toKeyName);
+      fail("Rename to empty string should fail!");
+    } catch (OMException ome) {
+      Assert.assertEquals(OMException.ResultCodes.INVALID_KEY_NAME,
+              ome.getResult());
+    }
+
+    toKeyName = UUID.randomUUID().toString();
+    bucket.renameKey(fromKeyName, toKeyName);
+
+    // Lookup for old key should fail.
+    try {
+      bucket.getKey(fromKeyName);
+      fail("Lookup for old from key name should fail!");
+    } catch (OMException ome) {
+      Assert.assertEquals(KEY_NOT_FOUND, ome.getResult());
+    }
+
+    OzoneKey key = bucket.getKey(toKeyName);
+    Assert.assertEquals(toKeyName, key.getName());
+  }
+
+  @Test
+  public void testKeyRenameWithSubDirs() throws Exception {
+    String keyName1 = "dir1/dir2/file1";
+    String keyName2 = "dir1/dir2/file2";
+
+    String newKeyName1 = "dir1/key1";
+    String newKeyName2 = "dir1/key2";
+
+    String value = "sample value";
+    OzoneClient client = cluster.getClient();
+    ObjectStore objectStore = client.getObjectStore();
+    OzoneVolume volume = objectStore.getVolume(volumeName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+    createTestKey(bucket, keyName1, value);
+    createTestKey(bucket, keyName2, value);
+
+    bucket.renameKey(keyName1, newKeyName1);
+    bucket.renameKey(keyName2, newKeyName2);
+
+    // new key should exist
+    Assert.assertEquals(newKeyName1, bucket.getKey(newKeyName1).getName());
+    Assert.assertEquals(newKeyName2, bucket.getKey(newKeyName2).getName());
+
+    // old key should not exist
+    assertKeyRenamedEx(bucket, keyName1);
+    assertKeyRenamedEx(bucket, keyName2);
+  }
+
+  @Test
+  public void testRenameToAnExistingKey() throws Exception {
+    String keyName1 = "dir1/dir2/file1";
+    String keyName2 = "dir1/dir2/file2";
+
+    String value = "sample value";
+    OzoneClient client = cluster.getClient();
+    ObjectStore objectStore = client.getObjectStore();
+    OzoneVolume volume = objectStore.getVolume(volumeName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+    createTestKey(bucket, keyName1, value);
+    createTestKey(bucket, keyName2, value);
+
+    try {
+      bucket.renameKey(keyName1, keyName2);
+      fail("Should throw exception as destin key already exists!");
+    } catch (OMException e) {
+      Assert.assertEquals(KEY_ALREADY_EXISTS, e.getResult());
+    }
+  }
+
+  private void assertKeyRenamedEx(OzoneBucket bucket, String keyName)
+      throws Exception {
+    OMException oe = null;
+    try {
+      bucket.getKey(keyName);
+    } catch (OMException e) {
+      oe = e;
+    }
+    Assert.assertEquals(KEY_NOT_FOUND, oe.getResult());
+  }
+
+  private void createTestKey(OzoneBucket bucket, String keyName,
+      String keyValue) throws IOException {
+    OzoneOutputStream out = bucket.createKey(keyName,
+            keyValue.getBytes().length, STAND_ALONE,
+            ONE, new HashMap<>());
+    out.write(keyValue.getBytes());
+    out.close();
+    OzoneKey key = bucket.getKey(keyName);
+    Assert.assertEquals(keyName, key.getName());
+  }
+
   private OmDirectoryInfo getDirInfo(String parentKey) throws Exception {
     OMMetadataManager omMetadataManager =
             cluster.getOzoneManager().getMetadataManager();
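
Taken together, these tests pin down the bucket#renameKey contract: renaming to
an empty name fails with INVALID_KEY_NAME, renaming onto an existing key fails
with KEY_ALREADY_EXISTS, and after a successful rename the old name yields
KEY_NOT_FOUND. A minimal client-side sketch of that contract (the volume and
bucket names are hypothetical placeholders):

    OzoneBucket bucket = client.getObjectStore()
        .getVolume("vol1").getBucket("bucket1"); // hypothetical names
    bucket.renameKey("dir1/file1", "dir1/file2");
    try {
      bucket.getKey("dir1/file1");               // old name must be gone
      fail("Expected KEY_NOT_FOUND");
    } catch (OMException ome) {
      Assert.assertEquals(KEY_NOT_FOUND, ome.getResult());
    }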




[ozone] 15/19: HDDS-4805. [FSO]Fix findbugs issues after HDDS-2195 (#1906)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit db4f0844f6a19621fd1775d1be4aa02fe11d388c
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Mon Feb 8 19:29:09 2021 +0530

    HDDS-4805. [FSO]Fix findbugs issues after HDDS-2195 (#1906)
---
 .../apache/hadoop/fs/ozone/TestOzoneFileOps.java   | 15 +++-
 .../hadoop/fs/ozone/TestOzoneFileSystem.java       | 60 ++++++++------
 .../hadoop/fs/ozone/TestOzoneFileSystemV1.java     | 94 +++++++++++-----------
 .../hadoop/ozone/client/rpc/TestReadRetries.java   | 10 +--
 .../apache/hadoop/ozone/om/TestObjectStoreV1.java  | 39 ++++++---
 .../file/TestOMDirectoryCreateRequestV1.java       | 20 ++---
 .../om/request/key/TestOMKeyCreateRequestV1.java   |  5 +-
 .../TestS3InitiateMultipartUploadRequestV1.java    |  4 -
 .../file/TestOMDirectoryCreateResponseV1.java      |  2 -
 .../response/file/TestOMFileCreateResponseV1.java  |  6 +-
 .../om/response/key/TestOMKeyCommitResponse.java   |  2 -
 .../om/response/key/TestOMKeyCreateResponseV1.java |  6 +-
 .../TestS3InitiateMultipartUploadResponseV1.java   |  5 --
 13 files changed, 147 insertions(+), 121 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java
index 12dd51e..176d0c4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.After;
 import org.junit.Before;
@@ -155,12 +156,20 @@ public class TestOzoneFileOps {
     // trigger CommitKeyRequest
     outputStream.close();
 
-    Assert.assertTrue("Failed to commit the open file:" + openFileKey,
-            omMgr.getOpenKeyTable().isEmpty());
-
     OmKeyInfo omKeyInfo = omMgr.getKeyTable().get(openFileKey);
     Assert.assertNotNull("Invalid Key!", omKeyInfo);
     verifyOMFileInfoFormat(omKeyInfo, file.getName(), d2ObjectID);
+
+    // wait for DB updates
+    GenericTestUtils.waitFor(() -> {
+      try {
+        return omMgr.getOpenKeyTable().isEmpty();
+      } catch (IOException e) {
+        LOG.error("DB failure!", e);
+        Assert.fail("DB failure!");
+        return false;
+      }
+    }, 1000, 120000);
   }
 
   private void verifyOMFileInfoFormat(OmKeyInfo omKeyInfo, String fileName,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
index 685aade..a4af35b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.ozone;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -125,26 +126,16 @@ public class TestOzoneFileSystem {
   private static final Logger LOG =
       LoggerFactory.getLogger(TestOzoneFileSystem.class);
 
-  @SuppressWarnings("checkstyle:VisibilityModifier")
-  protected static boolean isBucketFSOptimized = false;
-  @SuppressWarnings("checkstyle:VisibilityModifier")
-  protected static boolean enabledFileSystemPaths;
-  @SuppressWarnings("checkstyle:VisibilityModifier")
-  protected static boolean omRatisEnabled;
-  @SuppressWarnings("checkstyle:VisibilityModifier")
-  protected static MiniOzoneCluster cluster;
-  @SuppressWarnings("checkstyle:VisibilityModifier")
-  protected static FileSystem fs;
-  @SuppressWarnings("checkstyle:VisibilityModifier")
-  protected static OzoneFileSystem o3fs;
-  @SuppressWarnings("checkstyle:VisibilityModifier")
-  protected static String volumeName;
-  @SuppressWarnings("checkstyle:VisibilityModifier")
-  protected static String bucketName;
-  @SuppressWarnings("checkstyle:VisibilityModifier")
-  protected static int rootItemCount;
-  @SuppressWarnings("checkstyle:VisibilityModifier")
-  protected static Trash trash;
+  private static boolean isBucketFSOptimized = false;
+  private static boolean enabledFileSystemPaths;
+  private static boolean omRatisEnabled;
+
+  private static MiniOzoneCluster cluster;
+  private static FileSystem fs;
+  private static OzoneFileSystem o3fs;
+  private static String volumeName;
+  private static String bucketName;
+  private static Trash trash;
 
   private void init() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
@@ -202,6 +193,26 @@ public class TestOzoneFileSystem {
     }
   }
 
+  public static FileSystem getFs() {
+    return fs;
+  }
+
+  public static boolean isEnabledFileSystemPaths() {
+    return enabledFileSystemPaths;
+  }
+
+  public static void setIsBucketFSOptimized(boolean isBucketFSO) {
+    isBucketFSOptimized = isBucketFSO;
+  }
+
+  public static String getBucketName() {
+    return bucketName;
+  }
+
+  public static String getVolumeName() {
+    return volumeName;
+  }
+
   @Test
   public void testCreateFileShouldCheckExistenceOfDirWithSameName()
       throws Exception {
@@ -605,7 +616,7 @@ public class TestOzoneFileSystem {
     // Added logs for debugging failures, to check any sub-path mismatches.
     Set<String> actualPaths = new TreeSet<>();
     ArrayList<String> actualPathList = new ArrayList<>();
-    if (rootItemCount != fileStatuses.length) {
+    if (numDirs != fileStatuses.length) {
       for (int i = 0; i < fileStatuses.length; i++) {
         boolean duplicate =
                 actualPaths.add(fileStatuses[i].getPath().getName());
@@ -615,7 +626,7 @@ public class TestOzoneFileSystem {
         }
         actualPathList.add(fileStatuses[i].getPath().getName());
       }
-      if (rootItemCount != actualPathList.size()) {
+      if (numDirs != actualPathList.size()) {
         LOG.info("actualPathsSize: {}", actualPaths.size());
         LOG.info("actualPathListSize: {}", actualPathList.size());
         actualPaths.removeAll(paths);
@@ -642,8 +653,6 @@ public class TestOzoneFileSystem {
     Path root = new Path("/");
     FileStatus[] fileStatuses = fs.listStatus(root);
 
-    rootItemCount = 0; // reset to zero
-
     if (fileStatuses == null) {
       return;
     }
@@ -722,7 +731,7 @@ public class TestOzoneFileSystem {
   public void testAllocateMoreThanOneBlock() throws IOException {
     Path file = new Path("/file");
     String str = "TestOzoneFileSystemV1.testSeekOnFileLength";
-    byte[] strBytes = str.getBytes();
+    byte[] strBytes = str.getBytes(StandardCharsets.UTF_8);
     long numBlockAllocationsOrg =
             cluster.getOzoneManager().getMetrics().getNumBlockAllocates();
 
@@ -1051,7 +1060,6 @@ public class TestOzoneFileSystem {
   @Test
   public void testRenameDir() throws Exception {
     final String dir = "/root_dir/dir1";
-    Path rootDir = new Path(fs.getUri().toString() +  "/root_dir");
     final Path source = new Path(fs.getUri().toString() + dir);
     final Path dest = new Path(source.toString() + ".renamed");
     // Add a sub-dir to the directory to be moved.
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
index ffeb5a3..03846ae 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
@@ -29,9 +29,7 @@ import org.junit.Assert;
 import org.junit.After;
 import org.junit.BeforeClass;
 import org.junit.Ignore;
-import org.junit.Rule;
 import org.junit.Test;
-import org.junit.rules.Timeout;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.slf4j.Logger;
@@ -66,7 +64,7 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
 
   @BeforeClass
   public static void init() {
-    isBucketFSOptimized = true;
+    setIsBucketFSOptimized(true);
   }
 
   public TestOzoneFileSystemV1(boolean setDefaultFs, boolean enableOMRatis) {
@@ -85,12 +83,6 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
     }
   }
 
-  /**
-   * Set a timeout for each test.
-   */
-  @Rule
-  public Timeout timeout = new Timeout(300000);
-
   private static final Logger LOG =
       LoggerFactory.getLogger(TestOzoneFileSystemV1.class);
 
@@ -106,30 +98,32 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
      * Op 7. create file -> /d1/d2/key1
      */
     Path key1 = new Path("/key1");
-    try (FSDataOutputStream outputStream = fs.create(key1, false)) {
+    try (FSDataOutputStream outputStream = getFs().create(key1,
+            false)) {
       assertNotNull("Should be able to create file: key1",
               outputStream);
     }
     Path d1 = new Path("/d1");
     Path dir1Key1 = new Path(d1, "key1");
-    try (FSDataOutputStream outputStream = fs.create(dir1Key1, false)) {
+    try (FSDataOutputStream outputStream = getFs().create(dir1Key1, false)) {
       assertNotNull("Should be able to create file: " + dir1Key1,
               outputStream);
     }
     Path d2 = new Path("/d2");
     Path dir2Key1 = new Path(d2, "key1");
-    try (FSDataOutputStream outputStream = fs.create(dir2Key1, false)) {
+    try (FSDataOutputStream outputStream = getFs().create(dir2Key1, false)) {
       assertNotNull("Should be able to create file: " + dir2Key1,
               outputStream);
     }
     Path dir1Dir2 = new Path("/d1/d2/");
     Path dir1Dir2Key1 = new Path(dir1Dir2, "key1");
-    try (FSDataOutputStream outputStream = fs.create(dir1Dir2Key1, false)) {
+    try (FSDataOutputStream outputStream = getFs().create(dir1Dir2Key1,
+            false)) {
       assertNotNull("Should be able to create file: " + dir1Dir2Key1,
               outputStream);
     }
     Path d1Key2 = new Path(d1, "key2");
-    try (FSDataOutputStream outputStream = fs.create(d1Key2, false)) {
+    try (FSDataOutputStream outputStream = getFs().create(d1Key2, false)) {
       assertNotNull("Should be able to create file: " + d1Key2,
               outputStream);
     }
@@ -137,11 +131,14 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
     Path dir1Dir3 = new Path("/d1/d3/");
     Path dir1Dir4 = new Path("/d1/d4/");
 
-    fs.mkdirs(dir1Dir3);
-    fs.mkdirs(dir1Dir4);
+    getFs().mkdirs(dir1Dir3);
+    getFs().mkdirs(dir1Dir4);
+
+    String bucketName = getBucketName();
+    String volumeName = getVolumeName();
 
     // Root Directory
-    FileStatus[] fileStatusList = fs.listStatus(new Path("/"));
+    FileStatus[] fileStatusList = getFs().listStatus(new Path("/"));
     assertEquals("FileStatus should return files and directories",
             3, fileStatusList.length);
     ArrayList<String> expectedPaths = new ArrayList<>();
@@ -155,7 +152,7 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
             0, expectedPaths.size());
 
     // level-1 sub-dirs
-    fileStatusList = fs.listStatus(new Path("/d1"));
+    fileStatusList = getFs().listStatus(new Path("/d1"));
     assertEquals("FileStatus should return files and directories",
             5, fileStatusList.length);
     expectedPaths = new ArrayList<>();
@@ -171,7 +168,7 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
             0, expectedPaths.size());
 
     // level-2 sub-dirs
-    fileStatusList = fs.listStatus(new Path("/d1/d2"));
+    fileStatusList = getFs().listStatus(new Path("/d1/d2"));
     assertEquals("FileStatus should return files and directories",
             1, fileStatusList.length);
     expectedPaths = new ArrayList<>();
@@ -184,7 +181,7 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
             0, expectedPaths.size());
 
     // level-2 key2
-    fileStatusList = fs.listStatus(new Path("/d1/d2/key1"));
+    fileStatusList = getFs().listStatus(new Path("/d1/d2/key1"));
     assertEquals("FileStatus should return files and directories",
             1, fileStatusList.length);
     expectedPaths = new ArrayList<>();
@@ -198,13 +195,13 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
 
     // invalid root key
     try {
-      fileStatusList = fs.listStatus(new Path("/key2"));
+      fileStatusList = getFs().listStatus(new Path("/key2"));
       fail("Should throw FileNotFoundException");
     } catch (FileNotFoundException fnfe) {
       // ignore as it's expected
     }
     try {
-      fileStatusList = fs.listStatus(new Path("/d1/d2/key2"));
+      fileStatusList = getFs().listStatus(new Path("/d1/d2/key2"));
       fail("Should throw FileNotFoundException");
     } catch (FileNotFoundException fnfe) {
       // ignore as it's expected
@@ -221,30 +218,30 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
      * Op 4. create dir -> /d1/d2/d1/d2/key1
      */
     Path dir1Dir1Dir2Key1 = new Path("/d1/d1/d2/key1");
-    try (FSDataOutputStream outputStream = fs.create(dir1Dir1Dir2Key1,
+    try (FSDataOutputStream outputStream = getFs().create(dir1Dir1Dir2Key1,
             false)) {
       assertNotNull("Should be able to create file: " + dir1Dir1Dir2Key1,
               outputStream);
     }
     Path key1 = new Path("/key1");
-    try (FSDataOutputStream outputStream = fs.create(key1, false)) {
+    try (FSDataOutputStream outputStream = getFs().create(key1, false)) {
       assertNotNull("Should be able to create file: " + key1,
               outputStream);
     }
     Path key2 = new Path("/key2");
-    try (FSDataOutputStream outputStream = fs.create(key2, false)) {
+    try (FSDataOutputStream outputStream = getFs().create(key2, false)) {
       assertNotNull("Should be able to create file: key2",
               outputStream);
     }
     Path dir1Dir2Dir1Dir2Key1 = new Path("/d1/d2/d1/d2/key1");
-    try (FSDataOutputStream outputStream = fs.create(dir1Dir2Dir1Dir2Key1,
+    try (FSDataOutputStream outputStream = getFs().create(dir1Dir2Dir1Dir2Key1,
             false)) {
       assertNotNull("Should be able to create file: "
               + dir1Dir2Dir1Dir2Key1, outputStream);
     }
-    RemoteIterator<LocatedFileStatus> fileStatusItr = fs.listFiles(new Path(
-            "/"), true);
-    String uriPrefix = "o3fs://" + bucketName + "." + volumeName;
+    RemoteIterator<LocatedFileStatus> fileStatusItr = getFs().listFiles(
+            new Path("/"), true);
+    String uriPrefix = "o3fs://" + getBucketName() + "." + getVolumeName();
     ArrayList<String> expectedPaths = new ArrayList<>();
     expectedPaths.add(uriPrefix + dir1Dir1Dir2Key1.toString());
     expectedPaths.add(uriPrefix + key1.toString());
@@ -263,7 +260,7 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
             expectedPaths.size());
 
     // Recursive=false
-    fileStatusItr = fs.listFiles(new Path("/"), false);
+    fileStatusItr = getFs().listFiles(new Path("/"), false);
     expectedPaths.clear();
     expectedPaths.add(uriPrefix + "/key1");
     expectedPaths.add(uriPrefix + "/key2");
@@ -286,23 +283,23 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
   @Test
   public void testRenameWithNonExistentSource() throws Exception {
     // Skip as this will run only in new layout
-    if (!enabledFileSystemPaths) {
+    if (!isEnabledFileSystemPaths()) {
       return;
     }
 
     final String root = "/root";
     final String dir1 = root + "/dir1";
     final String dir2 = root + "/dir2";
-    final Path source = new Path(fs.getUri().toString() + dir1);
-    final Path destin = new Path(fs.getUri().toString() + dir2);
+    final Path source = new Path(getFs().getUri().toString() + dir1);
+    final Path destin = new Path(getFs().getUri().toString() + dir2);
 
     // creates destin
-    fs.mkdirs(destin);
+    getFs().mkdirs(destin);
     LOG.info("Created destin dir: {}", destin);
 
     LOG.info("Rename op-> source:{} to destin:{}}", source, destin);
     try {
-      fs.rename(source, destin);
+      getFs().rename(source, destin);
       Assert.fail("Should throw exception : Source doesn't exist!");
     } catch (OMException ome) {
       // expected
@@ -316,22 +313,22 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
   @Test
   public void testRenameDirToItsOwnSubDir() throws Exception {
     // Skip as this will run only in new layout
-    if (!enabledFileSystemPaths) {
+    if (!isEnabledFileSystemPaths()) {
       return;
     }
 
     final String root = "/root";
     final String dir1 = root + "/dir1";
-    final Path dir1Path = new Path(fs.getUri().toString() + dir1);
+    final Path dir1Path = new Path(getFs().getUri().toString() + dir1);
     // Add a sub-dir1 to the directory to be moved.
     final Path subDir1 = new Path(dir1Path, "sub_dir1");
-    fs.mkdirs(subDir1);
+    getFs().mkdirs(subDir1);
     LOG.info("Created dir1 {}", subDir1);
 
-    final Path sourceRoot = new Path(fs.getUri().toString() + root);
+    final Path sourceRoot = new Path(getFs().getUri().toString() + root);
     LOG.info("Rename op-> source:{} to destin:{}", sourceRoot, subDir1);
     try {
-      fs.rename(sourceRoot, subDir1);
+      getFs().rename(sourceRoot, subDir1);
       Assert.fail("Should throw exception : Cannot rename a directory to" +
               " its own subdirectory");
     } catch (OMException ome) {
@@ -348,20 +345,21 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
   @Test
   public void testRenameDestinationParentDoesntExist() throws Exception {
     // Skip as this will run only in new layout
-    if (!enabledFileSystemPaths) {
+    if (!isEnabledFileSystemPaths()) {
       return;
     }
 
     final String root = "/root_dir";
     final String dir1 = root + "/dir1";
     final String dir2 = dir1 + "/dir2";
-    final Path dir2SourcePath = new Path(fs.getUri().toString() + dir2);
-    fs.mkdirs(dir2SourcePath);
+    final Path dir2SourcePath = new Path(getFs().getUri().toString() + dir2);
+    getFs().mkdirs(dir2SourcePath);
 
     // (a) parent of dst does not exist.  /root_dir/b/c
-    final Path destinPath = new Path(fs.getUri().toString() + root + "/b/c");
+    final Path destinPath = new Path(getFs().getUri().toString()
+            + root + "/b/c");
     try {
-      fs.rename(dir2SourcePath, destinPath);
+      getFs().rename(dir2SourcePath, destinPath);
       Assert.fail("Should fail as parent of dst does not exist!");
     } catch (OMException ome) {
       // expected
@@ -369,12 +367,12 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
     }
 
     // (b) parent of dst is a file. /root_dir/file1/c
-    Path filePath = new Path(fs.getUri().toString() + root + "/file1");
-    ContractTestUtils.touch(fs, filePath);
+    Path filePath = new Path(getFs().getUri().toString() + root + "/file1");
+    ContractTestUtils.touch(getFs(), filePath);
 
     Path newDestinPath = new Path(filePath, "c");
     try {
-      fs.rename(dir2SourcePath, newDestinPath);
+      getFs().rename(dir2SourcePath, newDestinPath);
       Assert.fail("Should fail as parent of dst is a file!");
     } catch (OMException ome) {
       // expected
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
index 36146a2..5af6c63 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
@@ -87,11 +87,11 @@ public class TestReadRetries {
   @Rule
   public ExpectedException thrown = ExpectedException.none();
 
-  private static MiniOzoneCluster cluster = null;
-  private static OzoneClient ozClient = null;
-  private static ObjectStore store = null;
-  private static OzoneManager ozoneManager;
-  private static StorageContainerLocationProtocolClientSideTranslatorPB
+  private MiniOzoneCluster cluster = null;
+  private OzoneClient ozClient = null;
+  private ObjectStore store = null;
+  private OzoneManager ozoneManager;
+  private StorageContainerLocationProtocolClientSideTranslatorPB
       storageContainerLocationClient;
 
   private static final String SCM_ID = UUID.randomUUID().toString();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
index d09020e..b877e29 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Assert;
 import org.junit.AfterClass;
@@ -50,8 +51,10 @@ import org.junit.Test;
 import org.junit.rules.Timeout;
 
 import java.io.IOException;
+import java.nio.charset.StandardCharsets;
 import java.util.HashMap;
 import java.util.UUID;
+import java.util.concurrent.TimeoutException;
 
 import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
 import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
@@ -175,7 +178,8 @@ public class TestObjectStoreV1 {
     verifyKeyInOpenFileTable(openFileTable, clientID, file,
             dirPathC.getObjectID(), false);
 
-    ozoneOutputStream.write(data.getBytes(), 0, data.length());
+    ozoneOutputStream.write(data.getBytes(StandardCharsets.UTF_8), 0,
+            data.length());
     ozoneOutputStream.close();
 
     Table<String, OmKeyInfo> fileTable =
@@ -227,7 +231,8 @@ public class TestObjectStoreV1 {
     verifyKeyInOpenFileTable(openFileTable, clientID, fileName,
             dirPathC.getObjectID(), false);
 
-    ozoneOutputStream.write(data.getBytes(), 0, data.length());
+    ozoneOutputStream.write(data.getBytes(StandardCharsets.UTF_8), 0,
+            data.length());
 
     // open key
     try {
@@ -356,22 +361,21 @@ public class TestObjectStoreV1 {
   }
 
   private void assertKeyRenamedEx(OzoneBucket bucket, String keyName)
-      throws Exception {
-    OMException oe = null;
+          throws Exception {
     try {
       bucket.getKey(keyName);
-    } catch (OMException e) {
-      oe = e;
+      fail("Should throw KeyNotFound as the key got renamed!");
+    } catch (OMException ome) {
+      Assert.assertEquals(KEY_NOT_FOUND, ome.getResult());
     }
-    Assert.assertEquals(KEY_NOT_FOUND, oe.getResult());
   }
 
   private void createTestKey(OzoneBucket bucket, String keyName,
       String keyValue) throws IOException {
     OzoneOutputStream out = bucket.createKey(keyName,
-            keyValue.getBytes().length, STAND_ALONE,
+            keyValue.getBytes(StandardCharsets.UTF_8).length, STAND_ALONE,
             ONE, new HashMap<>());
-    out.write(keyValue.getBytes());
+    out.write(keyValue.getBytes(StandardCharsets.UTF_8));
     out.close();
     OzoneKey key = bucket.getKey(keyName);
     Assert.assertEquals(keyName, key.getName());
@@ -416,13 +420,24 @@ public class TestObjectStoreV1 {
 
   private void verifyKeyInOpenFileTable(Table<String, OmKeyInfo> openFileTable,
       long clientID, String fileName, long parentID, boolean isEmpty)
-          throws IOException {
+          throws IOException, TimeoutException, InterruptedException {
     String dbOpenFileKey =
             parentID + OM_KEY_PREFIX + fileName + OM_KEY_PREFIX + clientID;
-    OmKeyInfo omKeyInfo = openFileTable.get(dbOpenFileKey);
+
     if (isEmpty) {
-      Assert.assertNull("Table is not empty!", omKeyInfo);
+      // wait for DB updates
+      GenericTestUtils.waitFor(() -> {
+        try {
+          OmKeyInfo omKeyInfo = openFileTable.get(dbOpenFileKey);
+          return omKeyInfo == null;
+        } catch (IOException e) {
+          Assert.fail("DB failure!");
+          return false;
+        }
+
+      }, 1000, 120000);
     } else {
+      OmKeyInfo omKeyInfo = openFileTable.get(dbOpenFileKey);
       Assert.assertNotNull("Table is empty!", omKeyInfo);
       // used startsWith because the key format is,
       // <parentID>/fileName/<clientID> and clientID is not visible.
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java
index 454cfbb..d8c8108 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.audit.AuditLogger;
 import org.apache.hadoop.ozone.audit.AuditMessage;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
@@ -369,8 +370,9 @@ public class TestOMDirectoryCreateRequestV1 {
             bucketName, keyName, HddsProtos.ReplicationType.RATIS,
             HddsProtos.ReplicationFactor.THREE, objID++);
     String ozoneFileName = parentID + "/" + dirs.get(dirs.size() - 1);
+    ++txnID;
     omMetadataManager.getKeyTable().addCacheEntry(new CacheKey<>(ozoneFileName),
-            new CacheValue<>(Optional.of(omKeyInfo), ++txnID));
+            new CacheValue<>(Optional.of(omKeyInfo), txnID));
     omMetadataManager.getKeyTable().put(ozoneFileName, omKeyInfo);
 
     OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
@@ -435,10 +437,12 @@ public class TestOMDirectoryCreateRequestV1 {
     // Add a key in second level.
     OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
             bucketName, keyName, HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE, objID++);
+            HddsProtos.ReplicationFactor.THREE, objID);
+
     String ozoneKey = parentID + "/" + dirs.get(1);
+    ++txnID;
     omMetadataManager.getKeyTable().addCacheEntry(new CacheKey<>(ozoneKey),
-            new CacheValue<>(Optional.of(omKeyInfo), ++txnID));
+            new CacheValue<>(Optional.of(omKeyInfo), txnID));
     omMetadataManager.getKeyTable().put(ozoneKey, omKeyInfo);
 
     OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
@@ -517,10 +521,6 @@ public class TestOMDirectoryCreateRequestV1 {
     // Add volume and bucket entries to DB.
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
             omMetadataManager);
-    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
-    OmBucketInfo omBucketInfo =
-            omMetadataManager.getBucketTable().get(bucketKey);
-    long bucketID = omBucketInfo.getObjectID();
 
     OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
             OzoneFSUtils.addTrailingSlashIfNeeded(keyName));
@@ -587,12 +587,14 @@ public class TestOMDirectoryCreateRequestV1 {
   private String createDirKey(List<String> dirs, int depth) {
     String keyName = RandomStringUtils.randomAlphabetic(5);
     dirs.add(keyName);
+    StringBuilder buf = new StringBuilder(keyName);
     for (int i = 0; i < depth; i++) {
       String dirName = RandomStringUtils.randomAlphabetic(5);
       dirs.add(dirName);
-      keyName += "/" + dirName;
+      buf.append(OzoneConsts.OM_KEY_PREFIX);
+      buf.append(dirName);
     }
-    return keyName;
+    return buf.toString();
   }
 
   private void verifyDirectoriesInDB(List<String> dirs, long bucketID)
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestV1.java
index b65443d..e545dc7 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestV1.java
@@ -75,7 +75,9 @@ public class TestOMKeyCreateRequestV1 extends TestOMKeyCreateRequest {
     long parentID = checkIntermediatePaths(keyPath);
 
     // Check open key entry
-    String fileName = keyPath.getFileName().toString();
+    Path keyPathFileName = keyPath.getFileName();
+    Assert.assertNotNull("Failed to find fileName", keyPathFileName);
+    String fileName = keyPathFileName.toString();
     String openKey = omMetadataManager.getOpenFileName(parentID, fileName,
             omRequest.getCreateKeyRequest().getClientID());
     OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
@@ -88,6 +90,7 @@ public class TestOMKeyCreateRequestV1 extends TestOMKeyCreateRequest {
     String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
     OmBucketInfo omBucketInfo =
             omMetadataManager.getBucketTable().get(bucketKey);
+    Assert.assertNotNull("Bucket not found!", omBucketInfo);
     long lastKnownParentId = omBucketInfo.getObjectID();
 
     Iterator<Path> elements = keyPath.iterator();
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestV1.java
index dac2efe..5fa75ba 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestV1.java
@@ -133,10 +133,6 @@ public class TestS3InitiateMultipartUploadRequestV1
     Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND,
         omClientResponse.getOMResponse().getStatus());
 
-    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
-        bucketName, keyName, modifiedRequest.getInitiateMultiPartUploadRequest()
-            .getKeyArgs().getMultipartUploadID());
-
     Assert.assertTrue(omMetadataManager.getOpenKeyTable().isEmpty());
     Assert.assertTrue(omMetadataManager.getMultipartInfoTable().isEmpty());
   }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponseV1.java
index 0a1114a..fb06581 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponseV1.java
@@ -59,9 +59,7 @@ public class TestOMDirectoryCreateResponseV1 {
   @Test
   public void testAddToDBBatch() throws Exception {
 
-    String volumeName = UUID.randomUUID().toString();
     String keyName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
 
     long parentID = 100;
     OmDirectoryInfo omDirInfo =
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java
index e1549e1..a8a0c99 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java
@@ -30,6 +30,8 @@ import org.apache.hadoop.util.Time;
 import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
 
+import java.util.ArrayList;
+
 /**
  * Tests OMFileCreateResponse layout version V1.
  */
@@ -59,8 +61,8 @@ public class TestOMFileCreateResponseV1 extends TestOMKeyCreateResponse {
   protected OMKeyCreateResponse getOmKeyCreateResponse(OmKeyInfo keyInfo,
       OmBucketInfo bucketInfo, OMResponse response) {
 
-    return new OMFileCreateResponseV1(response, keyInfo, null, clientID,
-            bucketInfo);
+    return new OMFileCreateResponseV1(response, keyInfo, new ArrayList<>(),
+            clientID, bucketInfo);
   }
 
   @NotNull
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
index 4d50337..e2a223c 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
@@ -34,8 +34,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 @SuppressWarnings("visibilitymodifier")
 public class TestOMKeyCommitResponse extends TestOMKeyResponse {
 
-  protected OmBucketInfo omBucketInfo;
-
   @Test
   public void testAddToDBBatch() throws Exception {
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseV1.java
index 6299639..834aafa 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseV1.java
@@ -28,6 +28,8 @@ import org.apache.hadoop.util.Time;
 import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
 
+import java.util.ArrayList;
+
 /**
  * Tests OMKeyCreateResponseV1.
  */
@@ -67,7 +69,7 @@ public class TestOMKeyCreateResponseV1 extends TestOMKeyCreateResponse {
   protected OMKeyCreateResponse getOmKeyCreateResponse(OmKeyInfo keyInfo,
       OmBucketInfo bucketInfo, OMResponse response) {
 
-    return new OMKeyCreateResponseV1(response, keyInfo, null, clientID,
-            bucketInfo);
+    return new OMKeyCreateResponseV1(response, keyInfo, new ArrayList<>(),
+            clientID, bucketInfo);
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponseV1.java
index 31f9e5a..6dd6aaf 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponseV1.java
@@ -40,11 +40,6 @@ public class TestS3InitiateMultipartUploadResponseV1
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
     String prefix = "a/b/c/d/";
-    List<String> dirs = new ArrayList<String>();
-    dirs.add("a");
-    dirs.add("b");
-    dirs.add("c");
-    dirs.add("d");
     String fileName = UUID.randomUUID().toString();
     String keyName = prefix + fileName;
 




[ozone] 01/19: HDDS-2949: mkdir : store directory entries in a separate table (#1404)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 31b66732f03a9c6c2d31bbc548dc40a1688f4fc1
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Fri Oct 2 00:45:19 2020 +0530

    HDDS-2949: mkdir : store directory entries in a separate table (#1404)
---
 .../common/src/main/resources/ozone-default.xml    |  10 +
 .../org/apache/hadoop/ozone/om/OMConfigKeys.java   |   7 +
 .../hadoop/ozone/om/helpers/OmDirectoryInfo.java   | 266 +++++++++
 .../hadoop/ozone/om/helpers/OzoneFSUtils.java      |  15 +
 .../apache/hadoop/fs/ozone/TestOzoneDirectory.java | 207 +++++++
 .../src/main/proto/OmClientProtocol.proto          |  11 +
 .../apache/hadoop/ozone/om/OMMetadataManager.java  |  17 +
 .../hadoop/ozone/om/OmMetadataManagerImpl.java     |  24 +
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |  14 +
 .../ozone/om/codec/OmDirectoryInfoCodec.java       |  60 ++
 .../om/ratis/utils/OzoneManagerRatisUtils.java     |  18 +
 .../om/request/file/OMDirectoryCreateRequest.java  |   3 +
 ...equest.java => OMDirectoryCreateRequestV1.java} | 324 +++++-----
 .../ozone/om/request/file/OMFileRequest.java       | 198 +++++++
 .../response/file/OMDirectoryCreateResponseV1.java | 103 ++++
 .../ozone/om/request/TestOMRequestUtils.java       |  37 ++
 .../file/TestOMDirectoryCreateRequestV1.java       | 649 +++++++++++++++++++++
 .../file/TestOMDirectoryCreateResponseV1.java      |  88 +++
 18 files changed, 1863 insertions(+), 188 deletions(-)

diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 9311937..8008a4d 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -2489,4 +2489,14 @@
       filesystem semantics.
     </description>
   </property>
+
+  <property>
+    <name>ozone.om.layout.version</name>
+    <tag>OZONE, OM</tag>
+    <value>V0</value>
+    <description>Temporary workaround for the OM upgrade path; it will be
+      replaced once the HDDS-3698 upgrade story reaches consensus. Defaults
+      to 'V0' so that existing unit test cases are not affected. The new OM
+      layout version is 'V1'.
+    </description>
+  </property>
 </configuration>
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index 3ad4ab9..dcba74b 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -234,4 +234,11 @@ public final class OMConfigKeys {
       false;
 
   public static final String OZONE_OM_HA_PREFIX = "ozone.om.ha";
+
+  // TODO: Temporary workaround for the OM upgrade path; to be replaced once
+  //  the HDDS-3698 upgrade story reaches consensus. Defaults to 'V0' so that
+  //  existing unit test cases are not affected. The new OM version is 'V1'.
+  public static final String OZONE_OM_LAYOUT_VERSION =
+          "ozone.om.layout.version";
+  public static final String OZONE_OM_LAYOUT_VERSION_DEFAULT = "V0";
 }
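
A sketch of how server-side code might read this key (assuming the standard
Hadoop Configuration getter; the actual call site is not part of this diff):

    OzoneConfiguration conf = new OzoneConfiguration();
    String layout = conf.get(OMConfigKeys.OZONE_OM_LAYOUT_VERSION,
        OMConfigKeys.OZONE_OM_LAYOUT_VERSION_DEFAULT); // "V0" unless overridden
    boolean omLayoutVersionV1 = "V1".equals(layout);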
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java
new file mode 100644
index 0000000..4c82047
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java
@@ -0,0 +1,266 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+
+import java.util.BitSet;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * This class represents directory information: each component of the
+ * user-given path, together with a pointer to its parent directory element
+ * in the path. It also stores directory-node-related metadata details.
+ */
+public class OmDirectoryInfo extends WithObjectID {
+  private long parentObjectID; // pointer to parent directory
+
+  private String name; // directory name
+
+  private long creationTime;
+  private long modificationTime;
+
+  private List<OzoneAcl> acls;
+
+  public OmDirectoryInfo(Builder builder) {
+    this.name = builder.name;
+    this.acls = builder.acls;
+    this.metadata = builder.metadata;
+    this.objectID = builder.objectID;
+    this.updateID = builder.updateID;
+    this.parentObjectID = builder.parentObjectID;
+    this.creationTime = builder.creationTime;
+    this.modificationTime = builder.modificationTime;
+  }
+
+  /**
+   * Returns a new builder that builds an OmDirectoryInfo.
+   *
+   * @return Builder
+   */
+  public static OmDirectoryInfo.Builder newBuilder() {
+    return new OmDirectoryInfo.Builder();
+  }
+
+  /**
+   * Builder for Directory Info.
+   */
+  public static class Builder {
+    private long parentObjectID; // pointer to parent directory
+
+    private long objectID;
+    private long updateID;
+
+    private String name;
+
+    private long creationTime;
+    private long modificationTime;
+
+    private List<OzoneAcl> acls;
+    private Map<String, String> metadata;
+
+    public Builder() {
+      //Default values
+      this.acls = new LinkedList<>();
+      this.metadata = new HashMap<>();
+    }
+
+    public Builder setParentObjectID(long parentObjectId) {
+      this.parentObjectID = parentObjectId;
+      return this;
+    }
+
+    public Builder setObjectID(long objectId) {
+      this.objectID = objectId;
+      return this;
+    }
+
+    public Builder setUpdateID(long updateId) {
+      this.updateID = updateId;
+      return this;
+    }
+
+    public Builder setName(String dirName) {
+      this.name = dirName;
+      return this;
+    }
+
+    public Builder setCreationTime(long newCreationTime) {
+      this.creationTime = newCreationTime;
+      return this;
+    }
+
+    public Builder setModificationTime(long newModificationTime) {
+      this.modificationTime = newModificationTime;
+      return this;
+    }
+
+    public Builder setAcls(List<OzoneAcl> listOfAcls) {
+      if (listOfAcls != null) {
+        this.acls.addAll(listOfAcls);
+      }
+      return this;
+    }
+
+    public Builder addAcl(OzoneAcl ozoneAcl) {
+      if (ozoneAcl != null) {
+        this.acls.add(ozoneAcl);
+      }
+      return this;
+    }
+
+    public Builder addMetadata(String key, String value) {
+      metadata.put(key, value);
+      return this;
+    }
+
+    public Builder addAllMetadata(Map<String, String> additionalMetadata) {
+      if (additionalMetadata != null) {
+        metadata.putAll(additionalMetadata);
+      }
+      return this;
+    }
+
+    public OmDirectoryInfo build() {
+      return new OmDirectoryInfo(this);
+    }
+  }
+
+  @Override
+  public String toString() {
+    return getPath() + ":" + getObjectID();
+  }
+
+  public long getParentObjectID() {
+    return parentObjectID;
+  }
+
+  public String getPath() {
+    return getParentObjectID() + OzoneConsts.OM_KEY_PREFIX + getName();
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  public long getCreationTime() {
+    return creationTime;
+  }
+
+  public long getModificationTime() {
+    return modificationTime;
+  }
+
+  public List<OzoneAcl> getAcls() {
+    return acls;
+  }
+
+  /**
+   * Creates DirectoryInfo protobuf from OmDirectoryInfo.
+   */
+  public OzoneManagerProtocolProtos.DirectoryInfo getProtobuf() {
+    OzoneManagerProtocolProtos.DirectoryInfo.Builder pib =
+            OzoneManagerProtocolProtos.DirectoryInfo.newBuilder().setName(name)
+                    .setCreationTime(creationTime)
+                    .setModificationTime(modificationTime)
+                    .addAllMetadata(KeyValueUtil.toProtobuf(metadata))
+                    .setObjectID(objectID)
+                    .setUpdateID(updateID)
+                    .setParentID(parentObjectID);
+    if (acls != null) {
+      pib.addAllAcls(OzoneAclUtil.toProtobuf(acls));
+    }
+    return pib.build();
+  }
+
+  /**
+   * Parses a DirectoryInfo protobuf and creates an OmDirectoryInfo.
+   * @param dirInfo the protobuf message to parse
+   * @return instance of OmDirectoryInfo
+   */
+  public static OmDirectoryInfo getFromProtobuf(
+          OzoneManagerProtocolProtos.DirectoryInfo dirInfo) {
+    OmDirectoryInfo.Builder opib = OmDirectoryInfo.newBuilder()
+            .setName(dirInfo.getName())
+            .setCreationTime(dirInfo.getCreationTime())
+            .setModificationTime(dirInfo.getModificationTime())
+            .setAcls(OzoneAclUtil.fromProtobuf(dirInfo.getAclsList()));
+    if (dirInfo.getMetadataList() != null) {
+      opib.addAllMetadata(KeyValueUtil
+              .getFromProtobuf(dirInfo.getMetadataList()));
+    }
+    if (dirInfo.hasObjectID()) {
+      opib.setObjectID(dirInfo.getObjectID());
+    }
+    if (dirInfo.hasParentID()) {
+      opib.setParentObjectID(dirInfo.getParentID());
+    }
+    if (dirInfo.hasUpdateID()) {
+      opib.setUpdateID(dirInfo.getUpdateID());
+    }
+    return opib.build();
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    OmDirectoryInfo omDirInfo = (OmDirectoryInfo) o;
+    return creationTime == omDirInfo.creationTime &&
+            modificationTime == omDirInfo.modificationTime &&
+            name.equals(omDirInfo.name) &&
+            Objects.equals(metadata, omDirInfo.metadata) &&
+            Objects.equals(acls, omDirInfo.acls) &&
+            objectID == omDirInfo.objectID &&
+            updateID == omDirInfo.updateID &&
+            parentObjectID == omDirInfo.parentObjectID;
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(objectID, parentObjectID, name);
+  }
+
+  /**
+   * Return a new copy of the object.
+   */
+  public OmDirectoryInfo copyObject() {
+    OmDirectoryInfo.Builder builder = new Builder()
+            .setName(name)
+            .setCreationTime(creationTime)
+            .setModificationTime(modificationTime)
+            .setParentObjectID(parentObjectID)
+            .setObjectID(objectID)
+            .setUpdateID(updateID);
+
+    acls.forEach(acl -> builder.addAcl(new OzoneAcl(acl.getType(),
+            acl.getName(), (BitSet) acl.getAclBitSet().clone(),
+            acl.getAclScope())));
+
+    if (metadata != null) {
+      metadata.forEach((k, v) -> builder.addMetadata(k, v));
+    }
+
+    return builder.build();
+  }
+}
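
A usage sketch for the builder above (the object IDs are made-up values):

    OmDirectoryInfo dir = OmDirectoryInfo.newBuilder()
        .setName("d2")
        .setParentObjectID(1001L)   // objectID of the parent directory "d1"
        .setObjectID(1002L)
        .setCreationTime(Time.now())
        .setModificationTime(Time.now())
        .build();
    // The directory table key is "<parentObjectID>/<name>", e.g. "1001/d2".
    String dbKey = dir.getPath();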
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
index d1491ed..96df56f 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.om.helpers;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.util.StringUtils;
 
+import javax.annotation.Nonnull;
 import java.nio.file.Paths;
 
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
@@ -116,4 +117,18 @@ public final class OzoneFSUtils {
     }
     return true;
   }
+
+  /**
+   * Returns the leaf node name from the given absolute path. For example,
+   * given the key path '/a/b/c/d/e/file1', it returns the leaf node
+   * name 'file1'.
+   */
+  public static String getFileName(@Nonnull String keyName) {
+    java.nio.file.Path fileName = Paths.get(keyName).getFileName();
+    if (fileName != null) {
+      return fileName.toString();
+    }
+    // failed to convert the key name into a path
+    return keyName;
+  }
 }
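
A quick illustration of the helper above:

    OzoneFSUtils.getFileName("a/b/c/d/e/file1"); // returns "file1"
    OzoneFSUtils.getFileName("file1");           // returns "file1"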
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneDirectory.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneDirectory.java
new file mode 100644
index 0000000..87e9f09
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneDirectory.java
@@ -0,0 +1,207 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.TestDataUtil;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.util.StringUtils;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.concurrent.TimeoutException;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
+import static org.junit.Assert.fail;
+
+/**
+ * Test verifies the entries and operations in directory table.
+ */
+public class TestOzoneDirectory {
+
+  @Rule
+  public Timeout timeout = new Timeout(300000);
+
+  private static final Logger LOG =
+          LoggerFactory.getLogger(TestOzoneDirectory.class);
+
+  private MiniOzoneCluster cluster;
+  private FileSystem fs;
+  private OzoneFileSystem o3fs;
+  private String volumeName;
+  private String bucketName;
+
+  @Test(timeout = 300_000)
+  public void testMultiLevelDirs() throws Exception {
+    setupOzoneFileSystem();
+    // Op 1. create dir -> /d1/d2/d3/d4/
+    // Op 2. create dir -> /d1/d2/d3/d4/d5
+    // Op 3. create dir -> /d1/d2/d3/d4/d6
+    Path parent = new Path("/d1/d2/d3/d4/");
+    fs.mkdirs(parent);
+
+    OMMetadataManager omMgr = cluster.getOzoneManager().getMetadataManager();
+    OmBucketInfo omBucketInfo = omMgr.getBucketTable().get(
+            omMgr.getBucketKey(volumeName, bucketName));
+    Assert.assertNotNull("Failed to find bucketInfo", omBucketInfo);
+
+    ArrayList<String> dirKeys = new ArrayList<>();
+    long d1ObjectID = verifyDirKey(omBucketInfo.getObjectID(), "d1", "/d1",
+            dirKeys, omMgr);
+    long d2ObjectID = verifyDirKey(d1ObjectID, "d2", "/d1/d2", dirKeys,
+            omMgr);
+    long d3ObjectID = verifyDirKey(d2ObjectID, "d3", "/d1/d2/d3",
+            dirKeys, omMgr);
+    long d4ObjectID = verifyDirKey(d3ObjectID, "d4", "/d1/d2/d3/d4",
+            dirKeys, omMgr);
+
+    Assert.assertEquals("Wrong OM numKeys metrics",
+            4, cluster.getOzoneManager().getMetrics().getNumKeys());
+
+    // verify entries in directory table
+    TableIterator<String, ? extends
+            Table.KeyValue<String, OmDirectoryInfo>> iterator =
+            omMgr.getDirectoryTable().iterator();
+    iterator.seekToFirst();
+    int count = dirKeys.size();
+    Assert.assertEquals("Unexpected directory table entries!", 4, count);
+    while (iterator.hasNext()) {
+      count--;
+      Table.KeyValue<String, OmDirectoryInfo> value = iterator.next();
+      verifyKeyFormat(value.getKey(), dirKeys);
+    }
+    Assert.assertEquals("Unexpected directory table entries!", 0, count);
+
+    // verify entries in key table
+    TableIterator<String, ? extends
+            Table.KeyValue<String, OmKeyInfo>> keyTableItr =
+            omMgr.getKeyTable().iterator();
+    while (keyTableItr.hasNext()) {
+      fail("Shouldn't add any entries in KeyTable!");
+    }
+
+    // create sub-dirs under same parent
+    Path subDir5 = new Path("/d1/d2/d3/d4/d5");
+    fs.mkdirs(subDir5);
+    Path subDir6 = new Path("/d1/d2/d3/d4/d6");
+    fs.mkdirs(subDir6);
+    long d5ObjectID = verifyDirKey(d4ObjectID, "d5",
+            "/d1/d2/d3/d4/d5", dirKeys, omMgr);
+    long d6ObjectID = verifyDirKey(d4ObjectID, "d6",
+            "/d1/d2/d3/d4/d6", dirKeys, omMgr);
+    Assert.assertTrue("Wrong objectIds for sub-dirs[" + d5ObjectID +
+                    "/d5, " + d6ObjectID + "/d6] of same parent!",
+            d5ObjectID != d6ObjectID);
+
+    Assert.assertEquals("Wrong OM numKeys metrics",
+            6, cluster.getOzoneManager().getMetrics().getNumKeys());
+  }
+
+  /**
+   * Verify key name format and the DB key existence in the expected dirKeys
+   * list.
+   *
+   * @param key     table keyName
+   * @param dirKeys expected keyName
+   */
+  private void verifyKeyFormat(String key, ArrayList<String> dirKeys) {
+    String[] keyParts = StringUtils.split(key,
+            OzoneConsts.OM_KEY_PREFIX.charAt(0));
+    Assert.assertEquals("Invalid KeyName", 2, keyParts.length);
+    boolean removed = dirKeys.remove(key);
+    Assert.assertTrue("Key:" + key + " doesn't exists in directory table!",
+            removed);
+  }
+
+  long verifyDirKey(long parentId, String dirKey, String absolutePath,
+                    ArrayList<String> dirKeys, OMMetadataManager omMgr)
+          throws Exception {
+    String dbKey = parentId + "/" + dirKey;
+    dirKeys.add(dbKey);
+    OmDirectoryInfo dirInfo = omMgr.getDirectoryTable().get(dbKey);
+    Assert.assertNotNull("Failed to find " + absolutePath +
+            " using dbKey: " + dbKey, dirInfo);
+    Assert.assertEquals("Parent Id mismatches", parentId,
+            dirInfo.getParentObjectID());
+    Assert.assertEquals("Mismatches directory name", dirKey,
+            dirInfo.getName());
+    Assert.assertTrue("Mismatches directory creation time param",
+            dirInfo.getCreationTime() > 0);
+    Assert.assertEquals("Mismatches directory modification time param",
+            dirInfo.getCreationTime(), dirInfo.getModificationTime());
+    Assert.assertEquals("Wrong representation!",
+            dbKey + ":" + dirInfo.getObjectID(), dirInfo.toString());
+    return dirInfo.getObjectID();
+  }
+
+  private void setupOzoneFileSystem()
+          throws IOException, TimeoutException, InterruptedException {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setInt(FS_TRASH_INTERVAL_KEY, 1);
+    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    cluster = MiniOzoneCluster.newBuilder(conf)
+            .setNumDatanodes(3)
+            .build();
+    cluster.waitForClusterToBeReady();
+    // create a volume and a bucket to be used by OzoneFileSystem
+    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster);
+    volumeName = bucket.getVolumeName();
+    bucketName = bucket.getName();
+
+    String rootPath = String.format("%s://%s.%s/",
+            OzoneConsts.OZONE_URI_SCHEME, bucket.getName(),
+            bucket.getVolumeName());
+
+    // Set the fs.defaultFS and start the filesystem
+    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
+    // Set the number of keys to be processed during batch operate.
+    conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5);
+    fs = FileSystem.get(conf);
+  }
+
+  @After
+  public void tearDown() {
+    IOUtils.closeQuietly(fs);
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+}
\ No newline at end of file
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index da43725..19385c9 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -776,6 +776,17 @@ message KeyInfo {
     optional uint64 updateID = 15;
 }
 
+message DirectoryInfo {
+    required string name = 1;
+    required uint64 creationTime = 2;
+    required uint64 modificationTime = 3;
+    repeated hadoop.hdds.KeyValue metadata = 4;
+    repeated OzoneAclInfo acls = 5;
+    required uint64 objectID = 6;
+    required uint64 updateID = 7;
+    required uint64 parentID = 8;
+}
+
 message RepeatedKeyInfo {
     repeated KeyInfo keyInfo = 1;
 }
diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
index 824a654..c0948ce 100644
--- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
+++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.common.BlockGroup;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
@@ -374,6 +375,12 @@ public interface OMMetadataManager {
       String bucketName, String prefix) throws IOException;
 
   /**
+   * Gets the DirectoryTable.
+   * @return Table.
+   */
+  Table<String, OmDirectoryInfo> getDirectoryTable();
+
+  /**
    * Return table mapped to the specified table name.
    * @param tableName
    * @return Table
@@ -397,4 +404,14 @@ public interface OMMetadataManager {
 
   TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
       getKeyIterator();
+
+  /**
+   * Given a parent object ID and a path component name, return the
+   * corresponding DB directory key.
+   *
+   * @param parentObjectId - parent object Id
+   * @param pathComponentName   - path component name
+   * @return DB directory key as String.
+   */
+  String getOzonePathKey(long parentObjectId, String pathComponentName);
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index b1e7860..cdf5fcb 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.common.BlockGroup;
 import org.apache.hadoop.ozone.om.codec.OMTransactionInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmBucketInfoCodec;
+import org.apache.hadoop.ozone.om.codec.OmDirectoryInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmKeyInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmMultipartKeyInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmPrefixInfoCodec;
@@ -60,6 +61,7 @@ import org.apache.hadoop.ozone.om.codec.UserVolumeInfoCodec;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
@@ -126,6 +128,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
    * |----------------------------------------------------------------------|
    * |  multipartInfoTable| /volumeName/bucketName/keyName/uploadId ->...   |
    * |----------------------------------------------------------------------|
+   * |  directoryTable    | parentId/directoryName -> DirectoryInfo         |
    * |----------------------------------------------------------------------|
    * |  transactionInfoTable | #TRANSACTIONINFO -> OMTransactionInfo        |
    * |----------------------------------------------------------------------|
@@ -141,6 +144,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
   public static final String S3_SECRET_TABLE = "s3SecretTable";
   public static final String DELEGATION_TOKEN_TABLE = "dTokenTable";
   public static final String PREFIX_TABLE = "prefixTable";
+  public static final String DIRECTORY_TABLE = "directoryTable";
   public static final String TRANSACTION_INFO_TABLE =
       "transactionInfoTable";
 
@@ -159,6 +163,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
   private Table s3SecretTable;
   private Table dTokenTable;
   private Table prefixTable;
+  private Table dirTable;
   private Table transactionInfoTable;
   private boolean isRatisEnabled;
   private boolean ignorePipelineinKey;
@@ -244,6 +249,11 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
   }
 
   @Override
+  public Table<String, OmDirectoryInfo> getDirectoryTable() {
+    return dirTable;
+  }
+
+  @Override
   public Table<String, OmMultipartKeyInfo> getMultipartInfoTable() {
     return multipartInfoTable;
   }
@@ -335,6 +345,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
         .addTable(DELEGATION_TOKEN_TABLE)
         .addTable(S3_SECRET_TABLE)
         .addTable(PREFIX_TABLE)
+        .addTable(DIRECTORY_TABLE)
         .addTable(TRANSACTION_INFO_TABLE)
         .addCodec(OzoneTokenIdentifier.class, new TokenIdentifierCodec())
         .addCodec(OmKeyInfo.class, new OmKeyInfoCodec(true))
@@ -346,6 +357,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
         .addCodec(OmMultipartKeyInfo.class, new OmMultipartKeyInfoCodec())
         .addCodec(S3SecretValue.class, new S3SecretValueCodec())
         .addCodec(OmPrefixInfo.class, new OmPrefixInfoCodec())
+        .addCodec(OmDirectoryInfo.class, new OmDirectoryInfoCodec())
         .addCodec(OMTransactionInfo.class, new OMTransactionInfoCodec());
   }
 
@@ -400,6 +412,10 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
         OmPrefixInfo.class);
     checkTableStatus(prefixTable, PREFIX_TABLE);
 
+    dirTable = this.store.getTable(DIRECTORY_TABLE, String.class,
+            OmDirectoryInfo.class);
+    checkTableStatus(dirTable, DIRECTORY_TABLE);
+
     transactionInfoTable = this.store.getTable(TRANSACTION_INFO_TABLE,
         String.class, OMTransactionInfo.class);
     checkTableStatus(transactionInfoTable, TRANSACTION_INFO_TABLE);
@@ -1165,4 +1181,12 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
     return tableMap.keySet();
   }
 
+  @Override
+  public String getOzonePathKey(long parentObjectId, String pathComponentName) {
+    StringBuilder builder = new StringBuilder();
+    builder.append(parentObjectId);
+    builder.append(OM_KEY_PREFIX).append(pathComponentName);
+    return builder.toString();
+  }
+
 }
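
Since OM_KEY_PREFIX is the "/" separator, each directoryTable row is keyed by
the parent's object ID plus the child name, as the implementation above shows.
A hedged sketch of the composition for mkdir -p /d1/d2 (all object IDs below
are invented for illustration):

    public class PathKeyDemo {
      // Mirrors OzoneConsts.OM_KEY_PREFIX as used by getOzonePathKey() above.
      private static final String OM_KEY_PREFIX = "/";

      static String ozonePathKey(long parentObjectId, String component) {
        return parentObjectId + OM_KEY_PREFIX + component;
      }

      public static void main(String[] args) {
        long bucketObjectId = 512L; // invented: the bucket's objectID
        long d1ObjectId = 1024L;    // invented: objectID assigned to d1
        // d1 hangs off the bucket, d2 hangs off d1:
        System.out.println(ozonePathKey(bucketObjectId, "d1")); // "512/d1"
        System.out.println(ozonePathKey(d1ObjectId, "d2"));     // "1024/d2"
      }
    }
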
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 0b4b8ea..b59eedb 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -225,6 +225,8 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HANDLER_COUNT_DEF
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_LAYOUT_VERSION;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_LAYOUT_VERSION_DEFAULT;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_METRICS_SAVE_INTERVAL;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_METRICS_SAVE_INTERVAL_DEFAULT;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_USER_MAX_VOLUME;
@@ -1160,6 +1162,10 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
       omRatisServer.start();
     }
 
+    // TODO: Temporary workaround for the OM upgrade path; will be replaced
+    //  once the HDDS-3698 upgrade story reaches consensus.
+    getOMLayoutVersion();
+
     metadataManager.start(configuration);
     startSecretManagerIfNecessary();
 
@@ -3702,6 +3708,14 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
         OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT);
   }
 
+  private void getOMLayoutVersion() {
+    String version = configuration.getTrimmed(OZONE_OM_LAYOUT_VERSION,
+            OZONE_OM_LAYOUT_VERSION_DEFAULT);
+    boolean omLayoutVersionV1 =
+            StringUtils.equalsIgnoreCase(version, "V1");
+    OzoneManagerRatisUtils.setOmLayoutVersionV1(omLayoutVersionV1);
+  }
+
   /**
    * Create volume which is required for S3Gateway operations.
    * @throws IOException
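
Until the HDDS-3698 upgrade work lands, this layout-version flag is what gates
the new directory-table code path. A minimal sketch of enabling the V1 layout
programmatically, as the integration test later in this patch does (the exact
property string behind OZONE_OM_LAYOUT_VERSION is not shown in this hunk, so
the constant is used):

    OzoneConfiguration conf = new OzoneConfiguration();
    // Selects OMDirectoryCreateRequestV1 and friends at request-dispatch time.
    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
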
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OmDirectoryInfoCodec.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OmDirectoryInfoCodec.java
new file mode 100644
index 0000000..ba592a9
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OmDirectoryInfoCodec.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.codec;
+
+import com.google.common.base.Preconditions;
+import com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.hadoop.hdds.utils.db.Codec;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DirectoryInfo;
+
+import java.io.IOException;
+
+/**
+ * Codec to encode OmDirectoryInfo as byte array.
+ */
+public class OmDirectoryInfoCodec implements Codec<OmDirectoryInfo> {
+
+  @Override
+  public byte[] toPersistedFormat(OmDirectoryInfo object) throws IOException {
+    Preconditions
+            .checkNotNull(object, "Null object can't be converted " +
+                    "to byte array.");
+    return object.getProtobuf().toByteArray();
+  }
+
+  @Override
+  public OmDirectoryInfo fromPersistedFormat(byte[] rawData)
+          throws IOException {
+    Preconditions
+            .checkNotNull(rawData,
+                    "Null byte array can't converted to real object.");
+    try {
+      return OmDirectoryInfo.getFromProtobuf(DirectoryInfo.parseFrom(rawData));
+    } catch (InvalidProtocolBufferException e) {
+      throw new IllegalArgumentException(
+              "Can't decode the raw data from the byte array", e);
+    }
+  }
+
+  @Override
+  public OmDirectoryInfo copyObject(OmDirectoryInfo object) {
+    return object.copyObject();
+  }
+}
+
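
A hedged round-trip sketch of this codec in isolation (the DBStore normally
invokes it through the table registration shown earlier; the IDs and names
below are invented, and imports such as org.apache.hadoop.util.Time are
assumed):

    static void roundTripDemo() throws java.io.IOException {
      OmDirectoryInfoCodec codec = new OmDirectoryInfoCodec();
      OmDirectoryInfo dir = OmDirectoryInfo.newBuilder()
          .setName("d1")                        // invented directory name
          .setObjectID(1024L)                   // invented object ID
          .setParentObjectID(512L)              // invented parent (bucket) ID
          .setCreationTime(Time.now())
          .setModificationTime(Time.now())
          .setUpdateID(1L)
          .build();
      byte[] raw = codec.toPersistedFormat(dir);       // proto wire bytes
      OmDirectoryInfo copy = codec.fromPersistedFormat(raw);
      assert dir.getObjectID() == copy.getObjectID();  // round-trip intact
    }
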
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index 681c0da..1ea225b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.ozone.om.request.bucket.acl.OMBucketAddAclRequest;
 import org.apache.hadoop.ozone.om.request.bucket.acl.OMBucketRemoveAclRequest;
 import org.apache.hadoop.ozone.om.request.bucket.acl.OMBucketSetAclRequest;
 import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest;
+import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestV1;
 import org.apache.hadoop.ozone.om.request.file.OMFileCreateRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeysDeleteRequest;
 import org.apache.hadoop.ozone.om.request.key.OMAllocateBlockRequest;
@@ -83,8 +84,22 @@ import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.TRANSACTION_INFO_
  */
 public final class OzoneManagerRatisUtils {
 
+  // TODO: Temporary workaround for the OM upgrade path; will be replaced
+  //  once the HDDS-3698 upgrade story reaches consensus.
+  private static boolean omLayoutVersionV1 = true;
+
   private OzoneManagerRatisUtils() {
   }
+
+  /**
+   * Sets layout version.
+   *
+   * @param layoutVersionV1 om layout version
+   */
+  public static void setOmLayoutVersionV1(boolean layoutVersionV1) {
+    OzoneManagerRatisUtils.omLayoutVersionV1 = layoutVersionV1;
+  }
+
   /**
    * Create OMClientRequest which encapsulates the OMRequest.
    * @param omRequest
@@ -133,6 +148,9 @@ public final class OzoneManagerRatisUtils {
     case RenameKeys:
       return new OMKeysRenameRequest(omRequest);
     case CreateDirectory:
+      if (omLayoutVersionV1) {
+        return new OMDirectoryCreateRequestV1(omRequest);
+      }
       return new OMDirectoryCreateRequest(omRequest);
     case CreateFile:
       return new OMFileCreateRequest(omRequest);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
index c14ca93..2be9efd 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
@@ -361,4 +361,7 @@ public class OMDirectoryCreateRequest extends OMKeyRequest {
         .setUpdateID(objectId);
   }
 
+  static long getMaxNumOfRecursiveDirs() {
+    return MAX_NUM_OF_RECURSIVE_DIRS;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java
similarity index 55%
copy from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
copy to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java
index c14ca93..8b0727a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java
@@ -18,109 +18,64 @@
 
 package org.apache.hadoop.ozone.om.request.file;
 
-import java.io.IOException;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-
 import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
 import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
-import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.audit.AuditLogger;
 import org.apache.hadoop.ozone.audit.OMAction;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OMMetrics;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.file.OMDirectoryCreateResponse;
+import org.apache.hadoop.ozone.om.response.file.OMDirectoryCreateResponseV1;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CreateDirectoryRequest;
+        .CreateDirectoryRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CreateDirectoryResponse;
+        .CreateDirectoryResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .KeyArgs;
+        .KeyArgs;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
+        .OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
+        .OMResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .Status;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+        .Status;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KEY_NAME;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
-import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS_IN_GIVENPATH;
-import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH;
-import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.NONE;
-import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS;
+import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.*;
+
 /**
- * Handle create directory request.
+ * Handles the create directory request. It adds the path components to the
+ * directory table and maintains file system semantics.
  */
-public class OMDirectoryCreateRequest extends OMKeyRequest {
+public class OMDirectoryCreateRequestV1 extends OMDirectoryCreateRequest {
 
   private static final Logger LOG =
-      LoggerFactory.getLogger(OMDirectoryCreateRequest.class);
-
-  // The maximum number of directories which can be created through a single
-  // transaction (recursive directory creations) is 2^8 - 1 as only 8
-  // bits are set aside for this in ObjectID.
-  private static final long MAX_NUM_OF_RECURSIVE_DIRS = 255;
-
-  /**
-   * Stores the result of request execution in
-   * OMClientRequest#validateAndUpdateCache.
-   */
-  public enum Result {
-    SUCCESS, // The request was executed successfully
-
-    DIRECTORY_ALREADY_EXISTS, // Directory key already exists in DB
+      LoggerFactory.getLogger(OMDirectoryCreateRequestV1.class);
 
-    FAILURE // The request failed and exception was thrown
-  }
-
-  public OMDirectoryCreateRequest(OMRequest omRequest) {
+  public OMDirectoryCreateRequestV1(OMRequest omRequest) {
     super(omRequest);
   }
 
   @Override
-  public OMRequest preExecute(OzoneManager ozoneManager) {
-    CreateDirectoryRequest createDirectoryRequest =
-        getOmRequest().getCreateDirectoryRequest();
-    Preconditions.checkNotNull(createDirectoryRequest);
-
-    KeyArgs.Builder newKeyArgs = createDirectoryRequest.getKeyArgs()
-        .toBuilder().setModificationTime(Time.now());
-
-    CreateDirectoryRequest.Builder newCreateDirectoryRequest =
-        createDirectoryRequest.toBuilder().setKeyArgs(newKeyArgs);
-
-    return getOmRequest().toBuilder().setCreateDirectoryRequest(
-        newCreateDirectoryRequest).setUserInfo(getUserInfo()).build();
-
-  }
-
-  @Override
   public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
       long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
 
@@ -131,6 +86,7 @@ public class OMDirectoryCreateRequest extends OMKeyRequest {
     String volumeName = keyArgs.getVolumeName();
     String bucketName = keyArgs.getBucketName();
     String keyName = keyArgs.getKeyName();
+    int numKeysCreated = 0;
 
     OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
         getOmRequest());
@@ -147,8 +103,7 @@ public class OMDirectoryCreateRequest extends OMKeyRequest {
     IOException exception = null;
     OMClientResponse omClientResponse = null;
     Result result = Result.FAILURE;
-    List<OmKeyInfo> missingParentInfos;
-    int numMissingParents = 0;
+    List<OmDirectoryInfo> missingParentInfos;
 
     try {
       keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
@@ -175,48 +130,50 @@ public class OMDirectoryCreateRequest extends OMKeyRequest {
 
       // Need to check if any files exist in the given path, if they exist we
       // cannot create a directory with the given key.
-      OMFileRequest.OMPathInfo omPathInfo =
-          OMFileRequest.verifyFilesInPath(omMetadataManager, volumeName,
+      // Verify the path against directory table
+      OMFileRequest.OMPathInfoV1 omPathInfo =
+          OMFileRequest.verifyDirectoryKeysInPath(omMetadataManager, volumeName,
               bucketName, keyName, keyPath);
       OMFileRequest.OMDirectoryResult omDirectoryResult =
           omPathInfo.getDirectoryResult();
 
-      OmKeyInfo dirKeyInfo = null;
       if (omDirectoryResult == FILE_EXISTS ||
           omDirectoryResult == FILE_EXISTS_IN_GIVENPATH) {
-        throw new OMException("Unable to create directory: " +keyName
-            + " in volume/bucket: " + volumeName + "/" + bucketName,
+        throw new OMException("Unable to create directory: " + keyName
+            + " in volume/bucket: " + volumeName + "/" + bucketName + " as " +
+                "file:" + omPathInfo.getFileExistsInPath() + " already exists",
             FILE_ALREADY_EXISTS);
       } else if (omDirectoryResult == DIRECTORY_EXISTS_IN_GIVENPATH ||
           omDirectoryResult == NONE) {
-        List<String> missingParents = omPathInfo.getMissingParents();
-        long baseObjId = ozoneManager.getObjectIdFromTxId(trxnLogIndex);
-        List<OzoneAcl> inheritAcls = omPathInfo.getAcls();
-
-        dirKeyInfo = createDirectoryKeyInfoWithACL(keyName,
-            keyArgs, baseObjId,
-            OzoneAclUtil.fromProtobuf(keyArgs.getAclsList()), trxnLogIndex);
-
-        missingParentInfos = getAllParentInfo(ozoneManager, keyArgs,
-            missingParents, inheritAcls, trxnLogIndex);
-
-        numMissingParents = missingParentInfos.size();
-        OMFileRequest.addKeyTableCacheEntries(omMetadataManager, volumeName,
-            bucketName, Optional.of(dirKeyInfo),
-            Optional.of(missingParentInfos), trxnLogIndex);
-        result = Result.SUCCESS;
-        omClientResponse = new OMDirectoryCreateResponse(omResponse.build(),
-            dirKeyInfo, missingParentInfos, result);
+
+        // prepare all missing parents
+        missingParentInfos = OMDirectoryCreateRequestV1.getAllParentDirInfo(
+                ozoneManager, keyArgs, omPathInfo, trxnLogIndex);
+        // prepare leafNode dir
+        OmDirectoryInfo dirInfo = createDirectoryInfoWithACL(
+                omPathInfo.getLeafNodeName(),
+                keyArgs, omPathInfo.getLeafNodeObjectId(),
+                omPathInfo.getLastKnownParentId(), trxnLogIndex,
+                OzoneAclUtil.fromProtobuf(keyArgs.getAclsList()));
+        OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager,
+                Optional.of(dirInfo), Optional.of(missingParentInfos),
+                trxnLogIndex);
+
+        // total number of keys created.
+        numKeysCreated = missingParentInfos.size() + 1;
+
+        result = OMDirectoryCreateRequest.Result.SUCCESS;
+        omClientResponse = new OMDirectoryCreateResponseV1(omResponse.build(),
+                dirInfo, missingParentInfos, result);
       } else {
-        // omDirectoryResult == DIRECTORY_EXITS
         result = Result.DIRECTORY_ALREADY_EXISTS;
         omResponse.setStatus(Status.DIRECTORY_ALREADY_EXISTS);
-        omClientResponse = new OMDirectoryCreateResponse(omResponse.build(),
+        omClientResponse = new OMDirectoryCreateResponseV1(omResponse.build(),
             result);
       }
     } catch (IOException ex) {
       exception = ex;
-      omClientResponse = new OMDirectoryCreateResponse(
+      omClientResponse = new OMDirectoryCreateResponseV1(
           createErrorOMResponse(omResponse, exception), result);
     } finally {
       addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
@@ -230,135 +187,126 @@ public class OMDirectoryCreateRequest extends OMKeyRequest {
     auditLog(auditLogger, buildAuditMessage(OMAction.CREATE_DIRECTORY,
         auditMap, exception, userInfo));
 
-    logResult(createDirectoryRequest, keyArgs, omMetrics, result,
-        exception, numMissingParents);
+    logResult(createDirectoryRequest, keyArgs, omMetrics, numKeysCreated,
+            result, exception);
 
     return omClientResponse;
   }
 
+  private void logResult(CreateDirectoryRequest createDirectoryRequest,
+                         KeyArgs keyArgs, OMMetrics omMetrics, int numKeys,
+                         Result result,
+                         IOException exception) {
+
+    String volumeName = keyArgs.getVolumeName();
+    String bucketName = keyArgs.getBucketName();
+    String keyName = keyArgs.getKeyName();
+
+    switch (result) {
+    case SUCCESS:
+      omMetrics.incNumKeys(numKeys);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Directory created. Volume:{}, Bucket:{}, Key:{}",
+            volumeName, bucketName, keyName);
+      }
+      break;
+    case DIRECTORY_ALREADY_EXISTS:
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Directory already exists. Volume:{}, Bucket:{}, Key{}",
+            volumeName, bucketName, keyName, exception);
+      }
+      break;
+    case FAILURE:
+      omMetrics.incNumCreateDirectoryFails();
+      LOG.error("Directory creation failed. Volume:{}, Bucket:{}, Key{}. " +
+          "Exception:{}", volumeName, bucketName, keyName, exception);
+      break;
+    default:
+      LOG.error("Unrecognized Result for OMDirectoryCreateRequest: {}",
+          createDirectoryRequest);
+    }
+  }
+
   /**
-   * Construct OmKeyInfo for every parent directory in missing list.
+   * Construct OmDirectoryInfo for every parent directory in missing list.
    * @param ozoneManager
    * @param keyArgs
-   * @param missingParents list of parent directories to be created
-   * @param inheritAcls ACLs to be assigned to each new parent dir
+   * @param pathInfo path info holding the missing parents and inherited ACLs
    * @param trxnLogIndex
    * @return
    * @throws IOException
    */
-  public static List<OmKeyInfo> getAllParentInfo(OzoneManager ozoneManager,
-      KeyArgs keyArgs, List<String> missingParents, List<OzoneAcl> inheritAcls,
-      long trxnLogIndex) throws IOException {
+  public static List<OmDirectoryInfo> getAllParentDirInfo(
+          OzoneManager ozoneManager, KeyArgs keyArgs,
+          OMFileRequest.OMPathInfoV1 pathInfo, long trxnLogIndex)
+          throws IOException {
     OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-    List<OmKeyInfo> missingParentInfos = new ArrayList<>();
+    List<OmDirectoryInfo> missingParentInfos = new ArrayList<>();
 
     // The base id is left shifted by 8 bits for creating space to
     // create (2^8 - 1) object ids in every request.
     // maxObjId represents the largest object id allocation possible inside
     // the transaction.
     long baseObjId = ozoneManager.getObjectIdFromTxId(trxnLogIndex);
-    long maxObjId = baseObjId + MAX_NUM_OF_RECURSIVE_DIRS;
-    long objectCount = 1; // baseObjID is used by the leaf directory
+    long maxObjId = baseObjId + getMaxNumOfRecursiveDirs();
+    long objectCount = 1;
 
     String volumeName = keyArgs.getVolumeName();
     String bucketName = keyArgs.getBucketName();
     String keyName = keyArgs.getKeyName();
 
+    long lastKnownParentId = pathInfo.getLastKnownParentId();
+    List<String> missingParents = pathInfo.getMissingParents();
+    List<OzoneAcl> inheritAcls = pathInfo.getAcls();
     for (String missingKey : missingParents) {
       long nextObjId = baseObjId + objectCount;
       if (nextObjId > maxObjId) {
         throw new OMException("Too many directories in path. Exceeds limit of "
-            + MAX_NUM_OF_RECURSIVE_DIRS + ". Unable to create directory: "
+            + getMaxNumOfRecursiveDirs() + ". Unable to create directory: "
             + keyName + " in volume/bucket: " + volumeName + "/" + bucketName,
             INVALID_KEY_NAME);
       }
 
-      LOG.debug("missing parent {} getting added to KeyTable", missingKey);
-      // what about keyArgs for parent directories? TODO
-      OmKeyInfo parentKeyInfo = createDirectoryKeyInfoWithACL(
-          missingKey, keyArgs, nextObjId, inheritAcls, trxnLogIndex);
+      LOG.debug("missing parent {} getting added to DirectoryTable",
+              missingKey);
+      OmDirectoryInfo dirInfo = createDirectoryInfoWithACL(missingKey,
+              keyArgs, nextObjId, lastKnownParentId, trxnLogIndex, inheritAcls);
       objectCount++;
 
-      missingParentInfos.add(parentKeyInfo);
-      omMetadataManager.getKeyTable().addCacheEntry(
-          new CacheKey<>(omMetadataManager.getOzoneKey(volumeName,
-              bucketName, parentKeyInfo.getKeyName())),
-          new CacheValue<>(Optional.of(parentKeyInfo),
-              trxnLogIndex));
-    }
-
-    return missingParentInfos;
-  }
+      missingParentInfos.add(dirInfo);
 
-  private void logResult(CreateDirectoryRequest createDirectoryRequest,
-      KeyArgs keyArgs, OMMetrics omMetrics, Result result,
-      IOException exception, int numMissingParents) {
-
-    String volumeName = keyArgs.getVolumeName();
-    String bucketName = keyArgs.getBucketName();
-    String keyName = keyArgs.getKeyName();
-
-    switch (result) {
-    case SUCCESS:
-      // Count for the missing parents plus the directory being created.
-      omMetrics.incNumKeys(numMissingParents + 1);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Directory created. Volume:{}, Bucket:{}, Key:{}",
-            volumeName, bucketName, keyName);
-      }
-      break;
-    case DIRECTORY_ALREADY_EXISTS:
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Directory already exists. Volume:{}, Bucket:{}, Key{}",
-            volumeName, bucketName, keyName, exception);
-      }
-      break;
-    case FAILURE:
-      omMetrics.incNumCreateDirectoryFails();
-      LOG.error("Directory creation failed. Volume:{}, Bucket:{}, Key{}. " +
-          "Exception:{}", volumeName, bucketName, keyName, exception);
-      break;
-    default:
-      LOG.error("Unrecognized Result for OMDirectoryCreateRequest: {}",
-          createDirectoryRequest);
+      // update the last known parent id for the next sub-dir
+      lastKnownParentId = nextObjId;
     }
+    pathInfo.setLastKnownParentId(lastKnownParentId);
+    pathInfo.setLeafNodeObjectId(baseObjId + objectCount);
+    return missingParentInfos;
   }
 
   /**
-   * fill in a KeyInfo for a new directory entry in OM database.
+   * Fill in a DirectoryInfo for a new directory entry in the OM database,
    * without initializing ACLs from the KeyArgs - used for intermediate
    * directories which get created internally/recursively during file
    * and directory create.
-   * @param keyName
+   * @param dirName
    * @param keyArgs
    * @param objectId
-   * @param transactionIndex
-   * @return the OmKeyInfo structure
+   * @param parentObjectId
+   * @param inheritAcls
+   * @return the OmDirectoryInfo structure
    */
-  public static OmKeyInfo createDirectoryKeyInfoWithACL(
-      String keyName, KeyArgs keyArgs, long objectId,
-      List<OzoneAcl> inheritAcls, long transactionIndex) {
-    return dirKeyInfoBuilderNoACL(keyName, keyArgs, objectId)
-        .setAcls(inheritAcls).setUpdateID(transactionIndex).build();
+  public static OmDirectoryInfo createDirectoryInfoWithACL(
+          String dirName, KeyArgs keyArgs, long objectId,
+          long parentObjectId, long transactionIndex,
+          List<OzoneAcl> inheritAcls) {
+
+    return OmDirectoryInfo.newBuilder()
+            .setName(dirName)
+            .setCreationTime(keyArgs.getModificationTime())
+            .setModificationTime(keyArgs.getModificationTime())
+            .setObjectID(objectId)
+            .setUpdateID(transactionIndex)
+            .setParentObjectID(parentObjectId)
+            .setAcls(inheritAcls).build();
   }
-
-  private static OmKeyInfo.Builder dirKeyInfoBuilderNoACL(String keyName,
-      KeyArgs keyArgs, long objectId) {
-    String dirName = OzoneFSUtils.addTrailingSlashIfNeeded(keyName);
-
-    return new OmKeyInfo.Builder()
-        .setVolumeName(keyArgs.getVolumeName())
-        .setBucketName(keyArgs.getBucketName())
-        .setKeyName(dirName)
-        .setOmKeyLocationInfos(Collections.singletonList(
-            new OmKeyLocationInfoGroup(0, new ArrayList<>())))
-        .setCreationTime(keyArgs.getModificationTime())
-        .setModificationTime(keyArgs.getModificationTime())
-        .setDataSize(0)
-        .setReplicationType(HddsProtos.ReplicationType.RATIS)
-        .setReplicationFactor(HddsProtos.ReplicationFactor.ONE)
-        .setObjectID(objectId)
-        .setUpdateID(objectId);
-  }
-
 }
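
The object-ID budget used above deserves a worked example: the transaction ID
is left-shifted by 8 bits, reserving 2^8 - 1 = 255 IDs per transaction for the
missing parents, with the next free ID going to the leaf. A sketch of the
arithmetic (the shift width is taken from the comment above;
getObjectIdFromTxId is assumed to implement exactly this):

    public class ObjectIdBudgetDemo {
      static final long MAX_NUM_OF_RECURSIVE_DIRS = 255; // 2^8 - 1

      // Assumed shape of OzoneManager#getObjectIdFromTxId per the comment.
      static long objectIdFromTxId(long txId) {
        return txId << 8;
      }

      public static void main(String[] args) {
        long txId = 7;                                        // example txn id
        long baseObjId = objectIdFromTxId(txId);              // 1792
        long maxObjId = baseObjId + MAX_NUM_OF_RECURSIVE_DIRS; // 2047
        // Missing parents receive baseObjId + 1, baseObjId + 2, ..., and the
        // leaf directory takes the next ID after the last parent; anything
        // beyond maxObjId fails with INVALID_KEY_NAME.
        System.out.println(baseObjId + " .. " + maxObjId);
      }
    }
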
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index f020f12..d5543ba 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -21,14 +21,19 @@ package org.apache.hadoop.ozone.om.request.file;
 import java.io.IOException;
 import java.nio.file.Path;
 import java.util.ArrayList;
+import java.util.Iterator;
 import java.util.List;
 
 import com.google.common.base.Optional;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -127,6 +132,169 @@ public final class OMFileRequest {
   }
 
   /**
+   * Verify whether any directory or key exists in the given path in the
+   * specified volume/bucket by iterating through the directory table.
+   *
+   * @param omMetadataManager OM Metadata manager
+   * @param volumeName        volume name
+   * @param bucketName        bucket name
+   * @param keyName           key name
+   * @param keyPath           path
+   * @return OMPathInfoV1 path info object
+   * @throws IOException on DB failure
+   */
+  public static OMPathInfoV1 verifyDirectoryKeysInPath(
+          @Nonnull OMMetadataManager omMetadataManager,
+          @Nonnull String volumeName,
+          @Nonnull String bucketName, @Nonnull String keyName,
+          @Nonnull Path keyPath) throws IOException {
+
+    String leafNodeName = OzoneFSUtils.getFileName(keyName);
+    List<String> missing = new ArrayList<>();
+
+    // Found no files/directories in the given path.
+    OMDirectoryResult result = OMDirectoryResult.NONE;
+
+    Iterator<Path> elements = keyPath.iterator();
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    // by default, inherit bucket ACLs
+    List<OzoneAcl> inheritAcls = omBucketInfo.getAcls();
+
+    long lastKnownParentId = omBucketInfo.getObjectID();
+    String dbDirName = ""; // absolute path for trace logs
+    // for better logging
+    StringBuilder fullKeyPath = new StringBuilder(bucketKey);
+    while (elements.hasNext()) {
+      String fileName = elements.next().toString();
+      fullKeyPath.append(OzoneConsts.OM_KEY_PREFIX);
+      fullKeyPath.append(fileName);
+      if (missing.size() > 0) {
+        // Add all the sub-dirs to the missing list except the leaf element.
+        // For example, /vol1/buck1/a/b/c/d/e/f/file1.txt.
+        // Assume /vol1/buck1/a/b/c exists, then add d, e, f into missing list.
+        if (elements.hasNext()) {
+          // skips leaf node.
+          missing.add(fileName);
+        }
+        continue;
+      }
+
+      // For example, /vol1/buck1/a/b/c/d/e/f/file1.txt
+      // 1. Look up the directoryTable. If absent, go to the next step.
+      // 2. Look up the keyTable. If absent, go to the next step.
+      // 3. Add the 'sub-dir' to the missing parents list.
+      String dbNodeName = omMetadataManager.getOzonePathKey(
+              lastKnownParentId, fileName);
+      OmDirectoryInfo omDirInfo = omMetadataManager.getDirectoryTable().
+              get(dbNodeName);
+      if (omDirInfo != null) {
+        dbDirName += omDirInfo.getName() + OzoneConsts.OZONE_URI_DELIMITER;
+        if (elements.hasNext()) {
+          result = OMDirectoryResult.DIRECTORY_EXISTS_IN_GIVENPATH;
+          lastKnownParentId = omDirInfo.getObjectID();
+          inheritAcls = omDirInfo.getAcls();
+          continue;
+        } else {
+          // Checked all the sub-dirs till the leaf node.
+          // Found a directory in the given path.
+          result = OMDirectoryResult.DIRECTORY_EXISTS;
+        }
+      } else {
+        // Get parentID from the lastKnownParent. For any file directly
+        // under the bucket, the parent is the bucketID, e.g.
+        // "/vol1/buck1/file1".
+        // TODO: Need to add UT for this case along with OMFileCreateRequest.
+        if (omMetadataManager.getKeyTable().isExist(dbNodeName)) {
+          if (elements.hasNext()) {
+            // Found a file in the given key name.
+            result = OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH;
+          } else {
+            // Checked all the sub-dirs till the leaf file.
+            // Found a file with the given key name.
+            result = OMDirectoryResult.FILE_EXISTS;
+          }
+          break; // Skip directory traversal as it hits key.
+        }
+
+        // Add to missing list, there is no such file/directory with given name.
+        if (elements.hasNext()) {
+          missing.add(fileName);
+        }
+      }
+    }
+
+    LOG.trace("verifyFiles/Directories in Path : " + "/" + volumeName
+            + "/" + bucketName + "/" + keyName + ":" + result);
+
+    if (result == OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH || result ==
+            OMDirectoryResult.FILE_EXISTS) {
+      return new OMPathInfoV1(leafNodeName, lastKnownParentId, missing,
+              result, inheritAcls, fullKeyPath.toString());
+    }
+
+    String dbDirKeyName = omMetadataManager.getOzoneDirKey(volumeName,
+            bucketName, dbDirName);
+    LOG.trace("Acls inherited from parent " + dbDirKeyName + " are : "
+            + inheritAcls);
+
+    return new OMPathInfoV1(leafNodeName, lastKnownParentId, missing,
+            result, inheritAcls);
+  }
+
+  /**
+   * Class to return the results from verifyDirectoryKeysInPath.
+   * Includes the list of missing intermediate directories and
+   * the directory search result code.
+   */
+  public static class OMPathInfoV1 extends OMPathInfo {
+    private String leafNodeName;
+    private long lastKnownParentId;
+    private long leafNodeObjectId;
+    private String fileExistsInPath;
+
+    public OMPathInfoV1(String leafNodeName, long lastKnownParentId,
+                        List<String> missingParents, OMDirectoryResult result,
+                        List<OzoneAcl> aclList, String fileExistsInPath) {
+      super(missingParents, result, aclList);
+      this.leafNodeName = leafNodeName;
+      this.lastKnownParentId = lastKnownParentId;
+      this.fileExistsInPath = fileExistsInPath;
+    }
+
+    public OMPathInfoV1(String leafNodeName, long lastKnownParentId,
+                        List<String> missingParents, OMDirectoryResult result,
+                        List<OzoneAcl> aclList) {
+      this(leafNodeName, lastKnownParentId, missingParents, result, aclList,
+              "");
+    }
+
+    public String getLeafNodeName() {
+      return leafNodeName;
+    }
+
+    public long getLeafNodeObjectId() {
+      return leafNodeObjectId;
+    }
+
+    public void setLeafNodeObjectId(long leafNodeObjectId) {
+      this.leafNodeObjectId = leafNodeObjectId;
+    }
+
+    public void setLastKnownParentId(long lastKnownParentId) {
+      this.lastKnownParentId = lastKnownParentId;
+    }
+
+    public long getLastKnownParentId() {
+      return lastKnownParentId;
+    }
+
+    public String getFileExistsInPath() {
+      return fileExistsInPath;
+    }
+  }
+
+  /**
    * Class to return the results from verifyFilesInPath.
    * Includes the list of missing intermediate directories and
    * the directory search result code.
@@ -224,4 +392,34 @@ public final class OMFileRequest {
           new CacheValue<>(keyInfo, index));
     }
   }
+
+  /**
+   * Adds directory info entries to the Table cache.
+   *
+   * @param omMetadataManager  OM Metadata Manager
+   * @param dirInfo            directory info
+   * @param missingParentInfos list of the parents to be added to DB
+   * @param trxnLogIndex       transaction log index
+   */
+  public static void addDirectoryTableCacheEntries(
+          OMMetadataManager omMetadataManager,
+          Optional<OmDirectoryInfo> dirInfo,
+          Optional<List<OmDirectoryInfo>> missingParentInfos,
+          long trxnLogIndex) {
+    for (OmDirectoryInfo subDirInfo : missingParentInfos.get()) {
+      omMetadataManager.getDirectoryTable().addCacheEntry(
+              new CacheKey<>(omMetadataManager.getOzonePathKey(
+                      subDirInfo.getParentObjectID(), subDirInfo.getName())),
+              new CacheValue<>(Optional.of(subDirInfo), trxnLogIndex));
+    }
+
+    if (dirInfo.isPresent()) {
+      omMetadataManager.getDirectoryTable().addCacheEntry(
+              new CacheKey<>(omMetadataManager.getOzonePathKey(
+                      dirInfo.get().getParentObjectID(),
+                      dirInfo.get().getName())),
+              new CacheValue<>(dirInfo, trxnLogIndex));
+    }
+  }
+
 }
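
To make the traversal above concrete, here is a simplified, hedged
re-enactment of the directory-table walk for mkdir /vol1/buck1/a/b/c when only
'a' already exists (a HashMap stands in for the directoryTable; object IDs are
invented):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class PathWalkDemo {
      public static void main(String[] args) {
        // Stand-in for directoryTable: "parentId/name" -> child objectID.
        Map<String, Long> dirTable = new HashMap<>();
        long bucketId = 512L;                 // invented bucket objectID
        dirTable.put(bucketId + "/a", 1024L); // only /a exists

        long lastKnownParentId = bucketId;
        List<String> missing = new ArrayList<>();
        String[] components = {"a", "b", "c"};
        for (int i = 0; i < components.length; i++) {
          boolean isLeaf = (i == components.length - 1);
          Long objId = dirTable.get(lastKnownParentId + "/" + components[i]);
          if (objId != null) {
            lastKnownParentId = objId;        // descend into the existing dir
          } else if (!isLeaf) {
            missing.add(components[i]);       // intermediate dirs only
          }
        }
        // -> lastKnownParentId=1024 (a), missing=[b]; the leaf "c" is then
        //    built separately via createDirectoryInfoWithACL.
        System.out.println(lastKnownParentId + " " + missing);
      }
    }
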
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponseV1.java
new file mode 100644
index 0000000..4e93fa7
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponseV1.java
@@ -0,0 +1,103 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.file;
+
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest.Result;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.annotation.Nonnull;
+import java.io.IOException;
+import java.util.List;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE;
+
+/**
+ * Response for create directory request.
+ */
+@CleanupTableInfo(cleanupTables = {DIRECTORY_TABLE})
+public class OMDirectoryCreateResponseV1 extends OMClientResponse {
+
+  public static final Logger LOG =
+      LoggerFactory.getLogger(OMDirectoryCreateResponseV1.class);
+
+  private OmDirectoryInfo dirInfo;
+  private List<OmDirectoryInfo> parentDirInfos;
+  private Result result;
+
+  public OMDirectoryCreateResponseV1(@Nonnull OMResponse omResponse,
+                                     @Nonnull OmDirectoryInfo dirInfo,
+                                     @Nonnull List<OmDirectoryInfo> pDirInfos,
+                                     @Nonnull Result result) {
+    super(omResponse);
+    this.dirInfo = dirInfo;
+    this.parentDirInfos = pDirInfos;
+    this.result = result;
+  }
+
+  /**
+   * Constructor used when the request fails or the directory already exists.
+   */
+  public OMDirectoryCreateResponseV1(@Nonnull OMResponse omResponse,
+                                     @Nonnull Result result) {
+    super(omResponse);
+    this.result = result;
+  }
+
+  @Override
+  protected void addToDBBatch(OMMetadataManager omMetadataManager,
+                              BatchOperation batchOperation)
+          throws IOException {
+    addToDirectoryTable(omMetadataManager, batchOperation);
+  }
+
+  private void addToDirectoryTable(OMMetadataManager omMetadataManager,
+                                BatchOperation batchOperation)
+          throws IOException {
+    if (dirInfo != null) {
+      if (parentDirInfos != null) {
+        for (OmDirectoryInfo parentDirInfo : parentDirInfos) {
+          String parentKey = omMetadataManager
+                  .getOzonePathKey(parentDirInfo.getParentObjectID(),
+                          parentDirInfo.getName());
+          LOG.debug("putWithBatch parent : dir {} info : {}", parentKey,
+                  parentDirInfo);
+          omMetadataManager.getDirectoryTable()
+                  .putWithBatch(batchOperation, parentKey, parentDirInfo);
+        }
+      }
+
+      String dirKey = omMetadataManager.getOzonePathKey(
+              dirInfo.getParentObjectID(), dirInfo.getName());
+      omMetadataManager.getDirectoryTable().putWithBatch(batchOperation, dirKey,
+              dirInfo);
+    } else {
+      // When the directory already exists, we don't add it to the cache.
+      // This is not an error; in this case dirInfo will be null.
+      LOG.debug("Response Status is OK, dirInfo is null in " +
+              "OMDirectoryCreateResponseV1");
+    }
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
index ff1f9c3..a6ca2cf 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
@@ -39,6 +39,8 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -232,6 +234,25 @@ public final class TestOMRequestUtils {
   }
 
   /**
+   * Add dir key entry to DirectoryTable.
+   *
+   * @throws Exception
+   */
+  public static void addDirKeyToDirTable(boolean addToCache,
+                                         OmDirectoryInfo omDirInfo,
+                                         long trxnLogIndex,
+                                         OMMetadataManager omMetadataManager)
+          throws Exception {
+    String ozoneKey = omDirInfo.getPath();
+    if (addToCache) {
+      omMetadataManager.getDirectoryTable().addCacheEntry(
+              new CacheKey<>(ozoneKey),
+              new CacheValue<>(Optional.of(omDirInfo), trxnLogIndex));
+    }
+    omMetadataManager.getDirectoryTable().put(ozoneKey, omDirInfo);
+  }
+
+  /**
    * Create OmKeyInfo.
    */
   public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName,
@@ -242,6 +263,22 @@ public final class TestOMRequestUtils {
   }
 
   /**
+   * Create OmDirectoryInfo.
+   */
+  public static OmDirectoryInfo createOmDirectoryInfo(String keyName,
+                                                      long objectID,
+                                                      long parentObjID) {
+    return new OmDirectoryInfo.Builder()
+            .setName(keyName)
+            .setCreationTime(Time.now())
+            .setModificationTime(Time.now())
+            .setObjectID(objectID)
+            .setParentObjectID(parentObjID)
+            .setUpdateID(objectID)
+            .build();
+  }
+
+  /**
    * Create OmKeyInfo.
    */
   public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName,
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java
new file mode 100644
index 0000000..77cf74b
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java
@@ -0,0 +1,649 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.file;
+
+import com.google.common.base.Optional;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.audit.AuditLogger;
+import org.apache.hadoop.ozone.audit.AuditMessage;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OMMetrics;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.ResolvedBucket;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.OMClientRequest;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateDirectoryRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.jetbrains.annotations.NotNull;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.mockito.Mockito;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.UUID;
+
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.when;
+
+/**
+ * Tests OM directory create request with the V1 layout version.
+ */
+public class TestOMDirectoryCreateRequestV1 {
+
+  @Rule
+  public TemporaryFolder folder = new TemporaryFolder();
+
+  private OzoneManager ozoneManager;
+  private OMMetrics omMetrics;
+  private OMMetadataManager omMetadataManager;
+  private AuditLogger auditLogger;
+  // A no-op OzoneManagerDoubleBufferHelper; these tests do not exercise
+  // the double buffer.
+  private OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper =
+          (response, transactionIndex) -> null;
+
+  @Before
+  public void setup() throws Exception {
+    ozoneManager = Mockito.mock(OzoneManager.class);
+    omMetrics = OMMetrics.create();
+    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
+    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
+            folder.newFolder().getAbsolutePath());
+    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
+    when(ozoneManager.getMetrics()).thenReturn(omMetrics);
+    when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
+    auditLogger = Mockito.mock(AuditLogger.class);
+    when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
+    Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
+    when(ozoneManager.resolveBucketLink(any(KeyArgs.class),
+            any(OMClientRequest.class)))
+            .thenReturn(new ResolvedBucket(Pair.of("", ""), Pair.of("", "")));
+  }
+
+  @After
+  public void stop() {
+    omMetrics.unRegister();
+    Mockito.framework().clearInlineMocks();
+  }
+
+  @Test
+  public void testPreExecute() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    String keyName = "a/b/c";
+
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            keyName);
+    OMDirectoryCreateRequestV1 omDirectoryCreateRequestV1 =
+            new OMDirectoryCreateRequestV1(omRequest);
+
+    OMRequest modifiedOmRequest =
+            omDirectoryCreateRequestV1.preExecute(ozoneManager);
+
+    // preExecute modifies the original request, so the two must differ.
+    Assert.assertNotEquals(omRequest, modifiedOmRequest);
+  }
+
+  @Test
+  public void testValidateAndUpdateCache() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 3);
+
+    // Add volume and bucket entries to DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketID = omBucketInfo.getObjectID();
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            keyName);
+    OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+            new OMDirectoryCreateRequestV1(omRequest);
+
+    OMRequest modifiedOmRequest = omDirCreateRequestV1.preExecute(ozoneManager);
+
+    omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+    OMClientResponse omClientResponse =
+            omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
+            omClientResponse.getOMResponse().getStatus());
+    verifyDirectoriesInDB(dirs, bucketID);
+  }
+
+  @Test
+  public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 3);
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            keyName);
+    OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+            new OMDirectoryCreateRequestV1(omRequest);
+
+    OMRequest modifiedOmRequest = omDirCreateRequestV1.preExecute(ozoneManager);
+
+    omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+    OMClientResponse omClientResponse =
+            omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(VOLUME_NOT_FOUND,
+            omClientResponse.getOMResponse().getStatus());
+
+    // No directory entries should exist in the DB
+    Assert.assertTrue("Unexpected directory entries!",
+            omMetadataManager.getDirectoryTable().isEmpty());
+
+  }
+
+  @Test
+  public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 3);
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            keyName);
+    OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+            new OMDirectoryCreateRequestV1(omRequest);
+
+    OMRequest modifiedOmRequest = omDirCreateRequestV1.preExecute(ozoneManager);
+
+    omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+    TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager);
+
+    OMClientResponse omClientResponse =
+            omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND,
+            omClientResponse.getOMResponse().getStatus());
+
+    // No directory entries should exist in the DB
+    Assert.assertTrue("Unexpected directory entries!",
+            omMetadataManager.getDirectoryTable().isEmpty());
+  }
+
+  @Test
+  public void testValidateAndUpdateCacheWithSubDirectoryInPath()
+          throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 3);
+
+    // Add volume and bucket entries to DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketID = omBucketInfo.getObjectID();
+    int objID = 100;
+
+    //1. Create root
+    OmDirectoryInfo omDirInfo =
+            TestOMRequestUtils.createOmDirectoryInfo(dirs.get(0), objID++,
+                    bucketID);
+    TestOMRequestUtils.addDirKeyToDirTable(true, omDirInfo, 5000,
+            omMetadataManager);
+    //2. Create sub-directory under root
+    omDirInfo = TestOMRequestUtils.createOmDirectoryInfo(dirs.get(1), objID++,
+            omDirInfo.getObjectID());
+    TestOMRequestUtils.addDirKeyToDirTable(true, omDirInfo, 5000,
+            omMetadataManager);
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            keyName);
+    OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+            new OMDirectoryCreateRequestV1(omRequest);
+
+    OMRequest modifiedOmRequest = omDirCreateRequestV1.preExecute(ozoneManager);
+
+    omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+    OMClientResponse omClientResponse =
+            omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
+            omClientResponse.getOMResponse().getStatus());
+
+    // Key should exist in DB and cache.
+    verifyDirectoriesInDB(dirs, bucketID);
+  }
+
+  @Test
+  public void testValidateAndUpdateCacheWithDirectoryAlreadyExists()
+          throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 3);
+
+    // Add volume and bucket entries to DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketID = omBucketInfo.getObjectID();
+
+    // bucketID is the parent
+    long parentID = bucketID;
+
+    // add all the directories into DirectoryTable
+    for (int indx = 0; indx < dirs.size(); indx++) {
+      long objID = 100 + indx;
+      long txnID = 5000 + indx;
+      // for index=0, parentID is bucketID
+      OmDirectoryInfo omDirInfo = TestOMRequestUtils.createOmDirectoryInfo(
+              dirs.get(indx), objID, parentID);
+      TestOMRequestUtils.addDirKeyToDirTable(false, omDirInfo,
+              txnID, omMetadataManager);
+
+      parentID = omDirInfo.getObjectID();
+    }
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            keyName);
+    OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+            new OMDirectoryCreateRequestV1(omRequest);
+
+    OMRequest modifiedOmRequest = omDirCreateRequestV1.preExecute(ozoneManager);
+
+    omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+    OMClientResponse omClientResponse =
+            omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(
+            OzoneManagerProtocolProtos.Status.DIRECTORY_ALREADY_EXISTS,
+            omClientResponse.getOMResponse().getStatus());
+
+    Assert.assertEquals("Wrong OM numKeys metrics",
+            0, ozoneManager.getMetrics().getNumKeys());
+
+    // Directories should exist in the DB but should not be in the cache.
+    verifyDirectoriesInDB(dirs, bucketID);
+    verifyDirectoriesNotInCache(dirs, bucketID);
+  }
+
+  /**
+   * Case: A file exists with the same name as the requested directory.
+   * Say, createDir '/a/b/c' is requested and a file named 'c' already
+   * exists at that path.
+   */
+  @Test
+  public void testValidateAndUpdateCacheWithFilesInPath() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 3);
+
+    // Add volume and bucket entries to DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long parentID = omBucketInfo.getObjectID();
+
+    // Add all the parent directories to the DirectoryTable. The leaf node
+    // is not created here; it is left for the CreateDirectory request.
+    for (int indx = 0; indx < dirs.size() - 1; indx++) {
+      long objID = 100 + indx;
+      long txnID = 5000 + indx;
+      // for index=0, parentID is bucketID
+      OmDirectoryInfo omDirInfo = TestOMRequestUtils.createOmDirectoryInfo(
+              dirs.get(indx), objID, parentID);
+      TestOMRequestUtils.addDirKeyToDirTable(false, omDirInfo,
+              txnID, omMetadataManager);
+
+      parentID = omDirInfo.getObjectID();
+    }
+
+    long objID = parentID + 100;
+    long txnID = 50000;
+
+    // Add a file to the FileTable to simulate the "file exists" check.
+    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
+            bucketName, keyName, HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.THREE, objID++);
+    String ozoneFileName = parentID + "/" + dirs.get(dirs.size() - 1);
+    omMetadataManager.getKeyTable().addCacheEntry(new CacheKey<>(ozoneFileName),
+            new CacheValue<>(Optional.of(omKeyInfo), ++txnID));
+    omMetadataManager.getKeyTable().put(ozoneFileName, omKeyInfo);
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            keyName);
+    OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+            new OMDirectoryCreateRequestV1(omRequest);
+
+    OMRequest modifiedOmRequest =
+            omDirCreateRequestV1.preExecute(ozoneManager);
+
+    omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+    OMClientResponse omClientResponse =
+            omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.FILE_ALREADY_EXISTS,
+            omClientResponse.getOMResponse().getStatus());
+
+    Assert.assertEquals("Wrong OM numKeys metrics",
+            0, ozoneManager.getMetrics().getNumKeys());
+
+    // The file key should still exist in the DB
+    Assert.assertNotNull(omMetadataManager.getKeyTable().get(ozoneFileName));
+    // The parent directory entries should be untouched
+    Assert.assertEquals("Wrong directories count!", 3,
+            omMetadataManager.getDirectoryTable().getEstimatedKeyCount());
+  }
+
+  /**
+   * Case: File exists in the given path.
+   * Say, createDir '/a/b/c/d' is requested and a file '/a/b' already
+   * exists in the given path.
+   */
+  @Test
+  public void testValidateAndUpdateCacheWithFileExistsInGivenPath()
+          throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 3);
+
+    // Add volume and bucket entries to DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long parentID = omBucketInfo.getObjectID();
+
+    long objID = parentID + 100;
+    long txnID = 5000;
+
+    // for index=0, parentID is bucketID
+    OmDirectoryInfo omDirInfo = TestOMRequestUtils.createOmDirectoryInfo(
+            dirs.get(0), objID++, parentID);
+    TestOMRequestUtils.addDirKeyToDirTable(true, omDirInfo,
+            txnID, omMetadataManager);
+    parentID = omDirInfo.getObjectID();
+
+    // Add a key in second level.
+    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
+            bucketName, keyName, HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.THREE, objID++);
+    String ozoneKey = parentID + "/" + dirs.get(1);
+    omMetadataManager.getKeyTable().addCacheEntry(new CacheKey<>(ozoneKey),
+            new CacheValue<>(Optional.of(omKeyInfo), ++txnID));
+    omMetadataManager.getKeyTable().put(ozoneKey, omKeyInfo);
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            keyName);
+    OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+            new OMDirectoryCreateRequestV1(omRequest);
+
+    OMRequest modifiedOmRequest =
+            omDirCreateRequestV1.preExecute(ozoneManager);
+
+    omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+    OMClientResponse omClientResponse =
+            omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertTrue("Invalid response code:" +
+                    omClientResponse.getOMResponse().getStatus(),
+            omClientResponse.getOMResponse().getStatus()
+                    == OzoneManagerProtocolProtos.Status.FILE_ALREADY_EXISTS);
+
+    Assert.assertEquals("Wrong OM numKeys metrics",
+            0, ozoneManager.getMetrics().getNumKeys());
+
+    // The file key should still exist in the DB
+    Assert.assertNotNull(omMetadataManager.getKeyTable().get(ozoneKey));
+    // The existing directory entry should be untouched
+    Assert.assertEquals("Wrong directories count!",
+            1, omMetadataManager.getDirectoryTable().getEstimatedKeyCount());
+  }
+
+  @Test
+  public void testCreateDirectoryUptoLimitOfMaxLevels255() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 255);
+
+    // Add volume and bucket entries to DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketID = omBucketInfo.getObjectID();
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            OzoneFSUtils.addTrailingSlashIfNeeded(keyName));
+    OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+            new OMDirectoryCreateRequestV1(omRequest);
+
+    OMRequest modifiedOmRequest = omDirCreateRequestV1.preExecute(ozoneManager);
+
+    omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+    Assert.assertEquals(0L, omMetrics.getNumKeys());
+    OMClientResponse omClientResponse =
+            omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
+            omClientResponse.getOMResponse().getStatus());
+
+    verifyDirectoriesInDB(dirs, bucketID);
+
+    Assert.assertEquals(dirs.size(), omMetrics.getNumKeys());
+  }
+
+  @Test
+  public void testCreateDirectoryExceedLimitOfMaxLevels255() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 256);
+
+    // Add volume and bucket entries to DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketID = omBucketInfo.getObjectID();
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            OzoneFSUtils.addTrailingSlashIfNeeded(keyName));
+    OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+            new OMDirectoryCreateRequestV1(omRequest);
+
+    OMRequest modifiedOmRequest = omDirCreateRequestV1.preExecute(ozoneManager);
+
+    omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+    Assert.assertEquals(0L, omMetrics.getNumKeys());
+    OMClientResponse omClientResponse =
+            omDirCreateRequestV1.validateAndUpdateCache(ozoneManager,
+                    100L, ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.INVALID_KEY_NAME,
+            omClientResponse.getOMResponse().getStatus());
+
+    Assert.assertEquals("Unexpected directories!", 0,
+            omMetadataManager.getDirectoryTable().getEstimatedKeyCount());
+
+    Assert.assertEquals(0, omMetrics.getNumKeys());
+  }
+
+  @Test
+  public void testCreateDirectoryOMMetric() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 3);
+
+    // Add volume and bucket entries to DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketID = omBucketInfo.getObjectID();
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            OzoneFSUtils.addTrailingSlashIfNeeded(keyName));
+    OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+            new OMDirectoryCreateRequestV1(omRequest);
+
+    OMRequest modifiedOmRequest = omDirCreateRequestV1.preExecute(ozoneManager);
+
+    omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+    Assert.assertEquals(0L, omMetrics.getNumKeys());
+    OMClientResponse omClientResponse =
+            omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
+            omClientResponse.getOMResponse().getStatus());
+
+    verifyDirectoriesInDB(dirs, bucketID);
+
+    Assert.assertEquals(dirs.size(), omMetrics.getNumKeys());
+  }
+
+  @NotNull
+  private String createDirKey(List<String> dirs, int depth) {
+    String keyName = RandomStringUtils.randomAlphabetic(5);
+    dirs.add(keyName);
+    for (int i = 0; i < depth; i++) {
+      String dirName = RandomStringUtils.randomAlphabetic(5);
+      dirs.add(dirName);
+      keyName += "/" + dirName;
+    }
+    return keyName;
+  }
+
+  private void verifyDirectoriesInDB(List<String> dirs, long bucketID)
+          throws IOException {
+    // bucketID is the parent
+    long parentID = bucketID;
+    for (int indx = 0; indx < dirs.size(); indx++) {
+      String dirName = dirs.get(indx);
+      String dbKey = "";
+      // for index=0, parentID is bucketID
+      dbKey = omMetadataManager.getOzonePathKey(parentID, dirName);
+      OmDirectoryInfo omDirInfo =
+              omMetadataManager.getDirectoryTable().get(dbKey);
+      Assert.assertNotNull("Invalid directory!", omDirInfo);
+      Assert.assertEquals("Invalid directory!", dirName, omDirInfo.getName());
+      Assert.assertEquals("Invalid dir path!",
+              parentID + "/" + dirName, omDirInfo.getPath());
+      parentID = omDirInfo.getObjectID();
+    }
+  }
+
+  private void verifyDirectoriesNotInCache(List<String> dirs, long bucketID)
+          throws IOException {
+    // bucketID is the parent
+    long parentID = bucketID;
+    for (int indx = 0; indx < dirs.size(); indx++) {
+      String dirName = dirs.get(indx);
+      String dbKey = "";
+      // for index=0, parentID is bucketID
+      dbKey = omMetadataManager.getOzonePathKey(parentID, dirName);
+      CacheValue<OmDirectoryInfo> omDirInfoCacheValue =
+              omMetadataManager.getDirectoryTable()
+                      .getCacheValue(new CacheKey<>(dbKey));
+      Assert.assertNull("Unexpected directory!", omDirInfoCacheValue);
+    }
+  }
+
+  /**
+   * Creates an OMRequest which encapsulates a CreateDirectory request.
+   *
+   * @param volumeName volume name
+   * @param bucketName bucket name
+   * @param keyName    directory key name
+   * @return OMRequest
+   */
+  private OMRequest createDirectoryRequest(String volumeName, String bucketName,
+                                           String keyName) {
+    return OMRequest.newBuilder().setCreateDirectoryRequest(
+            CreateDirectoryRequest.newBuilder().setKeyArgs(
+                    KeyArgs.newBuilder().setVolumeName(volumeName)
+                            .setBucketName(bucketName).setKeyName(keyName)))
+            .setCmdType(OzoneManagerProtocolProtos.Type.CreateDirectory)
+            .setClientId(UUID.randomUUID().toString()).build();
+  }
+
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponseV1.java
new file mode 100644
index 0000000..0a1114a
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponseV1.java
@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.file;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestV1;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.util.ArrayList;
+import java.util.UUID;
+
+/**
+ * Tests OMDirectoryCreateResponseV1 with the new layout version.
+ */
+public class TestOMDirectoryCreateResponseV1 {
+  @Rule
+  public TemporaryFolder folder = new TemporaryFolder();
+
+  private OMMetadataManager omMetadataManager;
+  private BatchOperation batchOperation;
+
+  @Before
+  public void setup() throws Exception {
+    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
+    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
+        folder.newFolder().getAbsolutePath());
+    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
+    batchOperation = omMetadataManager.getStore().initBatchOperation();
+  }
+
+  @Test
+  public void testAddToDBBatch() throws Exception {
+
+    String volumeName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+
+    long parentID = 100;
+    OmDirectoryInfo omDirInfo =
+            TestOMRequestUtils.createOmDirectoryInfo(keyName, 500, parentID);
+
+    OMResponse omResponse = OMResponse.newBuilder().setCreateDirectoryResponse(
+        OzoneManagerProtocolProtos.CreateDirectoryResponse.getDefaultInstance())
+            .setStatus(OzoneManagerProtocolProtos.Status.OK)
+            .setCmdType(OzoneManagerProtocolProtos.Type.CreateDirectory)
+            .build();
+
+    OMDirectoryCreateResponseV1 omDirectoryCreateResponseV1 =
+        new OMDirectoryCreateResponseV1(omResponse, omDirInfo,
+            new ArrayList<>(), OMDirectoryCreateRequestV1.Result.SUCCESS);
+
+    omDirectoryCreateResponseV1.addToDBBatch(omMetadataManager, batchOperation);
+
+    // Commit the batch manually and verify that addToDBBatch succeeded.
+    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+
+    Assert.assertNotNull(omMetadataManager.getDirectoryTable().get(
+            omMetadataManager.getOzonePathKey(parentID, keyName)));
+  }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 02/19: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable (#1473)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 4d2d64cb4259f4479cd6e2403e62c9a1f6f10b90
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Tue Oct 13 22:48:35 2020 +0530

    HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable (#1473)
---
 .../apache/hadoop/ozone/om/helpers/OmKeyInfo.java  | 109 +++++++-
 .../apache/hadoop/fs/ozone/TestOzoneFileOps.java   | 231 +++++++++++++++++
 .../src/main/proto/OmClientProtocol.proto          |   1 +
 .../apache/hadoop/ozone/om/OMMetadataManager.java  |  15 +-
 .../hadoop/ozone/om/codec/OmKeyInfoCodec.java      |   8 +
 .../hadoop/ozone/om/OmMetadataManagerImpl.java     |  37 ++-
 .../om/ratis/utils/OzoneManagerRatisUtils.java     |  19 +-
 .../request/file/OMDirectoryCreateRequestV1.java   |  21 +-
 .../ozone/om/request/file/OMFileCreateRequest.java |  53 ++--
 .../om/request/file/OMFileCreateRequestV1.java     | 258 +++++++++++++++++++
 .../ozone/om/request/file/OMFileRequest.java       | 132 +++++++++-
 .../ozone/om/request/key/OMKeyCommitRequest.java   |  34 ++-
 ...ommitRequest.java => OMKeyCommitRequestV1.java} | 254 ++++++++++---------
 .../hadoop/ozone/om/request/key/OMKeyRequest.java  | 278 ++++++++++++---------
 .../om/response/file/OMFileCreateResponseV1.java   |  85 +++++++
 .../ozone/om/response/key/OMKeyCommitResponse.java |  15 ++
 ...mitResponse.java => OMKeyCommitResponseV1.java} |  53 ++--
 .../ozone/om/response/key/OMKeyCreateResponse.java |  14 +-
 .../ozone/om/request/TestOMRequestUtils.java       | 110 ++++++++
 .../file/TestOMDirectoryCreateRequestV1.java       |   1 +
 .../om/request/file/TestOMFileCreateRequest.java   |  91 ++++---
 .../om/request/file/TestOMFileCreateRequestV1.java | 192 ++++++++++++++
 .../om/request/key/TestOMKeyCommitRequest.java     |  74 ++++--
 .../om/request/key/TestOMKeyCommitRequestV1.java   | 106 ++++++++
 .../ozone/om/request/key/TestOMKeyRequest.java     |   9 +-
 .../response/file/TestOMFileCreateResponseV1.java  |  73 ++++++
 .../om/response/key/TestOMKeyCommitResponse.java   |  66 +++--
 .../om/response/key/TestOMKeyCommitResponseV1.java | 101 ++++++++
 .../om/response/key/TestOMKeyCreateResponse.java   |  38 +--
 .../ozone/om/response/key/TestOMKeyResponse.java   |  25 +-
 30 files changed, 2089 insertions(+), 414 deletions(-)
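
A rough picture of where this change puts the metadata for a key such as
'/d1/d2/file1' under the V1 layout (objectIDs are placeholders; see the
TestOzoneFileOps assertions below for the exact format):

    directoryTable:  <bucketId>/d1           -> OmDirectoryInfo(d1)
                     <d1Id>/d2               -> OmDirectoryInfo(d2)
    openFileTable:   <d2Id>/file1/<clientId> -> OmKeyInfo   (while open)
    fileTable:       <d2Id>/file1            -> OmKeyInfo   (after commit)
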

diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
index 43213a9..b6fb404 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
@@ -25,9 +25,11 @@ import java.util.List;
 import java.util.Map;
 import java.util.Objects;
 
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocationList;
 import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
@@ -52,6 +54,8 @@ public final class OmKeyInfo extends WithObjectID {
   private HddsProtos.ReplicationType type;
   private HddsProtos.ReplicationFactor factor;
   private FileEncryptionInfo encInfo;
+  private String fileName; // leaf node name
+  private long parentObjectID; // pointer to parent directory
 
   /**
    * ACL Information.
@@ -94,6 +98,22 @@ public final class OmKeyInfo extends WithObjectID {
     this.updateID = updateID;
   }
 
+  @SuppressWarnings("parameternumber")
+  OmKeyInfo(String volumeName, String bucketName, String keyName,
+            String fileName, List<OmKeyLocationInfoGroup> versions,
+            long dataSize, long creationTime, long modificationTime,
+            HddsProtos.ReplicationType type,
+            HddsProtos.ReplicationFactor factor,
+            Map<String, String> metadata,
+            FileEncryptionInfo encInfo, List<OzoneAcl> acls,
+            long parentObjectID, long objectID, long updateID) {
+    this(volumeName, bucketName, keyName, versions, dataSize,
+            creationTime, modificationTime, type, factor, metadata, encInfo,
+            acls, objectID, updateID);
+    this.fileName = fileName;
+    this.parentObjectID = parentObjectID;
+  }
+
   public String getVolumeName() {
     return volumeName;
   }
@@ -126,6 +146,19 @@ public final class OmKeyInfo extends WithObjectID {
     this.dataSize = size;
   }
 
+  public void setFileName(String fileName) {
+    this.fileName = fileName;
+  }
+
+  public String getFileName() {
+    return fileName;
+  }
+
+  public long getParentObjectID() {
+    return parentObjectID;
+  }
+
+
   public synchronized OmKeyLocationInfoGroup getLatestVersionLocations() {
     return keyLocationVersions.size() == 0? null :
         keyLocationVersions.get(keyLocationVersions.size() - 1);
@@ -272,6 +305,9 @@ public final class OmKeyInfo extends WithObjectID {
     private List<OzoneAcl> acls;
     private long objectID;
     private long updateID;
+    // Not persisted to DB. fileName is the last path element of keyName.
+    private String fileName;
+    private long parentObjectID;
 
     public Builder() {
       this.metadata = new HashMap<>();
@@ -374,11 +410,22 @@ public final class OmKeyInfo extends WithObjectID {
       return this;
     }
 
+    public Builder setFileName(String keyFileName) {
+      this.fileName = keyFileName;
+      return this;
+    }
+
+    public Builder setParentObjectID(long parentID) {
+      this.parentObjectID = parentID;
+      return this;
+    }
+
     public OmKeyInfo build() {
       return new OmKeyInfo(
-          volumeName, bucketName, keyName, omKeyLocationInfoGroups,
-          dataSize, creationTime, modificationTime, type, factor, metadata,
-          encInfo, acls, objectID, updateID);
+              volumeName, bucketName, keyName, fileName,
+              omKeyLocationInfoGroups, dataSize, creationTime,
+              modificationTime, type, factor, metadata, encInfo, acls,
+              parentObjectID, objectID, updateID);
     }
   }
 
@@ -391,11 +438,33 @@ public final class OmKeyInfo extends WithObjectID {
   }
 
   /**
+   * For network transmit.
+   *
+   * @param fullKeyName the user given full key name
+   * @return key info with the user given full key name
+   */
+  public KeyInfo getProtobuf(String fullKeyName, int clientVersion) {
+    return getProtobuf(false, fullKeyName, clientVersion);
+  }
+
+  /**
    *
    * @param ignorePipeline true for persist to DB, false for network transmit.
    * @return
    */
   public KeyInfo getProtobuf(boolean ignorePipeline, int clientVersion) {
+    return getProtobuf(ignorePipeline, null, clientVersion);
+  }
+
+  /**
+   * Gets KeyInfo with the user given key name.
+   *
+   * @param ignorePipeline ignore pipeline flag
+   * @param fullKeyName    user given key name
+   * @param clientVersion  client version
+   * @return key info object
+   */
+  private KeyInfo getProtobuf(boolean ignorePipeline, String fullKeyName,
+                              int clientVersion) {
     long latestVersion = keyLocationVersions.size() == 0 ? -1 :
         keyLocationVersions.get(keyLocationVersions.size() - 1).getVersion();
 
@@ -408,7 +477,6 @@ public final class OmKeyInfo extends WithObjectID {
     KeyInfo.Builder kb = KeyInfo.newBuilder()
         .setVolumeName(volumeName)
         .setBucketName(bucketName)
-        .setKeyName(keyName)
         .setDataSize(dataSize)
         .setFactor(factor)
         .setType(type)
@@ -419,7 +487,13 @@ public final class OmKeyInfo extends WithObjectID {
         .addAllMetadata(KeyValueUtil.toProtobuf(metadata))
         .addAllAcls(OzoneAclUtil.toProtobuf(acls))
         .setObjectID(objectID)
-        .setUpdateID(updateID);
+        .setUpdateID(updateID)
+        .setParentID(parentObjectID);
+    if (StringUtils.isNotBlank(fullKeyName)) {
+      kb.setKeyName(fullKeyName);
+    } else {
+      kb.setKeyName(keyName);
+    }
     if (encInfo != null) {
       kb.setFileEncryptionInfo(OMPBHelper.convert(encInfo));
     }
@@ -457,6 +531,11 @@ public final class OmKeyInfo extends WithObjectID {
     if (keyInfo.hasUpdateID()) {
       builder.setUpdateID(keyInfo.getUpdateID());
     }
+    if (keyInfo.hasParentID()) {
+      builder.setParentObjectID(keyInfo.getParentID());
+    }
+    // Not persisted to DB; fileName is extracted from keyName.
+    builder.setFileName(OzoneFSUtils.getFileName(keyInfo.getKeyName()));
     return builder.build();
   }
 
@@ -470,6 +549,8 @@ public final class OmKeyInfo extends WithObjectID {
         ", creationTime='" + creationTime + '\'' +
         ", type='" + type + '\'' +
         ", factor='" + factor + '\'' +
+        ", objectID='" + objectID + '\'' +
+        ", parentID='" + parentObjectID + '\'' +
         '}';
   }
 
@@ -495,12 +576,13 @@ public final class OmKeyInfo extends WithObjectID {
         Objects.equals(metadata, omKeyInfo.metadata) &&
         Objects.equals(acls, omKeyInfo.acls) &&
         objectID == omKeyInfo.objectID &&
-        updateID == omKeyInfo.updateID;
+        updateID == omKeyInfo.updateID &&
+        parentObjectID == omKeyInfo.parentObjectID;
   }
 
   @Override
   public int hashCode() {
-    return Objects.hash(volumeName, bucketName, keyName);
+    return Objects.hash(volumeName, bucketName, keyName, parentObjectID);
   }
 
   /**
@@ -517,8 +599,10 @@ public final class OmKeyInfo extends WithObjectID {
         .setReplicationType(type)
         .setReplicationFactor(factor)
         .setFileEncryptionInfo(encInfo)
-        .setObjectID(objectID).setUpdateID(updateID);
-
+        .setObjectID(objectID)
+        .setUpdateID(updateID)
+        .setParentObjectID(parentObjectID)
+        .setFileName(fileName);
 
     keyLocationVersions.forEach(keyLocationVersion ->
         builder.addOmKeyLocationInfoGroup(
@@ -546,4 +630,11 @@ public final class OmKeyInfo extends WithObjectID {
   public void clearFileEncryptionInfo() {
     this.encInfo = null;
   }
+
+  public String getPath() {
+    if (StringUtils.isBlank(getFileName())) {
+      return getKeyName();
+    }
+    return getParentObjectID() + OzoneConsts.OM_KEY_PREFIX + getFileName();
+  }
 }
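
A short usage sketch of the new fileName/parentObjectID fields and
getPath() added above (illustrative values, not from the patch):

    OmKeyInfo info = new OmKeyInfo.Builder()
            .setKeyName("a/b/file1")     // full user-given path
            .setFileName("file1")        // new: leaf node name only
            .setParentObjectID(1026L)    // new: objectID of parent dir "b"
            .build();
    info.getPath();                      // -> "1026/file1", the fileTable key
    // With a blank fileName, getPath() falls back to the full keyName.
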
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java
new file mode 100644
index 0000000..d097268
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java
@@ -0,0 +1,231 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.TestDataUtil;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.util.StringUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.concurrent.TimeoutException;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
+
+/**
+ * Verifies the entries and operations in the file table, open file table etc.
+ */
+public class TestOzoneFileOps {
+
+  @Rule
+  public Timeout timeout = new Timeout(300000);
+
+  private static final Logger LOG =
+          LoggerFactory.getLogger(TestOzoneFileOps.class);
+
+  private MiniOzoneCluster cluster;
+  private FileSystem fs;
+  private String volumeName;
+  private String bucketName;
+
+  @Before
+  public void setupOzoneFileSystem()
+          throws IOException, TimeoutException, InterruptedException {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setInt(FS_TRASH_INTERVAL_KEY, 1);
+    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, false);
+    cluster = MiniOzoneCluster.newBuilder(conf)
+            .setNumDatanodes(3)
+            .build();
+    cluster.waitForClusterToBeReady();
+    // create a volume and a bucket to be used by OzoneFileSystem
+    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster);
+    volumeName = bucket.getVolumeName();
+    bucketName = bucket.getName();
+
+    String rootPath = String.format("%s://%s.%s/",
+            OzoneConsts.OZONE_URI_SCHEME, bucket.getName(),
+            bucket.getVolumeName());
+
+    // Set the fs.defaultFS and start the filesystem
+    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
+    // Set the number of keys to be processed during batch operate.
+    conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5);
+    fs = FileSystem.get(conf);
+  }
+
+  @After
+  public void tearDown() {
+    IOUtils.closeQuietly(fs);
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test(timeout = 300_000)
+  public void testCreateFile() throws Exception {
+    // Op 1. create a file -> /d1/d2/file1
+    Path parent = new Path("/d1/d2/");
+    Path file = new Path(parent, "file1");
+    FSDataOutputStream outputStream = fs.create(file);
+    String openFileKey = "";
+
+    OMMetadataManager omMgr = cluster.getOzoneManager().getMetadataManager();
+    OmBucketInfo omBucketInfo = omMgr.getBucketTable().get(
+            omMgr.getBucketKey(volumeName, bucketName));
+    Assert.assertNotNull("Failed to find bucketInfo", omBucketInfo);
+
+    ArrayList<String> dirKeys = new ArrayList<>();
+    long d1ObjectID = verifyDirKey(omBucketInfo.getObjectID(), "d1", "/d1",
+            dirKeys, omMgr);
+    long d2ObjectID = verifyDirKey(d1ObjectID, "d2", "/d1/d2", dirKeys,
+            omMgr);
+    openFileKey = d2ObjectID + OzoneConsts.OM_KEY_PREFIX + file.getName();
+
+    // verify entries in directory table
+    TableIterator<String, ? extends
+            Table.KeyValue<String, OmDirectoryInfo>> iterator =
+            omMgr.getDirectoryTable().iterator();
+    iterator.seekToFirst();
+    int count = dirKeys.size();
+    Assert.assertEquals("Unexpected directory table entries!", 2, count);
+    while (iterator.hasNext()) {
+      count--;
+      Table.KeyValue<String, OmDirectoryInfo> value = iterator.next();
+      verifyKeyFormat(value.getKey(), dirKeys);
+    }
+    Assert.assertEquals("Unexpected directory table entries!", 0, count);
+
+    // verify entries in open key table
+    TableIterator<String, ? extends
+            Table.KeyValue<String, OmKeyInfo>> keysItr =
+            omMgr.getOpenKeyTable().iterator();
+    keysItr.seekToFirst();
+
+    while (keysItr.hasNext()) {
+      count++;
+      Table.KeyValue<String, OmKeyInfo> value = keysItr.next();
+      verifyOpenKeyFormat(value.getKey(), openFileKey);
+      verifyOMFileInfoFormat(value.getValue(), file.getName(), d2ObjectID);
+    }
+    Assert.assertEquals("Unexpected file table entries!", 1, count);
+
+    // trigger CommitKeyRequest
+    outputStream.close();
+
+    Assert.assertTrue("Failed to commit the open file:" + openFileKey,
+            omMgr.getOpenKeyTable().isEmpty());
+
+    OmKeyInfo omKeyInfo = omMgr.getKeyTable().get(openFileKey);
+    Assert.assertNotNull("Invalid Key!", omKeyInfo);
+    verifyOMFileInfoFormat(omKeyInfo, file.getName(), d2ObjectID);
+  }
+
+  private void verifyOMFileInfoFormat(OmKeyInfo omKeyInfo, String fileName,
+                                      long parentID) {
+    Assert.assertEquals("Wrong keyName", fileName,
+            omKeyInfo.getKeyName());
+    Assert.assertEquals("Wrong parentID", parentID,
+            omKeyInfo.getParentObjectID());
+    String dbKey = parentID + OzoneConsts.OM_KEY_PREFIX + fileName;
+    Assert.assertEquals("Wrong path format", dbKey,
+            omKeyInfo.getPath());
+  }
+
+  /**
+   * Verify key name format and the DB key existence in the expected dirKeys
+   * list.
+   *
+   * @param key     table keyName
+   * @param dirKeys expected keyName
+   */
+  private void verifyKeyFormat(String key, ArrayList<String> dirKeys) {
+    String[] keyParts = StringUtils.split(key,
+            OzoneConsts.OM_KEY_PREFIX.charAt(0));
+    Assert.assertEquals("Invalid KeyName", 2, keyParts.length);
+    boolean removed = dirKeys.remove(key);
+    Assert.assertTrue("Key:" + key + " doesn't exists in directory table!",
+            removed);
+  }
+
+  /**
+   * Verify key name format and the DB key existence in the expected
+   * openFileKeys list.
+   *
+   * @param key          table keyName
+   * @param openFileKey expected keyName
+   */
+  private void verifyOpenKeyFormat(String key, String openFileKey) {
+    String[] keyParts = StringUtils.split(key,
+            OzoneConsts.OM_KEY_PREFIX.charAt(0));
+    Assert.assertEquals("Invalid KeyName:" + key, 3, keyParts.length);
+    String[] expectedOpenFileParts = StringUtils.split(openFileKey,
+            OzoneConsts.OM_KEY_PREFIX.charAt(0));
+    Assert.assertEquals("ParentId/Key:" + expectedOpenFileParts[0]
+                    + " doesn't exists in openFileTable!",
+            expectedOpenFileParts[0] + OzoneConsts.OM_KEY_PREFIX
+                    + expectedOpenFileParts[1],
+            keyParts[0] + OzoneConsts.OM_KEY_PREFIX + keyParts[1]);
+  }
+
+  long verifyDirKey(long parentId, String dirKey, String absolutePath,
+                    ArrayList<String> dirKeys, OMMetadataManager omMgr)
+          throws Exception {
+    String dbKey = parentId + OzoneConsts.OM_KEY_PREFIX + dirKey;
+    dirKeys.add(dbKey);
+    OmDirectoryInfo dirInfo = omMgr.getDirectoryTable().get(dbKey);
+    Assert.assertNotNull("Failed to find " + absolutePath +
+            " using dbKey: " + dbKey, dirInfo);
+    Assert.assertEquals("Parent Id mismatches", parentId,
+            dirInfo.getParentObjectID());
+    Assert.assertEquals("Mismatches directory name", dirKey,
+            dirInfo.getName());
+    Assert.assertTrue("Mismatches directory creation time param",
+            dirInfo.getCreationTime() > 0);
+    Assert.assertEquals("Mismatches directory modification time param",
+            dirInfo.getCreationTime(), dirInfo.getModificationTime());
+    return dirInfo.getObjectID();
+  }
+
+}
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index 19385c9..6707e08 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -774,6 +774,7 @@ message KeyInfo {
     repeated OzoneAclInfo acls = 13;
     optional uint64 objectID = 14;
     optional uint64 updateID = 15;
+    optional uint64 parentID = 16;
 }
 
 message DirectoryInfo {
diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
index c0948ce..caa8ed7 100644
--- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
+++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
@@ -406,12 +406,23 @@ public interface OMMetadataManager {
       getKeyIterator();
 
   /**
-   * Given a volume, bucket and a key, return the corresponding DB prefixKey
-   * key.
+   * Given a parent object id and a path component name, return the
+   * corresponding DB path key.
    *
    * @param parentObjectId - parent object Id
    * @param pathComponentName   - path component name
    * @return DB directory key as String.
    */
   String getOzonePathKey(long parentObjectId, String pathComponentName);
+
+  /**
+   * Returns the DB key name of an open file in the OM metadata store. The
+   * key is the parent object id, the leaf file name and the client id,
+   * joined by OM_KEY_PREFIX.
+   *
+   * @param parentObjectId - parent object Id
+   * @param fileName       - file name
+   * @param id             - client id for this open request
+   * @return DB open file key as String.
+   */
+  String getOpenFileName(long parentObjectId, String fileName, long id);
 }
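
Sketched with placeholder values, the open file key this contract
describes (the OmMetadataManagerImpl change below joins the parts with
OM_KEY_PREFIX):

    long parentId = 1026L;     // objectID of the parent directory
    long clientId = 98765L;    // client id of this open request
    String openKey = omMetadataManager.getOpenFileName(
            parentId, "file1", clientId);    // -> "1026/file1/98765"
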
diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java
index 3d8ace3..5855afc 100644
--- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java
+++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java
@@ -32,6 +32,14 @@ import static org.apache.hadoop.ozone.ClientVersions.CURRENT_VERSION;
 
 /**
  * Codec to encode OmKeyInfo as byte array.
+ *
+ * <p>
+ * If the layout version "ozone.om.layout.version" is V1 and
+ * "ozone.om.enable.filesystem.paths" is TRUE, then the DB stores only the
+ * leaf node name in the 'keyName' field.
+ * <p>
+ * For example, if the user given key path is '/a/b/c/d/e/file1', the DB
+ * 'keyName' field stores only the leaf node name, which is 'file1'.
  */
 public class OmKeyInfoCodec implements Codec<OmKeyInfo> {
   private static final Logger LOG =
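
A sketch of the leaf-name extraction this codec comment describes, using
OzoneFSUtils.getFileName() the same way OmKeyInfo.getFromProtobuf() above
does:

    String userKey = "a/b/c/d/e/file1";
    String leaf = OzoneFSUtils.getFileName(userKey);   // -> "file1"
    // Under V1, 'leaf' is what the persisted 'keyName' field stores.
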
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index cdf5fcb..22bbc20 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -75,6 +75,7 @@ import org.apache.hadoop.ozone.om.lock.OzoneManagerLock;
 import org.apache.hadoop.ozone.om.ratis.OMTransactionInfo;
 import org.apache.hadoop.ozone.storage.proto
     .OzoneManagerStorageProtos.PersistedUserVolumeInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -130,6 +131,10 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
    * |----------------------------------------------------------------------|
    * |  directoryTable    | parentId/directoryName -> DirectoryInfo         |
    * |----------------------------------------------------------------------|
+   * |  fileTable         | parentId/fileName -> KeyInfo                    |
+   * |----------------------------------------------------------------------|
+   * |  openFileTable     | parentId/fileName/id -> KeyInfo                 |
+   * |----------------------------------------------------------------------|
    * |  transactionInfoTable | #TRANSACTIONINFO -> OMTransactionInfo        |
    * |----------------------------------------------------------------------|
    */
@@ -145,6 +150,8 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
   public static final String DELEGATION_TOKEN_TABLE = "dTokenTable";
   public static final String PREFIX_TABLE = "prefixTable";
   public static final String DIRECTORY_TABLE = "directoryTable";
+  public static final String FILE_TABLE = "fileTable";
+  public static final String OPEN_FILE_TABLE = "openFileTable";
   public static final String TRANSACTION_INFO_TABLE =
       "transactionInfoTable";
 
@@ -164,6 +171,8 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
   private Table dTokenTable;
   private Table prefixTable;
   private Table dirTable;
+  private Table fileTable;
+  private Table openFileTable;
   private Table transactionInfoTable;
   private boolean isRatisEnabled;
   private boolean ignorePipelineinKey;
@@ -202,7 +211,8 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
    * For subclass overriding.
    */
   protected OmMetadataManagerImpl() {
-    this.lock = new OzoneManagerLock(new OzoneConfiguration());
+    OzoneConfiguration conf = new OzoneConfiguration();
+    this.lock = new OzoneManagerLock(conf);
     this.openKeyExpireThresholdMS =
         OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT;
     this.omEpoch = 0;
@@ -230,6 +240,9 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
 
   @Override
   public Table<String, OmKeyInfo> getKeyTable() {
+    if (OzoneManagerRatisUtils.isOmLayoutVersionV1()) {
+      return fileTable;
+    }
     return keyTable;
   }
 
@@ -240,6 +253,9 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
 
   @Override
   public Table<String, OmKeyInfo> getOpenKeyTable() {
+    if (OzoneManagerRatisUtils.isOmLayoutVersionV1()) {
+      return openFileTable;
+    }
     return openKeyTable;
   }
 
@@ -346,6 +362,8 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
         .addTable(S3_SECRET_TABLE)
         .addTable(PREFIX_TABLE)
         .addTable(DIRECTORY_TABLE)
+        .addTable(FILE_TABLE)
+        .addTable(OPEN_FILE_TABLE)
         .addTable(TRANSACTION_INFO_TABLE)
         .addCodec(OzoneTokenIdentifier.class, new TokenIdentifierCodec())
         .addCodec(OmKeyInfo.class, new OmKeyInfoCodec(true))
@@ -416,6 +434,14 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
             OmDirectoryInfo.class);
     checkTableStatus(dirTable, DIRECTORY_TABLE);
 
+    fileTable = this.store.getTable(FILE_TABLE, String.class,
+            OmKeyInfo.class);
+    checkTableStatus(fileTable, FILE_TABLE);
+
+    openFileTable = this.store.getTable(OPEN_FILE_TABLE, String.class,
+            OmKeyInfo.class);
+    checkTableStatus(openFileTable, OPEN_FILE_TABLE);
+
     transactionInfoTable = this.store.getTable(TRANSACTION_INFO_TABLE,
         String.class, OMTransactionInfo.class);
     checkTableStatus(transactionInfoTable, TRANSACTION_INFO_TABLE);
@@ -1189,4 +1215,13 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
     return builder.toString();
   }
 
+  @Override
+  public String getOpenFileName(long parentID, String fileName,
+                                long id) {
+    StringBuilder openKey = new StringBuilder();
+    openKey.append(parentID);
+    openKey.append(OM_KEY_PREFIX).append(fileName);
+    openKey.append(OM_KEY_PREFIX).append(id);
+    return openKey.toString();
+  }
 }
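
With this change, getKeyTable() and getOpenKeyTable() transparently route
callers to the new tables under the V1 layout. An illustrative sketch (not
part of the patch):

    Table<String, OmKeyInfo> table = omMetadataManager.getKeyTable();
    // V1 layout:  resolves to fileTable (parentId/fileName -> KeyInfo)
    // otherwise:  resolves to keyTable  (/volume/bucket/key -> KeyInfo)
    OmKeyInfo info = table.get(dbKey);  // dbKey format depends on the layout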
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index 1ea225b..d4c0f17 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -35,9 +35,11 @@ import org.apache.hadoop.ozone.om.request.bucket.acl.OMBucketSetAclRequest;
 import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest;
 import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestV1;
 import org.apache.hadoop.ozone.om.request.file.OMFileCreateRequest;
+import org.apache.hadoop.ozone.om.request.file.OMFileCreateRequestV1;
 import org.apache.hadoop.ozone.om.request.key.OMKeysDeleteRequest;
 import org.apache.hadoop.ozone.om.request.key.OMAllocateBlockRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequest;
+import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequestV1;
 import org.apache.hadoop.ozone.om.request.key.OMKeyCreateRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyDeleteRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyPurgeRequest;
@@ -86,7 +88,7 @@ public final class OzoneManagerRatisUtils {
 
   // TODO: Temporary workaround for OM upgrade path and will be replaced once
   //  upgrade HDDS-3698 story reaches consensus.
-  private static boolean omLayoutVersionV1 = true;
+  private static boolean omLayoutVersionV1 = false;
 
   private OzoneManagerRatisUtils() {
   }
@@ -138,6 +140,9 @@ public final class OzoneManagerRatisUtils {
     case CreateKey:
       return new OMKeyCreateRequest(omRequest);
     case CommitKey:
+      if (omLayoutVersionV1) {
+        return new OMKeyCommitRequestV1(omRequest);
+      }
       return new OMKeyCommitRequest(omRequest);
     case DeleteKey:
       return new OMKeyDeleteRequest(omRequest);
@@ -153,6 +158,9 @@ public final class OzoneManagerRatisUtils {
       }
       return new OMDirectoryCreateRequest(omRequest);
     case CreateFile:
+      if (omLayoutVersionV1) {
+        return new OMFileCreateRequestV1(omRequest);
+      }
       return new OMFileCreateRequest(omRequest);
     case PurgeKeys:
       return new OMKeyPurgeRequest(omRequest);
@@ -326,4 +334,13 @@ public final class OzoneManagerRatisUtils {
 
     return true;
   }
+
+  /**
+   * Returns whether the OM metadata layout version is V1.
+   * @return true if the layout version is V1, false otherwise.
+   */
+  public static boolean isOmLayoutVersionV1() {
+    return omLayoutVersionV1;
+  }
+
 }
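
Note that the default of omLayoutVersionV1 flips from true to false here, so
the V1 handlers are only chosen when the flag is explicitly enabled. An
illustrative dispatch sketch mirroring the switch in the hunk above:

    // inside the request-type switch:
    case CreateFile:
      if (OzoneManagerRatisUtils.isOmLayoutVersionV1()) {
        return new OMFileCreateRequestV1(omRequest);  // dir/file table layout
      }
      return new OMFileCreateRequest(omRequest);      // flat keyTable layout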
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java
index 8b0727a..c48ff78 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java
@@ -147,8 +147,10 @@ public class OMDirectoryCreateRequestV1 extends OMDirectoryCreateRequest {
           omDirectoryResult == NONE) {
 
         // prepare all missing parents
-        missingParentInfos = OMDirectoryCreateRequestV1.getAllParentDirInfo(
-                ozoneManager, keyArgs, omPathInfo, trxnLogIndex);
+        missingParentInfos =
+                OMDirectoryCreateRequestV1.getAllMissingParentDirInfo(
+                        ozoneManager, keyArgs, omPathInfo, trxnLogIndex);
+
         // prepare leafNode dir
         OmDirectoryInfo dirInfo = createDirectoryInfoWithACL(
                 omPathInfo.getLeafNodeName(),
@@ -229,14 +231,15 @@ public class OMDirectoryCreateRequestV1 extends OMDirectoryCreateRequest {
 
   /**
    * Construct OmDirectoryInfo for every parent directory in missing list.
-   * @param ozoneManager
-   * @param keyArgs
-   * @param pathInfo list of parent directories to be created and its ACLs
-   * @param trxnLogIndex
-   * @return
-   * @throws IOException
+   *
+   * @param ozoneManager Ozone Manager
+   * @param keyArgs      key arguments
+   * @param pathInfo     parent directories to be created and their ACLs
+   * @param trxnLogIndex transaction log index id
+   * @return list of missing parent directories
+   * @throws IOException DB failure
    */
-  public static List<OmDirectoryInfo> getAllParentDirInfo(
+  public static List<OmDirectoryInfo> getAllMissingParentDirInfo(
           OzoneManager ozoneManager, KeyArgs keyArgs,
           OMFileRequest.OMPathInfoV1 pathInfo, long trxnLogIndex)
           throws IOException {
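
For illustration (object ids hypothetical), when creating "a/b/c/d" while
only "a" already exists, the renamed getAllMissingParentDirInfo() returns:

    // missingParentInfos = [ OmDirectoryInfo("b", parent = <id of a>),
    //                        OmDirectoryInfo("c", parent = <id of b>) ]
    // the leaf "d" is built separately via createDirectoryInfoWithACL(...)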
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
index 0619062..82fd4a7 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
@@ -234,23 +234,10 @@ public class OMFileCreateRequest extends OMKeyRequest {
       List<OzoneAcl> inheritAcls = pathInfo.getAcls();
 
       // Check if a file or directory exists with same key name.
-      if (omDirectoryResult == FILE_EXISTS) {
-        if (!isOverWrite) {
-          throw new OMException("File " + keyName + " already exists",
-              OMException.ResultCodes.FILE_ALREADY_EXISTS);
-        }
-      } else if (omDirectoryResult == DIRECTORY_EXISTS) {
-        throw new OMException("Can not write to directory: " + keyName,
-            OMException.ResultCodes.NOT_A_FILE);
-      } else if (omDirectoryResult == FILE_EXISTS_IN_GIVENPATH) {
-        throw new OMException(
-            "Can not create file: " + keyName + " as there " +
-                "is already file in the given path",
-            OMException.ResultCodes.NOT_A_FILE);
-      }
+      checkDirectoryResult(keyName, isOverWrite, omDirectoryResult);
 
       if (!isRecursive) {
-        checkAllParentsExist(ozoneManager, keyArgs, pathInfo);
+        checkAllParentsExist(keyArgs, pathInfo);
       }
 
       // do open key
@@ -355,8 +342,40 @@ public class OMFileCreateRequest extends OMKeyRequest {
     return omClientResponse;
   }
 
-  private void checkAllParentsExist(OzoneManager ozoneManager,
-      KeyArgs keyArgs,
+  /**
+   * Verify om directory result.
+   *
+   * @param keyName           key name
+   * @param isOverWrite       flag represents whether file can be overwritten
+   * @param omDirectoryResult directory result
+   * @throws OMException if a file or directory already exists in the given path
+   */
+  protected void checkDirectoryResult(String keyName, boolean isOverWrite,
+      OMFileRequest.OMDirectoryResult omDirectoryResult) throws OMException {
+    if (omDirectoryResult == FILE_EXISTS) {
+      if (!isOverWrite) {
+        throw new OMException("File " + keyName + " already exists",
+            OMException.ResultCodes.FILE_ALREADY_EXISTS);
+      }
+    } else if (omDirectoryResult == DIRECTORY_EXISTS) {
+      throw new OMException("Can not write to directory: " + keyName,
+          OMException.ResultCodes.NOT_A_FILE);
+    } else if (omDirectoryResult == FILE_EXISTS_IN_GIVENPATH) {
+      throw new OMException(
+          "Can not create file: " + keyName + " as there " +
+              "is already file in the given path",
+          OMException.ResultCodes.NOT_A_FILE);
+    }
+  }
+
+  /**
+   * Verify the existence of parent directory.
+   *
+   * @param keyArgs  key arguments
+   * @param pathInfo om path info
+   * @throws IOException directory not found
+   */
+  protected void checkAllParentsExist(KeyArgs keyArgs,
       OMFileRequest.OMPathInfo pathInfo) throws IOException {
     String keyName = keyArgs.getKeyName();
 
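
The extracted checkDirectoryResult() keeps the existing behaviour; for
reference, its outcomes are:

    FILE_EXISTS               -> allowed only when isOverWrite is true,
                                 otherwise FILE_ALREADY_EXISTS
    DIRECTORY_EXISTS          -> NOT_A_FILE (cannot write to a directory)
    FILE_EXISTS_IN_GIVENPATH  -> NOT_A_FILE (a path component is a file)
    any other result          -> creation proceeds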
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
new file mode 100644
index 0000000..cabd407
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
@@ -0,0 +1,258 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.file;
+
+import com.google.common.base.Optional;
+import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OMMetrics;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponse;
+import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponseV1;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+
+/**
+ * Handles create file request for layout version V1.
+ */
+public class OMFileCreateRequestV1 extends OMFileCreateRequest {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OMFileCreateRequestV1.class);
+  public OMFileCreateRequestV1(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override
+  @SuppressWarnings("methodlength")
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+
+    CreateFileRequest createFileRequest = getOmRequest().getCreateFileRequest();
+    KeyArgs keyArgs = createFileRequest.getKeyArgs();
+    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
+
+    String volumeName = keyArgs.getVolumeName();
+    String bucketName = keyArgs.getBucketName();
+    String keyName = keyArgs.getKeyName();
+
+    // if isRecursive is true, the file is created even if parent
+    // directories do not exist.
+    boolean isRecursive = createFileRequest.getIsRecursive();
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("File create for : " + volumeName + "/" + bucketName + "/"
+          + keyName + ":" + isRecursive);
+    }
+
+    // if isOverWrite is true, file would be over written.
+    boolean isOverWrite = createFileRequest.getIsOverwrite();
+
+    OMMetrics omMetrics = ozoneManager.getMetrics();
+    omMetrics.incNumCreateFile();
+
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+
+    boolean acquiredLock = false;
+
+    OmBucketInfo omBucketInfo = null;
+    final List<OmKeyLocationInfo> locations = new ArrayList<>();
+    List<OmDirectoryInfo> missingParentInfos;
+    int numKeysCreated = 0;
+
+    OMClientResponse omClientResponse = null;
+    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
+        getOmRequest());
+    IOException exception = null;
+    Result result = null;
+    try {
+      keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
+      volumeName = keyArgs.getVolumeName();
+      bucketName = keyArgs.getBucketName();
+
+      if (keyName.length() == 0) {
+        // Check if this is the root of the filesystem.
+        throw new OMException("Can not write to directory: " + keyName,
+                OMException.ResultCodes.NOT_A_FILE);
+      }
+
+      // check Acl
+      checkKeyAcls(ozoneManager, volumeName, bucketName, keyName,
+          IAccessAuthorizer.ACLType.CREATE, OzoneObj.ResourceType.KEY);
+
+      // acquire lock
+      acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
+          volumeName, bucketName);
+
+      validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
+
+      OmKeyInfo dbFileInfo = null;
+
+      OMFileRequest.OMPathInfoV1 pathInfoV1 =
+              OMFileRequest.verifyDirectoryKeysInPath(omMetadataManager,
+                      volumeName, bucketName, keyName, Paths.get(keyName));
+
+      if (pathInfoV1.getDirectoryResult()
+              == OMFileRequest.OMDirectoryResult.FILE_EXISTS) {
+        String dbFileKey = omMetadataManager.getOzonePathKey(
+                pathInfoV1.getLastKnownParentId(),
+                pathInfoV1.getLeafNodeName());
+        dbFileInfo = OMFileRequest.getOmKeyInfoFromFileTable(false,
+                omMetadataManager, dbFileKey, keyName);
+      }
+
+      // check if a file or directory already exists with the same name
+      checkDirectoryResult(keyName, isOverWrite,
+              pathInfoV1.getDirectoryResult());
+
+      if (!isRecursive) {
+        checkAllParentsExist(keyArgs, pathInfoV1);
+      }
+
+      // add all missing parents to dir table
+      missingParentInfos =
+              OMDirectoryCreateRequestV1.getAllMissingParentDirInfo(
+                      ozoneManager, keyArgs, pathInfoV1, trxnLogIndex);
+
+      // total number of keys created.
+      numKeysCreated = missingParentInfos.size();
+
+      // do open key
+      OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().get(
+          omMetadataManager.getBucketKey(volumeName, bucketName));
+
+      OmKeyInfo omFileInfo = prepareFileInfo(omMetadataManager, keyArgs,
+              dbFileInfo, keyArgs.getDataSize(), locations,
+              getFileEncryptionInfo(keyArgs), ozoneManager.getPrefixManager(),
+              bucketInfo, pathInfoV1, trxnLogIndex,
+              pathInfoV1.getLeafNodeObjectId(),
+              ozoneManager.isRatisEnabled());
+
+      long openVersion = omFileInfo.getLatestVersionLocations().getVersion();
+      long clientID = createFileRequest.getClientID();
+      String dbOpenFileName = omMetadataManager.getOpenFileName(
+              pathInfoV1.getLastKnownParentId(), pathInfoV1.getLeafNodeName(),
+              clientID);
+
+      // Append new blocks
+      List<OmKeyLocationInfo> newLocationList = keyArgs.getKeyLocationsList()
+          .stream().map(OmKeyLocationInfo::getFromProtobuf)
+          .collect(Collectors.toList());
+      omFileInfo.appendNewBlocks(newLocationList, false);
+
+      omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
+      // check bucket and volume quota
+      long preAllocatedSpace = newLocationList.size()
+              * ozoneManager.getScmBlockSize()
+              * omFileInfo.getFactor().getNumber();
+      checkBucketQuotaInBytes(omBucketInfo, preAllocatedSpace);
+      checkBucketQuotaInNamespace(omBucketInfo, 1L);
+
+      // Adding the cache entry for this openKey can be done outside the
+      // lock. Even if the bucket gets deleted in the meantime, commitKey
+      // will detect the deletion.
+      OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager,
+              dbOpenFileName, omFileInfo, pathInfoV1.getLeafNodeName(),
+              trxnLogIndex);
+
+      // Add cache entries for the prefix directories.
+      // Skip adding for the file key itself, until Key Commit.
+      OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager,
+              Optional.absent(), Optional.of(missingParentInfos),
+              trxnLogIndex);
+
+      omBucketInfo.incrUsedBytes(preAllocatedSpace);
+      // Update namespace quota
+      omBucketInfo.incrUsedNamespace(1L);
+
+      // Prepare response. Sets user given full key name in the 'keyName'
+      // attribute in response object.
+      int clientVersion = getOmRequest().getVersion();
+      omResponse.setCreateFileResponse(CreateFileResponse.newBuilder()
+          .setKeyInfo(omFileInfo.getProtobuf(keyName, clientVersion))
+          .setID(clientID)
+          .setOpenVersion(openVersion).build())
+          .setCmdType(Type.CreateFile);
+      omClientResponse = new OMFileCreateResponseV1(omResponse.build(),
+              omFileInfo, missingParentInfos, clientID, omBucketInfo);
+
+      result = Result.SUCCESS;
+    } catch (IOException ex) {
+      result = Result.FAILURE;
+      exception = ex;
+      omMetrics.incNumCreateFileFails();
+      omResponse.setCmdType(Type.CreateFile);
+      omClientResponse = new OMFileCreateResponse(createErrorOMResponse(
+            omResponse, exception));
+    } finally {
+      addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
+          omDoubleBufferHelper);
+      if (acquiredLock) {
+        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
+            bucketName);
+      }
+    }
+
+    // Audit Log outside the lock
+    auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(
+        OMAction.CREATE_FILE, auditMap, exception,
+        getOmRequest().getUserInfo()));
+
+    switch (result) {
+    case SUCCESS:
+      omMetrics.incNumKeys(numKeysCreated);
+      LOG.debug("File created. Volume:{}, Bucket:{}, Key:{}", volumeName,
+          bucketName, keyName);
+      break;
+    case FAILURE:
+      LOG.error("File create failed. Volume:{}, Bucket:{}, Key{}.",
+          volumeName, bucketName, keyName, exception);
+      break;
+    default:
+      LOG.error("Unrecognized Result for OMFileCreateRequest: {}",
+          createFileRequest);
+    }
+
+    return omClientResponse;
+  }
+}
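
To illustrate the V1 write path end to end: creating "a/b/c/file1" in an
empty bucket leaves roughly the following DB state (object ids hypothetical;
the fileTable entry is written only at key commit):

    directoryTable:  512/a   -> OmDirectoryInfo(objectID = 1000)
                     1000/b  -> OmDirectoryInfo(objectID = 1001)
                     1001/c  -> OmDirectoryInfo(objectID = 1002)
    openFileTable:   1002/file1/<clientID> -> OmKeyInfo(keyName = "file1")

The pre-allocated quota charge is blocks * scmBlockSize * factor; e.g. two
blocks with the default 256 MB block size and factor THREE reserve
2 * 256 MB * 3 = 1536 MB against the bucket until commit corrects it.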
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index d5543ba..ce8d49b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -25,6 +25,7 @@ import java.util.Iterator;
 import java.util.List;
 
 import com.google.common.base.Optional;
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.OzoneAcl;
@@ -34,6 +35,7 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.jetbrains.annotations.Nullable;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -396,7 +398,7 @@ public final class OMFileRequest {
   /**
    * Adding directory info to the Table cache.
    *
-   * @param omMetadataManager  OM Metdata Manager
+   * @param omMetadataManager  OM Metadata Manager
    * @param dirInfo            directory info
    * @param missingParentInfos list of the parents to be added to DB
    * @param trxnLogIndex       transaction log index
@@ -422,4 +424,132 @@ public final class OMFileRequest {
     }
   }
 
+  /**
+   * Adds key info to the openFileTable cache.
+   *
+   * @param omMetadataManager OM Metadata Manager
+   * @param dbOpenFileName    open file name key
+   * @param omFileInfo        key info
+   * @param fileName          file name
+   * @param trxnLogIndex      transaction log index
+   * Note: the cached OmKeyInfo keeps only the leaf node name in keyName.
+   */
+  public static void addOpenFileTableCacheEntry(
+          OMMetadataManager omMetadataManager, String dbOpenFileName,
+          @Nullable OmKeyInfo omFileInfo, String fileName, long trxnLogIndex) {
+
+    Optional<OmKeyInfo> keyInfoOptional = Optional.absent();
+    if (omFileInfo != null) {
+      // New key format for the openFileTable.
+      // For example, the user given key path is '/a/b/c/d/e/file1', then in DB
+      // keyName field stores only the leaf node name, which is 'file1'.
+      omFileInfo.setKeyName(fileName);
+      keyInfoOptional = Optional.of(omFileInfo);
+    }
+
+    omMetadataManager.getOpenKeyTable().addCacheEntry(
+            new CacheKey<>(dbOpenFileName),
+            new CacheValue<>(keyInfoOptional, trxnLogIndex));
+  }
+
+  /**
+   * Adds key info to the fileTable cache.
+   *
+   * @param omMetadataManager OM Metadata Manager
+   * @param dbFileKey         file name key
+   * @param omFileInfo        key info
+   * @param fileName          file name
+   * @param trxnLogIndex      transaction log index
+   * Note: the cached OmKeyInfo keeps only the leaf node name in keyName.
+   */
+  public static void addFileTableCacheEntry(
+          OMMetadataManager omMetadataManager, String dbFileKey,
+          OmKeyInfo omFileInfo, String fileName, long trxnLogIndex) {
+
+    // New key format for the fileTable.
+    // For example, the user given key path is '/a/b/c/d/e/file1', then in DB
+    // keyName field stores only the leaf node name, which is 'file1'.
+    omFileInfo.setKeyName(fileName);
+
+    omMetadataManager.getKeyTable().addCacheEntry(
+            new CacheKey<>(dbFileKey),
+            new CacheValue<>(Optional.of(omFileInfo), trxnLogIndex));
+  }
+
+  /**
+   * Adds omKeyInfo to the open file table.
+   *
+   * @param omMetadataMgr    OM Metadata Manager
+   * @param batchOp          batch of db operations
+   * @param omFileInfo       omKeyInfo
+   * @param openKeySessionID clientID
+   * @throws IOException DB failure
+   */
+  public static void addToOpenFileTable(OMMetadataManager omMetadataMgr,
+                                        BatchOperation batchOp,
+                                        OmKeyInfo omFileInfo,
+                                        long openKeySessionID)
+          throws IOException {
+
+    String dbOpenFileKey = omMetadataMgr.getOpenFileName(
+            omFileInfo.getParentObjectID(), omFileInfo.getFileName(),
+            openKeySessionID);
+
+    omMetadataMgr.getOpenKeyTable().putWithBatch(batchOp, dbOpenFileKey,
+            omFileInfo);
+  }
+
+  /**
+   * Adds omKeyInfo to the file table.
+   *
+   * @param omMetadataMgr OM Metadata Manager
+   * @param batchOp       batch of db operations
+   * @param omFileInfo    omKeyInfo
+   * @throws IOException DB failure
+   */
+  public static void addToFileTable(OMMetadataManager omMetadataMgr,
+                                    BatchOperation batchOp,
+                                    OmKeyInfo omFileInfo)
+          throws IOException {
+
+    String dbFileKey = omMetadataMgr.getOzonePathKey(
+            omFileInfo.getParentObjectID(), omFileInfo.getFileName());
+
+    omMetadataMgr.getKeyTable().putWithBatch(batchOp,
+            dbFileKey, omFileInfo);
+  }
+
+  /**
+   * Gets om key info from the open key table if the openFileTable flag is
+   * true, otherwise from the key table.
+   *
+   * @param openFileTable if true, read the KeyInfo from openFileTable,
+   *                      otherwise from fileTable
+   * @param omMetadataMgr OM Metadata Manager
+   * @param dbOpenFileKey open file key name in DB
+   * @param keyName       key name
+   * @return om key info
+   * @throws IOException DB failure
+   */
+  public static OmKeyInfo getOmKeyInfoFromFileTable(boolean openFileTable,
+      OMMetadataManager omMetadataMgr, String dbOpenFileKey, String keyName)
+          throws IOException {
+
+    OmKeyInfo dbOmKeyInfo;
+    if (openFileTable) {
+      dbOmKeyInfo = omMetadataMgr.getOpenKeyTable().get(dbOpenFileKey);
+    } else {
+      dbOmKeyInfo = omMetadataMgr.getKeyTable().get(dbOpenFileKey);
+    }
+
+    // DB OMKeyInfo will store only fileName into keyName field. This
+    // function is to set user given keyName into the OmKeyInfo object.
+    // For example, the user given key path is '/a/b/c/d/e/file1', then in DB
+    // keyName field stores only the leaf node name, which is 'file1'.
+    if (dbOmKeyInfo != null) {
+      dbOmKeyInfo.setKeyName(keyName);
+    }
+    return dbOmKeyInfo;
+  }
+
 }
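
An illustrative sketch (not part of the patch) of how a response class might
flush an open file entry with the new batch helpers, assuming the standard
DBStore batch API:

    try (BatchOperation batchOp =
             omMetadataMgr.getStore().initBatchOperation()) {
      // writes to openFileTable under parentId/fileName/clientID
      OMFileRequest.addToOpenFileTable(omMetadataMgr, batchOp, omFileInfo,
          clientID);
      omMetadataMgr.getStore().commitBatchOperation(batchOp);
    }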
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
index fe72ea2..c0bc773 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
@@ -233,6 +233,30 @@ public class OMKeyCommitRequest extends OMKeyRequest {
     auditLog(auditLogger, buildAuditMessage(OMAction.COMMIT_KEY, auditMap,
           exception, getOmRequest().getUserInfo()));
 
+    processResult(commitKeyRequest, volumeName, bucketName, keyName, omMetrics,
+            exception, omKeyInfo, result);
+
+    return omClientResponse;
+  }
+
+  /**
+   * Process result of om request execution.
+   *
+   * @param commitKeyRequest commit key request
+   * @param volumeName       volume name
+   * @param bucketName       bucket name
+   * @param keyName          key name
+   * @param omMetrics        om metrics
+   * @param exception        exception trace
+   * @param omKeyInfo        omKeyInfo
+   * @param result           stores the result of the execution
+   */
+  @SuppressWarnings("parameternumber")
+  protected void processResult(CommitKeyRequest commitKeyRequest,
+                               String volumeName, String bucketName,
+                               String keyName, OMMetrics omMetrics,
+                               IOException exception, OmKeyInfo omKeyInfo,
+                               Result result) {
     switch (result) {
     case SUCCESS:
       // As when we commit the key, then it is visible in ozone, so we should
@@ -244,18 +268,16 @@ public class OMKeyCommitRequest extends OMKeyRequest {
         omMetrics.incNumKeys();
       }
       LOG.debug("Key committed. Volume:{}, Bucket:{}, Key:{}", volumeName,
-          bucketName, keyName);
+              bucketName, keyName);
       break;
     case FAILURE:
-      LOG.error("Key commit failed. Volume:{}, Bucket:{}, Key:{}.",
-          volumeName, bucketName, keyName, exception);
+      LOG.error("Key commit failed. Volume:{}, Bucket:{}, Key:{}. Exception:{}",
+              volumeName, bucketName, keyName, exception);
       omMetrics.incNumKeyCommitFails();
       break;
     default:
       LOG.error("Unrecognized Result for OMKeyCommitRequest: {}",
-          commitKeyRequest);
+              commitKeyRequest);
     }
-
-    return omClientResponse;
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java
similarity index 53%
copy from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
copy to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java
index fe72ea2..d7ffa2d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java
@@ -18,87 +18,57 @@
 
 package org.apache.hadoop.ozone.om.request.key;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import org.apache.hadoop.ozone.audit.AuditLogger;
 import org.apache.hadoop.ozone.audit.OMAction;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OMMetrics;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.key.OMKeyCommitResponse;
+import org.apache.hadoop.ozone.om.response.key.OMKeyCommitResponseV1;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CommitKeyRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocation;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
 
 /**
- * Handles CommitKey request.
+ * Handles CommitKey request for layout version V1.
  */
-public class OMKeyCommitRequest extends OMKeyRequest {
+public class OMKeyCommitRequestV1 extends OMKeyCommitRequest {
 
   private static final Logger LOG =
-      LoggerFactory.getLogger(OMKeyCommitRequest.class);
+          LoggerFactory.getLogger(OMKeyCommitRequestV1.class);
 
-  public OMKeyCommitRequest(OMRequest omRequest) {
+  public OMKeyCommitRequestV1(OMRequest omRequest) {
     super(omRequest);
   }
 
   @Override
-  public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
-    CommitKeyRequest commitKeyRequest = getOmRequest().getCommitKeyRequest();
-    Preconditions.checkNotNull(commitKeyRequest);
-
-    KeyArgs keyArgs = commitKeyRequest.getKeyArgs();
-
-    // Verify key name
-    final boolean checkKeyNameEnabled = ozoneManager.getConfiguration()
-         .getBoolean(OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_KEY,
-                 OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_DEFAULT);
-    if(checkKeyNameEnabled){
-      OmUtils.validateKeyName(StringUtils.removeEnd(keyArgs.getKeyName(),
-              OzoneConsts.FS_FILE_COPYING_TEMP_SUFFIX));
-    }
-
-    KeyArgs.Builder newKeyArgs =
-        keyArgs.toBuilder().setModificationTime(Time.now())
-            .setKeyName(validateAndNormalizeKey(
-                ozoneManager.getEnableFileSystemPaths(), keyArgs.getKeyName()));
-
-    return getOmRequest().toBuilder()
-        .setCommitKeyRequest(commitKeyRequest.toBuilder()
-            .setKeyArgs(newKeyArgs)).setUserInfo(getUserInfo()).build();
-  }
-
-  @Override
   @SuppressWarnings("methodlength")
   public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
       long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
@@ -119,10 +89,11 @@ public class OMKeyCommitRequest extends OMKeyRequest {
     Map<String, String> auditMap = buildKeyArgsAuditMap(commitKeyArgs);
 
     OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
-        getOmRequest());
+            getOmRequest());
 
     IOException exception = null;
     OmKeyInfo omKeyInfo = null;
+    OmVolumeArgs omVolumeArgs = null;
     OmBucketInfo omBucketInfo = null;
     OMClientResponse omClientResponse = null;
     boolean bucketLockAcquired = false;
@@ -137,14 +108,13 @@ public class OMKeyCommitRequest extends OMKeyRequest {
 
       // check Acl
       checkKeyAclsInOpenKeyTable(ozoneManager, volumeName, bucketName,
-          keyName, IAccessAuthorizer.ACLType.WRITE,
-          commitKeyRequest.getClientID());
+              keyName, IAccessAuthorizer.ACLType.WRITE,
+              commitKeyRequest.getClientID());
 
-      String dbOzoneKey =
-          omMetadataManager.getOzoneKey(volumeName, bucketName,
-              keyName);
-      String dbOpenKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-          keyName, commitKeyRequest.getClientID());
+
+      String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+      Iterator<Path> pathComponents = Paths.get(keyName).iterator();
+      String dbOpenFileKey = null;
 
       List<OmKeyLocationInfo> locationInfoList = new ArrayList<>();
       for (KeyLocation keyLocation : commitKeyArgs.getKeyLocationsList()) {
@@ -152,110 +122,148 @@ public class OMKeyCommitRequest extends OMKeyRequest {
       }
 
       bucketLockAcquired =
-          omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
-              volumeName, bucketName);
+              omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
+                      volumeName, bucketName);
 
       validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
 
-      // Check for directory exists with same name, if it exists throw error.
-      if (ozoneManager.getEnableFileSystemPaths()) {
-        if (checkDirectoryAlreadyExists(volumeName, bucketName, keyName,
-            omMetadataManager)) {
-          throw new OMException("Can not create file: " + keyName +
-              " as there is already directory in the given path", NOT_A_FILE);
-        }
-        // Ensure the parent exist.
-        if (!"".equals(OzoneFSUtils.getParent(keyName))
-            && !checkDirectoryAlreadyExists(volumeName, bucketName,
-            OzoneFSUtils.getParent(keyName), omMetadataManager)) {
-          throw new OMException("Cannot create file : " + keyName
-              + " as parent directory doesn't exist",
-              OMException.ResultCodes.DIRECTORY_NOT_FOUND);
-        }
-      }
-
-      omKeyInfo = omMetadataManager.getOpenKeyTable().get(dbOpenKey);
+      String fileName = OzoneFSUtils.getFileName(keyName);
+      omBucketInfo = omMetadataManager.getBucketTable().get(bucketKey);
+      long bucketId = omBucketInfo.getObjectID();
+      long parentID = getParentID(bucketId, pathComponents, keyName,
+              omMetadataManager, ozoneManager);
+      String dbFileKey = omMetadataManager.getOzonePathKey(parentID, fileName);
+      dbOpenFileKey = omMetadataManager.getOpenFileName(parentID, fileName,
+              commitKeyRequest.getClientID());
+
+      omKeyInfo = OMFileRequest.getOmKeyInfoFromFileTable(true,
+              omMetadataManager, dbOpenFileKey, keyName);
       if (omKeyInfo == null) {
-        throw new OMException("Failed to commit key, as " + dbOpenKey +
-            "entry is not found in the OpenKey table", KEY_NOT_FOUND);
+        throw new OMException("Failed to commit key, as " + dbOpenFileKey +
+                "entry is not found in the OpenKey table", KEY_NOT_FOUND);
       }
       omKeyInfo.setDataSize(commitKeyArgs.getDataSize());
 
       omKeyInfo.setModificationTime(commitKeyArgs.getModificationTime());
 
       // Update the block length for each block
-      List<OmKeyLocationInfo> allocatedLocationInfoList =
-          omKeyInfo.getLatestVersionLocations().getLocationList();
-      omKeyInfo.updateLocationInfoList(locationInfoList, false);
+      omKeyInfo.updateLocationInfoList(locationInfoList);
 
       // Set the UpdateID to current transactionLogIndex
       omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
 
       // Add to cache of open key table and key table.
-      omMetadataManager.getOpenKeyTable().addCacheEntry(
-          new CacheKey<>(dbOpenKey),
-          new CacheValue<>(Optional.absent(), trxnLogIndex));
+      OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager, dbFileKey,
+              null, fileName, trxnLogIndex);
 
-      omMetadataManager.getKeyTable().addCacheEntry(
-          new CacheKey<>(dbOzoneKey),
-          new CacheValue<>(Optional.of(omKeyInfo), trxnLogIndex));
+      OMFileRequest.addFileTableCacheEntry(omMetadataManager, dbFileKey,
+              omKeyInfo, fileName, trxnLogIndex);
 
       long scmBlockSize = ozoneManager.getScmBlockSize();
       int factor = omKeyInfo.getFactor().getNumber();
-      omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
       // Block was pre-requested and UsedBytes updated when createKey and
       // AllocatedBlock. The space occupied by the Key shall be based on
       // the actual Key size, and the total Block size applied before should
       // be subtracted.
       long correctedSpace = omKeyInfo.getDataSize() * factor -
-          allocatedLocationInfoList.size() * scmBlockSize * factor;
+              locationInfoList.size() * scmBlockSize * factor;
       omBucketInfo.incrUsedBytes(correctedSpace);
 
-      omClientResponse = new OMKeyCommitResponse(omResponse.build(),
-          omKeyInfo, dbOzoneKey, dbOpenKey, omBucketInfo.copyObject());
+      omClientResponse = new OMKeyCommitResponseV1(omResponse.build(),
+              omKeyInfo, dbFileKey, dbOpenFileKey, omVolumeArgs, omBucketInfo);
 
       result = Result.SUCCESS;
     } catch (IOException ex) {
       result = Result.FAILURE;
       exception = ex;
-      omClientResponse = new OMKeyCommitResponse(createErrorOMResponse(
-          omResponse, exception));
+      omClientResponse = new OMKeyCommitResponseV1(createErrorOMResponse(
+              omResponse, exception));
     } finally {
       addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
-          omDoubleBufferHelper);
+              omDoubleBufferHelper);
 
       if(bucketLockAcquired) {
         omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
-            bucketName);
+                bucketName);
       }
     }
 
     auditLog(auditLogger, buildAuditMessage(OMAction.COMMIT_KEY, auditMap,
-          exception, getOmRequest().getUserInfo()));
-
-    switch (result) {
-    case SUCCESS:
-      // As when we commit the key, then it is visible in ozone, so we should
-      // increment here.
-      // As key also can have multiple versions, we need to increment keys
-      // only if version is 0. Currently we have not complete support of
-      // versioning of keys. So, this can be revisited later.
-      if (omKeyInfo.getKeyLocationVersions().size() == 1) {
-        omMetrics.incNumKeys();
+            exception, getOmRequest().getUserInfo()));
+
+    processResult(commitKeyRequest, volumeName, bucketName, keyName, omMetrics,
+            exception, omKeyInfo, result);
+
+    return omClientResponse;
+  }
+
+
+  /**
+   * Check for directory exists with same name, if it exists throw error.
+   *
+   * @param keyName                  key name
+   * @param ozoneManager             Ozone Manager
+   * @param reachedLastPathComponent true if the path component is a fileName
+   * @throws IOException if directory exists with same name
+   */
+  private void checkDirectoryAlreadyExists(String keyName,
+                                           OzoneManager ozoneManager,
+                                           boolean reachedLastPathComponent)
+          throws IOException {
+    // The last path component already exists as a directory in the DB.
+    if (reachedLastPathComponent && ozoneManager.getEnableFileSystemPaths()) {
+      throw new OMException("Can not create file: " + keyName +
+              " as there is already directory in the given path", NOT_A_FILE);
+    }
+  }
+
+  /**
+   * Get parent id for the user given path.
+   *
+   * @param bucketId          bucket id
+   * @param pathComponents    file path elements
+   * @param keyName           user given key name
+   * @param omMetadataManager metadata manager
+   * @return lastKnownParentID
+   * @throws IOException DB failure or parent does not exist in DirectoryTable
+   */
+  private long getParentID(long bucketId, Iterator<Path> pathComponents,
+                           String keyName, OMMetadataManager omMetadataManager,
+                           OzoneManager ozoneManager)
+          throws IOException {
+
+    long lastKnownParentId = bucketId;
+
+    // If no sub-dirs then bucketID is the root/parent.
+    if (!pathComponents.hasNext()) {
+      return bucketId;
+    }
+
+    OmDirectoryInfo omDirectoryInfo;
+    while (pathComponents.hasNext()) {
+      String nodeName = pathComponents.next().toString();
+      boolean reachedLastPathComponent = !pathComponents.hasNext();
+      String dbNodeName =
+              omMetadataManager.getOzonePathKey(lastKnownParentId, nodeName);
+
+      omDirectoryInfo = omMetadataManager.
+              getDirectoryTable().get(dbNodeName);
+      if (omDirectoryInfo != null) {
+        checkDirectoryAlreadyExists(keyName, ozoneManager,
+                reachedLastPathComponent);
+        lastKnownParentId = omDirectoryInfo.getObjectID();
+      } else {
+        // One of the sub-dirs doesn't exist in the DB. The immediate parent
+        // must exist to commit the key, otherwise the operation fails.
+        if (!reachedLastPathComponent) {
+          throw new OMException("Failed to commit key, as parent directory of "
+                  + keyName + " entry is not found in DirectoryTable",
+                  KEY_NOT_FOUND);
+        }
+        break;
       }
-      LOG.debug("Key committed. Volume:{}, Bucket:{}, Key:{}", volumeName,
-          bucketName, keyName);
-      break;
-    case FAILURE:
-      LOG.error("Key commit failed. Volume:{}, Bucket:{}, Key:{}.",
-          volumeName, bucketName, keyName, exception);
-      omMetrics.incNumKeyCommitFails();
-      break;
-    default:
-      LOG.error("Unrecognized Result for OMKeyCommitRequest: {}",
-          commitKeyRequest);
     }
 
-    return omClientResponse;
+    return lastKnownParentId;
   }
 }
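
To illustrate getParentID(): committing "a/b/file1" walks the DirectoryTable
from the bucket objectID (ids hypothetical):

    get("512/a")      -> OmDirectoryInfo(objectID = 1000)  // 512 = bucket id
    get("1000/b")     -> OmDirectoryInfo(objectID = 1001)
    get("1001/file1") -> null on the last component, so parentID = 1001

If "1000/b" were missing, the commit fails with KEY_NOT_FOUND because the
immediate parent must already exist in the DirectoryTable.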
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
index 814e065..7319690 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
@@ -240,38 +241,6 @@ public abstract class OMKeyRequest extends OMClientRequest {
     return edek;
   }
 
-  /**
-   * Create OmKeyInfo object.
-   * @return OmKeyInfo
-   */
-  @SuppressWarnings("parameterNumber")
-  protected OmKeyInfo createKeyInfo(@Nonnull KeyArgs keyArgs,
-      @Nonnull List<OmKeyLocationInfo> locations,
-      @Nonnull HddsProtos.ReplicationFactor factor,
-      @Nonnull HddsProtos.ReplicationType type, long size,
-      @Nullable FileEncryptionInfo encInfo,
-      @Nonnull PrefixManager prefixManager,
-      @Nullable OmBucketInfo omBucketInfo,
-      long transactionLogIndex, long objectID) {
-    return new OmKeyInfo.Builder()
-        .setVolumeName(keyArgs.getVolumeName())
-        .setBucketName(keyArgs.getBucketName())
-        .setKeyName(keyArgs.getKeyName())
-        .setOmKeyLocationInfos(Collections.singletonList(
-            new OmKeyLocationInfoGroup(0, locations)))
-        .setCreationTime(keyArgs.getModificationTime())
-        .setModificationTime(keyArgs.getModificationTime())
-        .setDataSize(size)
-        .setReplicationType(type)
-        .setReplicationFactor(factor)
-        .setFileEncryptionInfo(encInfo)
-        .setAcls(getAclsForKey(keyArgs, omBucketInfo, prefixManager))
-        .addAllMetadata(KeyValueUtil.getFromProtobuf(keyArgs.getMetadataList()))
-        .setObjectID(objectID)
-        .setUpdateID(transactionLogIndex)
-        .build();
-  }
-
   private List< OzoneAcl > getAclsForKey(KeyArgs keyArgs,
       OmBucketInfo bucketInfo, PrefixManager prefixManager) {
     List<OzoneAcl> acls = new ArrayList<>();
@@ -311,96 +280,6 @@ public abstract class OMKeyRequest extends OMClientRequest {
   }
 
   /**
-   * Prepare OmKeyInfo which will be persisted to openKeyTable.
-   * @return OmKeyInfo
-   * @throws IOException
-   */
-  @SuppressWarnings("parameternumber")
-  protected OmKeyInfo prepareKeyInfo(
-      @Nonnull OMMetadataManager omMetadataManager,
-      @Nonnull KeyArgs keyArgs, OmKeyInfo dbKeyInfo, long size,
-      @Nonnull List<OmKeyLocationInfo> locations,
-      @Nullable FileEncryptionInfo encInfo,
-      @Nonnull PrefixManager prefixManager,
-      @Nullable OmBucketInfo omBucketInfo,
-      long transactionLogIndex,
-      @Nonnull long objectID,
-      boolean isRatisEnabled)
-      throws IOException {
-    if (keyArgs.getIsMultipartKey()) {
-      return prepareMultipartKeyInfo(omMetadataManager, keyArgs,
-          size, locations, encInfo, prefixManager, omBucketInfo,
-          transactionLogIndex, objectID);
-      //TODO args.getMetadata
-    }
-    if (dbKeyInfo != null) {
-      // TODO: Need to be fixed, as when key already exists, we are
-      //  appending new blocks to existing key.
-      // The key already exist, the new blocks will be added as new version
-      // when locations.size = 0, the new version will have identical blocks
-      // as its previous version
-      dbKeyInfo.addNewVersion(locations, false);
-      dbKeyInfo.setDataSize(size + dbKeyInfo.getDataSize());
-      // The modification time is set in preExecute. Use the same
-      // modification time.
-      dbKeyInfo.setModificationTime(keyArgs.getModificationTime());
-      dbKeyInfo.setUpdateID(transactionLogIndex, isRatisEnabled);
-      return dbKeyInfo;
-    }
-
-    // the key does not exist, create a new object.
-    // Blocks will be appended as version 0.
-    return createKeyInfo(keyArgs, locations, keyArgs.getFactor(),
-        keyArgs.getType(), keyArgs.getDataSize(), encInfo, prefixManager,
-        omBucketInfo, transactionLogIndex, objectID);
-  }
-
-  /**
-   * Prepare OmKeyInfo for multi-part upload part key which will be persisted
-   * to openKeyTable.
-   * @return OmKeyInfo
-   * @throws IOException
-   */
-  @SuppressWarnings("parameternumber")
-  private OmKeyInfo prepareMultipartKeyInfo(
-      @Nonnull OMMetadataManager omMetadataManager,
-      @Nonnull KeyArgs args, long size,
-      @Nonnull List<OmKeyLocationInfo> locations,
-      FileEncryptionInfo encInfo,  @Nonnull PrefixManager prefixManager,
-      @Nullable OmBucketInfo omBucketInfo, @Nonnull long transactionLogIndex,
-      @Nonnull long objectId)
-      throws IOException {
-    HddsProtos.ReplicationFactor factor;
-    HddsProtos.ReplicationType type;
-
-    Preconditions.checkArgument(args.getMultipartNumber() > 0,
-        "PartNumber Should be greater than zero");
-    // When key is multipart upload part key, we should take replication
-    // type and replication factor from original key which has done
-    // initiate multipart upload. If we have not found any such, we throw
-    // error no such multipart upload.
-    String uploadID = args.getMultipartUploadID();
-    Preconditions.checkNotNull(uploadID);
-    String multipartKey = omMetadataManager
-        .getMultipartKey(args.getVolumeName(), args.getBucketName(),
-            args.getKeyName(), uploadID);
-    OmKeyInfo partKeyInfo = omMetadataManager.getOpenKeyTable().get(
-        multipartKey);
-    if (partKeyInfo == null) {
-      throw new OMException("No such Multipart upload is with specified " +
-          "uploadId " + uploadID,
-          OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
-    } else {
-      factor = partKeyInfo.getFactor();
-      type = partKeyInfo.getType();
-    }
-    // For this upload part we don't need to check in KeyTable. As this
-    // is not an actual key, it is a part of the key.
-    return createKeyInfo(args, locations, factor, type, size, encInfo,
-        prefixManager, omBucketInfo, transactionLogIndex, objectId);
-  }
-
-  /**
    * Check Acls for the ozone bucket.
    * @param ozoneManager
    * @param volume
@@ -418,7 +297,6 @@ public abstract class OMKeyRequest extends OMClientRequest {
     }
   }
 
-
   /**
    * Check Acls for the ozone key.
    * @param ozoneManager
@@ -679,4 +557,158 @@ public abstract class OMKeyRequest extends OMClientRequest {
         new CacheKey<>(omMetadataManager.getBucketKey(volume, bucket)))
         .getCacheValue();
   }
+
+  /**
+   * Prepare OmKeyInfo which will be persisted to openKeyTable.
+   * @return OmKeyInfo
+   * @throws IOException
+   */
+  @SuppressWarnings("parameternumber")
+  protected OmKeyInfo prepareKeyInfo(
+          @Nonnull OMMetadataManager omMetadataManager,
+          @Nonnull KeyArgs keyArgs, OmKeyInfo dbKeyInfo, long size,
+          @Nonnull List<OmKeyLocationInfo> locations,
+          @Nullable FileEncryptionInfo encInfo,
+          @Nonnull PrefixManager prefixManager,
+          @Nullable OmBucketInfo omBucketInfo,
+          long transactionLogIndex, long objectID, boolean isRatisEnabled)
+          throws IOException {
+
+    return prepareFileInfo(omMetadataManager, keyArgs, dbKeyInfo, size,
+            locations, encInfo, prefixManager, omBucketInfo, null,
+            transactionLogIndex, objectID, isRatisEnabled);
+  }
+
+  /**
+   * Prepare OmKeyInfo which will be persisted to openKeyTable.
+   * @return OmKeyInfo
+   * @throws IOException
+   */
+  @SuppressWarnings("parameternumber")
+  protected OmKeyInfo prepareFileInfo(
+          @Nonnull OMMetadataManager omMetadataManager,
+          @Nonnull KeyArgs keyArgs, OmKeyInfo dbKeyInfo, long size,
+          @Nonnull List<OmKeyLocationInfo> locations,
+          @Nullable FileEncryptionInfo encInfo,
+          @Nonnull PrefixManager prefixManager,
+          @Nullable OmBucketInfo omBucketInfo,
+          OMFileRequest.OMPathInfoV1 omPathInfo,
+          long transactionLogIndex, long objectID,
+          boolean isRatisEnabled)
+          throws IOException {
+    if (keyArgs.getIsMultipartKey()) {
+      return prepareMultipartFileInfo(omMetadataManager, keyArgs,
+              size, locations, encInfo, prefixManager, omBucketInfo,
+              omPathInfo, transactionLogIndex, objectID);
+      //TODO args.getMetadata
+    }
+    if (dbKeyInfo != null) {
+      // TODO: Need to be fixed, as when key already exists, we are
+      //  appending new blocks to existing key.
+      // The key already exist, the new blocks will be added as new version
+      // when locations.size = 0, the new version will have identical blocks
+      // as its previous version
+      dbKeyInfo.addNewVersion(locations, false);
+      dbKeyInfo.setDataSize(size + dbKeyInfo.getDataSize());
+      // The modification time is set in preExecute. Use the same
+      // modification time.
+      dbKeyInfo.setModificationTime(keyArgs.getModificationTime());
+      dbKeyInfo.setUpdateID(transactionLogIndex, isRatisEnabled);
+      return dbKeyInfo;
+    }
+
+    // the key does not exist, create a new object.
+    // Blocks will be appended as version 0.
+    return createFileInfo(keyArgs, locations, keyArgs.getFactor(),
+            keyArgs.getType(), keyArgs.getDataSize(), encInfo, prefixManager,
+            omBucketInfo, omPathInfo, transactionLogIndex, objectID);
+  }
+
+  /**
+   * Create OmKeyInfo object.
+   * @return OmKeyInfo
+   */
+  @SuppressWarnings("parameterNumber")
+  protected OmKeyInfo createFileInfo(@Nonnull KeyArgs keyArgs,
+      @Nonnull List<OmKeyLocationInfo> locations,
+      @Nonnull HddsProtos.ReplicationFactor factor,
+      @Nonnull HddsProtos.ReplicationType type, long size,
+      @Nullable FileEncryptionInfo encInfo,
+      @Nonnull PrefixManager prefixManager,
+      @Nullable OmBucketInfo omBucketInfo,
+      OMFileRequest.OMPathInfoV1 omPathInfo,
+      long transactionLogIndex, long objectID) {
+
+    OmKeyInfo.Builder builder = new OmKeyInfo.Builder();
+    builder.setVolumeName(keyArgs.getVolumeName())
+            .setBucketName(keyArgs.getBucketName())
+            .setKeyName(keyArgs.getKeyName())
+            .setOmKeyLocationInfos(Collections.singletonList(
+                    new OmKeyLocationInfoGroup(0, locations)))
+            .setCreationTime(keyArgs.getModificationTime())
+            .setModificationTime(keyArgs.getModificationTime())
+            .setDataSize(size)
+            .setReplicationType(type)
+            .setReplicationFactor(factor)
+            .setFileEncryptionInfo(encInfo)
+            .setAcls(getAclsForKey(keyArgs, omBucketInfo, prefixManager))
+            .addAllMetadata(KeyValueUtil.getFromProtobuf(
+                    keyArgs.getMetadataList()))
+            .setUpdateID(transactionLogIndex);
+    if (omPathInfo != null) {
+      // FileTable metadata format
+      objectID = omPathInfo.getLeafNodeObjectId();
+      builder.setParentObjectID(omPathInfo.getLastKnownParentId());
+      builder.setFileName(omPathInfo.getLeafNodeName());
+    }
+    builder.setObjectID(objectID);
+    return builder.build();
+  }
+
+  /**
+   * Prepare OmKeyInfo for multi-part upload part key which will be persisted
+   * to openKeyTable.
+   * @return OmKeyInfo
+   * @throws IOException
+   */
+  @SuppressWarnings("parameternumber")
+  private OmKeyInfo prepareMultipartFileInfo(
+          @Nonnull OMMetadataManager omMetadataManager,
+          @Nonnull KeyArgs args, long size,
+          @Nonnull List<OmKeyLocationInfo> locations,
+          FileEncryptionInfo encInfo,  @Nonnull PrefixManager prefixManager,
+          @Nullable OmBucketInfo omBucketInfo,
+          OMFileRequest.OMPathInfoV1 omPathInfo,
+          long transactionLogIndex, long objectID)
+          throws IOException {
+    HddsProtos.ReplicationFactor factor;
+    HddsProtos.ReplicationType type;
+
+    Preconditions.checkArgument(args.getMultipartNumber() > 0,
+            "PartNumber Should be greater than zero");
+    // When the key is a multipart upload part key, take the replication
+    // type and replication factor from the original key that initiated
+    // the multipart upload. If no such key is found, throw a
+    // no-such-multipart-upload error.
+    String uploadID = args.getMultipartUploadID();
+    Preconditions.checkNotNull(uploadID);
+    String multipartKey = omMetadataManager
+            .getMultipartKey(args.getVolumeName(), args.getBucketName(),
+                    args.getKeyName(), uploadID);
+    OmKeyInfo partKeyInfo = omMetadataManager.getOpenKeyTable().get(
+            multipartKey);
+    if (partKeyInfo == null) {
+      throw new OMException("No such Multipart upload is with specified " +
+              "uploadId " + uploadID,
+              OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
+    } else {
+      factor = partKeyInfo.getFactor();
+      type = partKeyInfo.getType();
+    }
+    // For this upload part we don't need to check the KeyTable, as this
+    // is not an actual key but a part of one.
+    return createFileInfo(args, locations, factor, type, size, encInfo,
+            prefixManager, omBucketInfo, omPathInfo, transactionLogIndex,
+            objectID);
+  }
 }
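
A minimal, self-contained sketch of the dispatch order implemented by
prepareFileInfo above: multipart parts inherit replication settings from the
key that initiated the upload, an existing key gets its blocks appended as a
new version, and only otherwise is a fresh key object created. The KeyInfo
type and map-based table below are illustrative stand-ins, not the Ozone
classes:

    import java.util.HashMap;
    import java.util.Map;

    public class PrepareFileInfoSketch {
      static class KeyInfo {
        String replication = "RATIS/THREE";
        int versions = 1;
      }

      // stand-in for the openKeyTable lookup used by multipart parts
      private final Map<String, KeyInfo> openKeyTable = new HashMap<>();

      KeyInfo prepare(boolean isMultipartKey, String multipartKey,
          KeyInfo dbKeyInfo) {
        if (isMultipartKey) {
          KeyInfo part = openKeyTable.get(multipartKey);
          if (part == null) {
            // mirrors NO_SUCH_MULTIPART_UPLOAD_ERROR
            throw new IllegalStateException("no such multipart upload");
          }
          KeyInfo partKey = new KeyInfo();
          partKey.replication = part.replication; // inherit from initiate-MPU
          return partKey;
        }
        if (dbKeyInfo != null) {
          dbKeyInfo.versions++; // existing key: blocks appended as new version
          return dbKeyInfo;
        }
        return new KeyInfo(); // new key: blocks become version 0
      }
    }
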
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseV1.java
new file mode 100644
index 0000000..ccaaa6b
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseV1.java
@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.file;
+
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+
+import javax.annotation.Nonnull;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
+
+/**
+ * Response for create file request layout version V1.
+ */
+@CleanupTableInfo(cleanupTables = OPEN_FILE_TABLE)
+public class OMFileCreateResponseV1 extends OMFileCreateResponse {
+
+  private List<OmDirectoryInfo> parentDirInfos;
+
+  public OMFileCreateResponseV1(@Nonnull OMResponse omResponse,
+                                @Nonnull OmKeyInfo omKeyInfo,
+                                @Nonnull List<OmDirectoryInfo> parentDirInfos,
+                                long openKeySessionID,
+                                @Nonnull OmBucketInfo omBucketInfo) {
+    super(omResponse, omKeyInfo, new ArrayList<>(), openKeySessionID,
+        omBucketInfo);
+    this.parentDirInfos = parentDirInfos;
+  }
+
+  @Override
+  public void addToDBBatch(OMMetadataManager omMetadataMgr,
+                              BatchOperation batchOp) throws IOException {
+
+    /**
+     * Create parent directory entries during Key Create - do not wait
+     * for Key Commit request.
+     * XXX handle stale directory entries.
+     */
+    if (parentDirInfos != null) {
+      for (OmDirectoryInfo parentDirInfo : parentDirInfos) {
+        String parentKey = parentDirInfo.getPath();
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("putWithBatch adding parent : key {} info : {}", parentKey,
+                  parentDirInfo);
+        }
+        omMetadataMgr.getDirectoryTable().putWithBatch(batchOp, parentKey,
+                parentDirInfo);
+      }
+    }
+
+    OMFileRequest.addToOpenFileTable(omMetadataMgr, batchOp, getOmKeyInfo(),
+            getOpenKeySessionID());
+
+    // update bucket usedBytes.
+    omMetadataMgr.getBucketTable().putWithBatch(batchOp,
+            omMetadataMgr.getBucketKey(getOmKeyInfo().getVolumeName(),
+                    getOmKeyInfo().getBucketName()), getOmBucketInfo());
+  }
+
+}
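
The write ordering in addToDBBatch above is the point: parent directory
entries, the open-file entry, and the bucket usedBytes update all go into the
same batch, so a create never persists a file whose ancestors are missing. A
hedged sketch of that ordering, with plain maps standing in for the RocksDB
tables and the batch assumed atomic:

    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    public class FileCreateBatchSketch {
      private final Map<String, String> directoryTable = new LinkedHashMap<>();
      private final Map<String, String> openFileTable = new LinkedHashMap<>();
      private final Map<String, Long> bucketTable = new LinkedHashMap<>();

      // all three writes are assumed to commit atomically, as in a real batch
      void addToDBBatch(List<String> parentDirKeys, String openFileKey,
          String bucketKey, long usedBytes) {
        for (String dirKey : parentDirKeys) {
          directoryTable.put(dirKey, "dirInfo");   // parents created eagerly
        }
        openFileTable.put(openFileKey, "keyInfo"); // then the open-file entry
        bucketTable.put(bucketKey, usedBytes);     // finally bucket usedBytes
      }
    }
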
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java
index 5d43b27..ebd3263 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java
@@ -79,4 +79,19 @@ public class OMKeyCommitResponse extends OMClientResponse {
             omBucketInfo.getBucketName()), omBucketInfo);
   }
 
+  protected String getOpenKeyName() {
+    return openKeyName;
+  }
+
+  protected OmKeyInfo getOmKeyInfo() {
+    return omKeyInfo;
+  }
+
+  protected OmBucketInfo getOmBucketInfo() {
+    return omBucketInfo;
+  }
+
+  protected String getOzoneKeyName() {
+    return ozoneKeyName;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponseV1.java
similarity index 59%
copy from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java
copy to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponseV1.java
index 5d43b27..c0840e3 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponseV1.java
@@ -18,65 +18,60 @@
 
 package org.apache.hadoop.ozone.om.response.key;
 
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
 
-import java.io.IOException;
 import javax.annotation.Nonnull;
 
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE;
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE;
+import java.io.IOException;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE;
 
 /**
- * Response for CommitKey request.
+ * Response for CommitKey request layout version V1.
  */
-@CleanupTableInfo(cleanupTables = {OPEN_KEY_TABLE, KEY_TABLE})
-public class OMKeyCommitResponse extends OMClientResponse {
-
-  private OmKeyInfo omKeyInfo;
-  private String ozoneKeyName;
-  private String openKeyName;
-  private OmBucketInfo omBucketInfo;
+@CleanupTableInfo(cleanupTables = {OPEN_FILE_TABLE, FILE_TABLE})
+public class OMKeyCommitResponseV1 extends OMKeyCommitResponse {
 
-  public OMKeyCommitResponse(@Nonnull OMResponse omResponse,
-      @Nonnull OmKeyInfo omKeyInfo, String ozoneKeyName, String openKeyName,
-      @Nonnull OmBucketInfo omBucketInfo) {
-    super(omResponse);
-    this.omKeyInfo = omKeyInfo;
-    this.ozoneKeyName = ozoneKeyName;
-    this.openKeyName = openKeyName;
-    this.omBucketInfo = omBucketInfo;
+  public OMKeyCommitResponseV1(@Nonnull OMResponse omResponse,
+                               @Nonnull OmKeyInfo omKeyInfo,
+                               String ozoneKeyName, String openKeyName,
+                               @Nonnull OmVolumeArgs omVolumeArgs,
+                               @Nonnull OmBucketInfo omBucketInfo) {
+    super(omResponse, omKeyInfo, ozoneKeyName, openKeyName,
+            omBucketInfo);
   }
 
   /**
    * For when the request is not successful.
    * For a successful request, the other constructor should be used.
    */
-  public OMKeyCommitResponse(@Nonnull OMResponse omResponse) {
+  public OMKeyCommitResponseV1(@Nonnull OMResponse omResponse) {
     super(omResponse);
     checkStatusNotOK();
   }
 
   @Override
   public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
+                           BatchOperation batchOperation) throws IOException {
 
     // Delete from OpenKey table
     omMetadataManager.getOpenKeyTable().deleteWithBatch(batchOperation,
-        openKeyName);
+            getOpenKeyName());
 
-    omMetadataManager.getKeyTable().putWithBatch(batchOperation, ozoneKeyName,
-        omKeyInfo);
+    OMFileRequest.addToFileTable(omMetadataManager, batchOperation,
+            getOmKeyInfo());
 
     // update bucket usedBytes.
     omMetadataManager.getBucketTable().putWithBatch(batchOperation,
-        omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(),
-            omBucketInfo.getBucketName()), omBucketInfo);
+            omMetadataManager.getBucketKey(getOmBucketInfo().getVolumeName(),
+                    getOmBucketInfo().getBucketName()), getOmBucketInfo());
   }
-
 }
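
Relative to the V0 response, the V1 commit changes which tables the batch
touches: the entry is deleted from the open file table and the committed key
is written into the file table. A minimal sketch of that move, assuming both
writes share one atomic batch (the bucket usedBytes update is elided here):

    import java.util.HashMap;
    import java.util.Map;

    public class KeyCommitBatchSketch {
      private final Map<String, String> openFileTable = new HashMap<>();
      private final Map<String, String> fileTable = new HashMap<>();

      void commit(String openKeyName, String dbFileKey, String keyInfo) {
        openFileTable.remove(openKeyName); // delete from OPEN_FILE_TABLE
        fileTable.put(dbFileKey, keyInfo); // insert into FILE_TABLE
      }
    }
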
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java
index 98b1927..d170ef4 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java
@@ -69,7 +69,7 @@ public class OMKeyCreateResponse extends OMClientResponse {
   }
 
   @Override
-  protected void addToDBBatch(OMMetadataManager omMetadataManager,
+  public void addToDBBatch(OMMetadataManager omMetadataManager,
       BatchOperation batchOperation) throws IOException {
 
     /**
@@ -101,5 +101,17 @@ public class OMKeyCreateResponse extends OMClientResponse {
         omMetadataManager.getBucketKey(omKeyInfo.getVolumeName(),
             omKeyInfo.getBucketName()), omBucketInfo);
   }
+
+  protected long getOpenKeySessionID() {
+    return openKeySessionID;
+  }
+
+  protected OmKeyInfo getOmKeyInfo() {
+    return omKeyInfo;
+  }
+
+  protected OmBucketInfo getOmBucketInfo() {
+    return omBucketInfo;
+  }
 }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
index a6ca2cf..2d4b5cb 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
@@ -69,6 +69,7 @@ import org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType;
 
 import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
 import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
@@ -814,4 +815,113 @@ public final class TestOMRequestUtils {
         new CacheKey<>(dbVolumeKey),
         new CacheValue<>(Optional.of(omVolumeArgs), 1L));
   }
+
+  /**
+   * Create OmKeyInfo.
+   */
+  @SuppressWarnings("parameterNumber")
+  public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName,
+      String keyName, HddsProtos.ReplicationType replicationType,
+      HddsProtos.ReplicationFactor replicationFactor, long objectID,
+      long parentID, long trxnLogIndex, long creationTime) {
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    return new OmKeyInfo.Builder()
+            .setVolumeName(volumeName)
+            .setBucketName(bucketName)
+            .setKeyName(keyName)
+            .setOmKeyLocationInfos(Collections.singletonList(
+                    new OmKeyLocationInfoGroup(0, new ArrayList<>())))
+            .setCreationTime(creationTime)
+            .setModificationTime(Time.now())
+            .setDataSize(1000L)
+            .setReplicationType(replicationType)
+            .setReplicationFactor(replicationFactor)
+            .setObjectID(objectID)
+            .setUpdateID(trxnLogIndex)
+            .setParentObjectID(parentID)
+            .setFileName(fileName)
+            .build();
+  }
+
+
+  /**
+   * Add a key entry to the KeyTable. If the openKeyTable flag is true, the
+   * entry is added to the openKeyTable; otherwise it is added to the keyTable.
+   *
+   * @throws Exception DB failure
+   */
+  public static void addFileToKeyTable(boolean openKeyTable,
+                                       boolean addToCache, String fileName,
+                                       OmKeyInfo omKeyInfo,
+                                       long clientID, long trxnLogIndex,
+                                       OMMetadataManager omMetadataManager)
+          throws Exception {
+    if (openKeyTable) {
+      String ozoneKey = omMetadataManager.getOpenFileName(
+              omKeyInfo.getParentObjectID(), fileName, clientID);
+      if (addToCache) {
+        omMetadataManager.getOpenKeyTable().addCacheEntry(
+                new CacheKey<>(ozoneKey),
+                new CacheValue<>(Optional.of(omKeyInfo), trxnLogIndex));
+      }
+      omMetadataManager.getOpenKeyTable().put(ozoneKey, omKeyInfo);
+    } else {
+      String ozoneKey = omMetadataManager.getOzonePathKey(
+              omKeyInfo.getParentObjectID(), fileName);
+      if (addToCache) {
+        omMetadataManager.getKeyTable().addCacheEntry(new CacheKey<>(ozoneKey),
+                new CacheValue<>(Optional.of(omKeyInfo), trxnLogIndex));
+      }
+      omMetadataManager.getKeyTable().put(ozoneKey, omKeyInfo);
+    }
+  }
+
+  /**
+   * Gets bucketId from OM metadata manager.
+   *
+   * @param volumeName        volume name
+   * @param bucketName        bucket name
+   * @param omMetadataManager metadata manager
+   * @return bucket Id
+   * @throws Exception DB failure
+   */
+  public static long getBucketId(String volumeName, String bucketName,
+                                 OMMetadataManager omMetadataManager)
+          throws Exception {
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    return omBucketInfo.getObjectID();
+  }
+
+  /**
+   * Adds path components to the directory table and returns the last
+   * directory's object id.
+   *
+   * @param volumeName volume name
+   * @param bucketName bucket name
+   * @param key        key name
+   * @param omMetaMgr  metadata manager
+   * @return last directory object id
+   * @throws Exception DB failure
+   */
+  public static long addParentsToDirTable(String volumeName, String bucketName,
+                                    String key, OMMetadataManager omMetaMgr)
+          throws Exception {
+    long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName,
+            omMetaMgr);
+    String[] pathComponents = StringUtils.split(key, '/');
+    long objectId = bucketId + 10;
+    long parentId = bucketId;
+    long txnID = 50;
+    for (String pathElement : pathComponents) {
+      OmDirectoryInfo omDirInfo =
+              TestOMRequestUtils.createOmDirectoryInfo(pathElement, ++objectId,
+                      parentId);
+      TestOMRequestUtils.addDirKeyToDirTable(true, omDirInfo,
+              txnID, omMetaMgr);
+      parentId = omDirInfo.getObjectID();
+    }
+    return parentId;
+  }
 }
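
addParentsToDirTable encodes the V1 directory layout: each path component
becomes a directory row keyed by its parent's object id plus its own name,
and the id of the last component is returned as the file's parent. A runnable
toy version; the ids and the '/' separator are illustrative, not the real DB
key encoding:

    import java.util.HashMap;
    import java.util.Map;

    public class DirChainSketch {
      public static void main(String[] args) {
        Map<String, Long> directoryTable = new HashMap<>();
        long bucketId = 1000L;          // example id, as if from bucket table
        long objectId = bucketId + 10;  // same seeding scheme as the helper
        long parentId = bucketId;
        for (String component : "a/b/c".split("/")) {
          directoryTable.put(parentId + "/" + component, ++objectId);
          parentId = objectId;
        }
        // a file "a/b/c/f" would now be stored under key parentId + "/f"
        System.out.println("last directory object id = " + parentId);
      }
    }
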
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java
index 77cf74b..f0f0320 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java
@@ -88,6 +88,7 @@ public class TestOMDirectoryCreateRequestV1 {
     OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
     ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
             folder.newFolder().getAbsolutePath());
+    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
     omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
     when(ozoneManager.getMetrics()).thenReturn(omMetrics);
     when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
index a500f4c..a963f88 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.om.request.file;
 import java.util.List;
 import java.util.UUID;
 
+import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -55,8 +56,7 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
         HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS,
         false, false);
 
-    OMFileCreateRequest omFileCreateRequest =
-        new OMFileCreateRequest(omRequest);
+    OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
 
     OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
     Assert.assertNotEquals(omRequest, modifiedOmRequest);
@@ -96,8 +96,7 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
         HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS,
         false, false);
 
-    OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest(
-        omRequest);
+    OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
 
     OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
     Assert.assertNotEquals(omRequest, modifiedOmRequest);
@@ -121,21 +120,17 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
 
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
-    OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest(
-        omRequest);
+    OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
 
     OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
 
     long id = modifiedOmRequest.getCreateFileRequest().getClientID();
 
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, id);
-
     // Before calling
-    OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
+    OmKeyInfo omKeyInfo = verifyPathInOpenKeyTable(keyName, id, false);
     Assert.assertNull(omKeyInfo);
 
-    omFileCreateRequest = new OMFileCreateRequest(modifiedOmRequest);
+    omFileCreateRequest = getOMFileCreateRequest(modifiedOmRequest);
 
     OMClientResponse omFileCreateResponse =
         omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
@@ -146,8 +141,7 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
 
     // Check open table whether key is added or not.
 
-    omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
-    Assert.assertNotNull(omKeyInfo);
+    omKeyInfo = verifyPathInOpenKeyTable(keyName, id, true);
 
     List< OmKeyLocationInfo > omKeyLocationInfoList =
         omKeyInfo.getLatestVersionLocations().getLocationList();
@@ -179,12 +173,11 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
         HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS,
             false, true);
 
-    OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest(
-        omRequest);
+    OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
 
     OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
 
-    omFileCreateRequest = new OMFileCreateRequest(modifiedOmRequest);
+    omFileCreateRequest = getOMFileCreateRequest(modifiedOmRequest);
 
     OMClientResponse omFileCreateResponse =
         omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
@@ -200,13 +193,11 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
         false, true);
 
     TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager);
-    OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest(
-        omRequest);
+    OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
 
     OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
 
-    omFileCreateRequest = new OMFileCreateRequest(modifiedOmRequest);
-
+    omFileCreateRequest = getOMFileCreateRequest(modifiedOmRequest);
 
     OMClientResponse omFileCreateResponse =
         omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
@@ -311,8 +302,7 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
     testNonRecursivePath(key, false, false, true);
   }
 
-
-  private void testNonRecursivePath(String key,
+  protected void testNonRecursivePath(String key,
       boolean overWrite, boolean recursive, boolean fail) throws Exception {
     OMRequest omRequest = createFileRequest(volumeName, bucketName, key,
         HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS,
@@ -320,12 +310,11 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
 
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
-    OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest(
-        omRequest);
+    OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
 
     OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
 
-    omFileCreateRequest = new OMFileCreateRequest(modifiedOmRequest);
+    omFileCreateRequest = getOMFileCreateRequest(modifiedOmRequest);
 
     OMClientResponse omFileCreateResponse =
         omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
@@ -341,10 +330,9 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
       Assert.assertTrue(omFileCreateResponse.getOMResponse().getSuccess());
       long id = modifiedOmRequest.getCreateFileRequest().getClientID();
 
-      String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-          key, id);
-      OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
-      Assert.assertNotNull(omKeyInfo);
+      verifyKeyNameInCreateFileResponse(key, omFileCreateResponse);
+
+      OmKeyInfo omKeyInfo = verifyPathInOpenKeyTable(key, id, true);
 
       List< OmKeyLocationInfo > omKeyLocationInfoList =
           omKeyInfo.getLatestVersionLocations().getLocationList();
@@ -368,6 +356,14 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
     }
   }
 
+  private void verifyKeyNameInCreateFileResponse(String key,
+      OMClientResponse omFileCreateResponse) {
+    OzoneManagerProtocolProtos.CreateFileResponse createFileResponse =
+            omFileCreateResponse.getOMResponse().getCreateFileResponse();
+    String actualFileName = createFileResponse.getKeyInfo().getKeyName();
+    Assert.assertEquals("Incorrect keyName", key, actualFileName);
+  }
+
   /**
    * Create OMRequest which encapsulates OMFileCreateRequest.
    * @param volumeName
@@ -377,7 +373,8 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
    * @param replicationType
    * @return OMRequest
    */
-  private OMRequest createFileRequest(
+  @NotNull
+  protected OMRequest createFileRequest(
       String volumeName, String bucketName, String keyName,
       HddsProtos.ReplicationFactor replicationFactor,
       HddsProtos.ReplicationType replicationType, boolean overWrite,
@@ -399,4 +396,38 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
         .setCreateFileRequest(createFileRequest).build();
 
   }
+
+  /**
+   * Verify path in open key table. Also, it returns OMKeyInfo for the given
+   * key path.
+   *
+   * @param key      key name
+   * @param id       client id
+   * @param doAssert if true then do the assertion, otherwise skip it.
+   * @return om key info for the given key path.
+   * @throws Exception DB failure
+   */
+  protected OmKeyInfo verifyPathInOpenKeyTable(String key, long id,
+                                               boolean doAssert)
+          throws Exception {
+    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
+            key, id);
+    OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
+    if (doAssert) {
+      Assert.assertNotNull("Failed to find key in OpenKeyTable", omKeyInfo);
+    }
+    return omKeyInfo;
+  }
+
+  /**
+   * Gets OMFileCreateRequest reference.
+   *
+   * @param omRequest om request
+   * @return OMFileCreateRequest reference
+   */
+  @NotNull
+  protected OMFileCreateRequest getOMFileCreateRequest(OMRequest omRequest) {
+    return new OMFileCreateRequest(omRequest);
+  }
+
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestV1.java
new file mode 100644
index 0000000..7ded386
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestV1.java
@@ -0,0 +1,192 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.file;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.UUID;
+
+/**
+ * Tests OMFileCreateRequest layout version V1.
+ */
+public class TestOMFileCreateRequestV1 extends TestOMFileCreateRequest {
+
+  @Test
+  public void testValidateAndUpdateCacheWithNonRecursive() throws Exception {
+    testNonRecursivePath(UUID.randomUUID().toString(), false, false, false);
+    testNonRecursivePath("a/b", false, false, true);
+    Assert.assertEquals("Invalid metrics value", 0, omMetrics.getNumKeys());
+
+    // Create parent dirs for the path
+    TestOMRequestUtils.addParentsToDirTable(volumeName, bucketName,
+            "a/b/c", omMetadataManager);
+    String fileNameD = "d";
+    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
+            "a/b/c/" + fileNameD, 0L, HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE, omMetadataManager);
+
+    // cannot create file if directory of same name exists
+    testNonRecursivePath("a/b/c", false, false, true);
+
+    // Delete child key but retain path "a/b/ in the key table
+    OmDirectoryInfo dirPathC = getDirInfo("a/b/c");
+    Assert.assertNotNull("Failed to find dir path: a/b/c", dirPathC);
+    String dbFileD = omMetadataManager.getOzonePathKey(
+            dirPathC.getObjectID(), fileNameD);
+    omMetadataManager.getKeyTable().delete(dbFileD);
+    omMetadataManager.getKeyTable().delete(dirPathC.getPath());
+
+    // can create non-recursive because parents already exist.
+    testNonRecursivePath("a/b/e", false, false, false);
+  }
+
+  @Test
+  public void testValidateAndUpdateCacheWithRecursiveAndOverWrite()
+          throws Exception {
+    String key = "c/d/e/f";
+    // Should be able to create file even if parent directories do not exist
+    testNonRecursivePath(key, false, true, false);
+    Assert.assertEquals("Invalid metrics value", 3, omMetrics.getNumKeys());
+
+    // Add the key to key table
+    OmDirectoryInfo omDirInfo = getDirInfo("c/d/e");
+    OmKeyInfo omKeyInfo =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, key,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE,
+                    omDirInfo.getObjectID() + 10,
+                    omDirInfo.getObjectID(), 100, Time.now());
+    TestOMRequestUtils.addFileToKeyTable(false, false,
+            "f", omKeyInfo, -1,
+            omDirInfo.getObjectID() + 10, omMetadataManager);
+
+    // Even if key exists, should be able to create file as overwrite is set
+    // to true
+    testNonRecursivePath(key, true, true, false);
+    testNonRecursivePath(key, false, true, true);
+  }
+
+  @Test
+  public void testValidateAndUpdateCacheWithNonRecursiveAndOverWrite()
+          throws Exception {
+    String parentDir = "c/d/e";
+    String fileName = "f";
+    String key = parentDir + "/" + fileName;
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+    // Create parent dirs for the path
+    long parentId = TestOMRequestUtils.addParentsToDirTable(volumeName,
+            bucketName, parentDir, omMetadataManager);
+
+    // Need to add the path which starts with "c/d/e" to OpenKeyTable as this is
+    // non-recursive parent should exist.
+    testNonRecursivePath(key, false, false, false);
+
+    OmKeyInfo omKeyInfo =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, key,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE,
+                    parentId + 1,
+                    parentId, 100, Time.now());
+    TestOMRequestUtils.addFileToKeyTable(false, false,
+            fileName, omKeyInfo, -1, 50, omMetadataManager);
+
+    // Even if key exists in KeyTable, should be able to create file as
+    // overwrite is set to true
+    testNonRecursivePath(key, true, false, false);
+    testNonRecursivePath(key, false, false, true);
+  }
+
+  @Override
+  protected OmKeyInfo verifyPathInOpenKeyTable(String key, long id,
+                                             boolean doAssert)
+          throws Exception {
+    long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName,
+            omMetadataManager);
+    String[] pathComponents = StringUtils.split(key, '/');
+    long parentId = bucketId;
+    for (int indx = 0; indx < pathComponents.length; indx++) {
+      String pathElement = pathComponents[indx];
+      // Reached last component, which is file name
+      if (indx == pathComponents.length - 1) {
+        String dbOpenFileName = omMetadataManager.getOpenFileName(
+                parentId, pathElement, id);
+        OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable()
+                .get(dbOpenFileName);
+        if (doAssert) {
+          Assert.assertNotNull("Invalid key!", omKeyInfo);
+        }
+        return omKeyInfo;
+      } else {
+        // directory
+        String dbKey = omMetadataManager.getOzonePathKey(parentId,
+                pathElement);
+        OmDirectoryInfo dirInfo =
+                omMetadataManager.getDirectoryTable().get(dbKey);
+        parentId = dirInfo.getObjectID();
+      }
+    }
+    if (doAssert) {
+      Assert.fail("Invalid key!");
+    }
+    return null;
+  }
+
+  private OmDirectoryInfo getDirInfo(String key)
+          throws Exception {
+    long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName,
+            omMetadataManager);
+    String[] pathComponents = StringUtils.split(key, '/');
+    long parentId = bucketId;
+    OmDirectoryInfo dirInfo = null;
+    for (int indx = 0; indx < pathComponents.length; indx++) {
+      String pathElement = pathComponents[indx];
+      // resolve each directory component, walking down the chain
+      String dbKey = omMetadataManager.getOzonePathKey(parentId,
+              pathElement);
+      dirInfo =
+              omMetadataManager.getDirectoryTable().get(dbKey);
+      parentId = dirInfo.getObjectID();
+    }
+    return dirInfo;
+  }
+
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    return config;
+  }
+
+  @Override
+  protected OMFileCreateRequest getOMFileCreateRequest(OMRequest omRequest) {
+    return new OMFileCreateRequestV1(omRequest);
+  }
+}
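
The overridden verifyPathInOpenKeyTable shows how the two layouts address an
open key: V0 embeds the full volume/bucket/key path plus the client id, while
V1 uses the parent directory's object id plus the leaf file name plus the
client id, which is why the test walks the directory chain first. A small
sketch of the two formats; the separators are illustrative, not the exact DB
encoding:

    public class OpenKeyFormatSketch {
      // V0: full path plus client id
      static String v0OpenKey(String volume, String bucket, String key,
          long id) {
        return "/" + volume + "/" + bucket + "/" + key + "/" + id;
      }

      // V1: parent directory object id, leaf file name, client id
      static String v1OpenFileName(long parentObjectId, String fileName,
          long id) {
        return parentObjectId + "/" + fileName + "/" + id;
      }

      public static void main(String[] args) {
        System.out.println(v0OpenKey("vol1", "buck1", "a/b/c/f", 7L));
        System.out.println(v1OpenFileName(1013L, "f", 7L));
      }
    }
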
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
index b327b76..09d499e 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
@@ -19,12 +19,15 @@
 
 package org.apache.hadoop.ozone.om.request.key;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.UUID;
 import java.util.stream.Collectors;
 
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -48,6 +51,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
  */
 public class TestOMKeyCommitRequest extends TestOMKeyRequest {
 
+  private String parentDir;
+
   @Test
   public void testPreExecute() throws Exception {
     doPreExecute(createCommitKeyRequest());
@@ -56,20 +61,15 @@ public class TestOMKeyCommitRequest extends TestOMKeyRequest {
   @Test
   public void testValidateAndUpdateCache() throws Exception {
 
-    OMRequest modifiedOmRequest =
-        doPreExecute(createCommitKeyRequest());
+    OMRequest modifiedOmRequest = doPreExecute(createCommitKeyRequest());
 
     OMKeyCommitRequest omKeyCommitRequest =
-        new OMKeyCommitRequest(modifiedOmRequest);
+            getOmKeyCommitRequest(modifiedOmRequest);
 
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
 
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
-
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
+    String ozoneKey = addKeyToOpenKeyTable();
 
     // Key should not be there in key table, as validateAndUpdateCache is
     // still not called.
@@ -92,6 +92,8 @@ public class TestOMKeyCommitRequest extends TestOMKeyRequest {
     omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
 
     Assert.assertNotNull(omKeyInfo);
+    // DB keyInfo format
+    verifyKeyName(omKeyInfo);
 
     // Check modification time
 
@@ -107,7 +109,14 @@ public class TestOMKeyCommitRequest extends TestOMKeyRequest {
 
     Assert.assertEquals(locationInfoListFromCommitKeyRequest,
         omKeyInfo.getLatestVersionLocations().getLocationList());
+  }
 
+  @Test
+  public void testValidateAndUpdateCacheWithSubDirs() throws Exception {
+    parentDir = "dir1/dir2/dir3/";
+    keyName = parentDir + UUID.randomUUID().toString();
+
+    testValidateAndUpdateCache();
   }
 
   @Test
@@ -117,10 +126,9 @@ public class TestOMKeyCommitRequest extends TestOMKeyRequest {
         doPreExecute(createCommitKeyRequest());
 
     OMKeyCommitRequest omKeyCommitRequest =
-        new OMKeyCommitRequest(modifiedOmRequest);
+            getOmKeyCommitRequest(modifiedOmRequest);
 
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
+    String ozoneKey = getOzonePathKey();
 
     // Key should not be there in key table, as validateAndUpdateCache is
     // still not called.
@@ -147,13 +155,11 @@ public class TestOMKeyCommitRequest extends TestOMKeyRequest {
         doPreExecute(createCommitKeyRequest());
 
     OMKeyCommitRequest omKeyCommitRequest =
-        new OMKeyCommitRequest(modifiedOmRequest);
-
+            getOmKeyCommitRequest(modifiedOmRequest);
 
     TestOMRequestUtils.addVolumeToDB(volumeName, OzoneConsts.OZONE,
         omMetadataManager);
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
+    String ozoneKey = getOzonePathKey();
 
     // Key should not be there in key table, as validateAndUpdateCache is
     // still not called.
@@ -180,14 +186,12 @@ public class TestOMKeyCommitRequest extends TestOMKeyRequest {
         doPreExecute(createCommitKeyRequest());
 
     OMKeyCommitRequest omKeyCommitRequest =
-        new OMKeyCommitRequest(modifiedOmRequest);
-
+            getOmKeyCommitRequest(modifiedOmRequest);
 
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
 
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
+    String ozoneKey = getOzonePathKey();
 
     // Key should not be there in key table, as validateAndUpdateCache is
     // still not called.
@@ -216,7 +220,7 @@ public class TestOMKeyCommitRequest extends TestOMKeyRequest {
   private OMRequest doPreExecute(OMRequest originalOMRequest) throws Exception {
 
     OMKeyCommitRequest omKeyCommitRequest =
-        new OMKeyCommitRequest(originalOMRequest);
+            getOmKeyCommitRequest(originalOMRequest);
 
     OMRequest modifiedOmRequest = omKeyCommitRequest.preExecute(ozoneManager);
 
@@ -294,4 +298,34 @@ public class TestOMKeyCommitRequest extends TestOMKeyRequest {
     return keyLocations;
   }
 
+  protected String getParentDir() {
+    return parentDir;
+  }
+
+  @NotNull
+  protected String getOzonePathKey() throws IOException {
+    return omMetadataManager.getOzoneKey(volumeName, bucketName,
+            keyName);
+  }
+
+  @NotNull
+  protected String addKeyToOpenKeyTable() throws Exception {
+    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
+            clientID, replicationType, replicationFactor, omMetadataManager);
+
+    return getOzonePathKey();
+  }
+
+  @NotNull
+  protected OMKeyCommitRequest getOmKeyCommitRequest(OMRequest omRequest) {
+    return new OMKeyCommitRequest(omRequest);
+  }
+
+  protected void verifyKeyName(OmKeyInfo omKeyInfo) {
+    Assert.assertEquals("Incorrect KeyName", keyName,
+            omKeyInfo.getKeyName());
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    Assert.assertEquals("Incorrect FileName", fileName,
+            omKeyInfo.getFileName());
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestV1.java
new file mode 100644
index 0000000..f5168e1
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestV1.java
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.apache.hadoop.ozone.om.request.key;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Assert;
+
+import java.io.IOException;
+
+/**
+ * Class tests OMKeyCommitRequestV1 class layout version V1.
+ */
+public class TestOMKeyCommitRequestV1 extends TestOMKeyCommitRequest {
+
+  private long parentID = Long.MIN_VALUE;
+
+  private long getBucketID() throws IOException {
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    if (omBucketInfo != null) {
+      return omBucketInfo.getObjectID();
+    }
+    // bucket doesn't exist in DB
+    return Long.MIN_VALUE;
+  }
+
+  @Override
+  protected String getOzonePathKey() throws IOException {
+    long bucketID = getBucketID();
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    return omMetadataManager.getOzonePathKey(bucketID, fileName);
+  }
+
+  @Override
+  protected String addKeyToOpenKeyTable() throws Exception {
+    // need to initialize parentID
+    if (getParentDir() == null) {
+      parentID = getBucketID();
+    } else {
+      parentID = TestOMRequestUtils.addParentsToDirTable(volumeName,
+              bucketName, getParentDir(), omMetadataManager);
+    }
+    long objectId = 100;
+
+    OmKeyInfo omKeyInfoV1 =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE, objectId, parentID, 100,
+                    Time.now());
+
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    TestOMRequestUtils.addFileToKeyTable(true, false,
+            fileName, omKeyInfoV1, clientID, txnLogId, omMetadataManager);
+
+    return omMetadataManager.getOzonePathKey(parentID, fileName);
+  }
+
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    return config;
+  }
+
+  @NotNull
+  @Override
+  protected OMKeyCommitRequest getOmKeyCommitRequest(OMRequest omRequest) {
+    return new OMKeyCommitRequestV1(omRequest);
+  }
+
+  @Override
+  protected void verifyKeyName(OmKeyInfo omKeyInfo) {
+    // V1 format - stores fileName in the keyName DB field.
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    Assert.assertEquals("Incorrect FileName", fileName,
+            omKeyInfo.getFileName());
+    Assert.assertEquals("Incorrect KeyName", fileName,
+            omKeyInfo.getKeyName());
+  }
+}
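
The V1 verifyKeyName assertion relies on the keyName DB field holding only
the leaf file name, so keyName and fileName compare equal to the last path
component. A toy check of that invariant; getFileName below is a simplified
stand-in for OzoneFSUtils.getFileName, not the real implementation:

    public class KeyNameSketch {
      static String getFileName(String keyName) {
        int idx = keyName.lastIndexOf('/');
        return idx < 0 ? keyName : keyName.substring(idx + 1);
      }

      public static void main(String[] args) {
        String keyName = "dir1/dir2/dir3/file1";
        // in V1 layout both keyName and fileName resolve to the leaf: "file1"
        System.out.println(getFileName(keyName));
      }
    }
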
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
index 7bf43a7..4bf66bb 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.ozone.om.KeyManagerImpl;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.jetbrains.annotations.NotNull;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
@@ -91,6 +92,7 @@ public class TestOMKeyRequest {
   protected long scmBlockSize = 1000L;
   protected long dataSize;
   protected Random random;
+  protected long txnLogId = 100000L;
 
   // Just setting ozoneManagerDoubleBuffer which does nothing.
   protected OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper =
@@ -103,7 +105,7 @@ public class TestOMKeyRequest {
   public void setup() throws Exception {
     ozoneManager = Mockito.mock(OzoneManager.class);
     omMetrics = OMMetrics.create();
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
+    OzoneConfiguration ozoneConfiguration = getOzoneConfiguration();
     ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
         folder.newFolder().getAbsolutePath());
     omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
@@ -172,6 +174,11 @@ public class TestOMKeyRequest {
         .thenReturn(new ResolvedBucket(volumeAndBucket, volumeAndBucket));
   }
 
+  @NotNull
+  protected OzoneConfiguration getOzoneConfiguration() {
+    return new OzoneConfiguration();
+  }
+
   @After
   public void stop() {
     omMetrics.unRegister();
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java
new file mode 100644
index 0000000..19a1bb9
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.file;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse;
+import org.apache.hadoop.ozone.om.response.key.TestOMKeyCreateResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Assert;
+
+/**
+ * Tests OMFileCreateResponseV1, layout version V1.
+ */
+public class TestOMFileCreateResponseV1 extends TestOMKeyCreateResponse {
+
+  @NotNull
+  @Override
+  protected OmKeyInfo getOmKeyInfo() {
+    Assert.assertNotNull(omBucketInfo);
+    return TestOMRequestUtils.createOmKeyInfo(volumeName,
+            omBucketInfo.getBucketName(), keyName, replicationType,
+            replicationFactor,
+            omBucketInfo.getObjectID() + 1,
+            omBucketInfo.getObjectID(), 100, Time.now());
+  }
+
+  @NotNull
+  @Override
+  protected String getOpenKeyName() {
+    Assert.assertNotNull(omBucketInfo);
+    return omMetadataManager.getOpenFileName(
+            omBucketInfo.getObjectID(), keyName, clientID);
+  }
+
+  @NotNull
+  @Override
+  protected OMKeyCreateResponse getOmKeyCreateResponse(OmKeyInfo keyInfo,
+      OmBucketInfo bucketInfo, OMResponse response) {
+
+    return new OMFileCreateResponseV1(response, keyInfo, null, clientID,
+            bucketInfo);
+  }
+
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    return config;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
index 2d63ebd..4d50337 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.om.response.key;
 
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -30,17 +31,20 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 /**
  * Tests OMKeyCommitResponse.
  */
+@SuppressWarnings("visibilitymodifier")
 public class TestOMKeyCommitResponse extends TestOMKeyResponse {
 
+  protected OmBucketInfo omBucketInfo;
+
   @Test
   public void testAddToDBBatch() throws Exception {
 
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
-    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+    omBucketInfo = OmBucketInfo.newBuilder()
         .setVolumeName(volumeName).setBucketName(bucketName)
         .setCreationTime(Time.now()).build();
 
+    OmKeyInfo omKeyInfo = getOmKeyInfo();
+
     OzoneManagerProtocolProtos.OMResponse omResponse =
         OzoneManagerProtocolProtos.OMResponse.newBuilder().setCommitKeyResponse(
             OzoneManagerProtocolProtos.CommitKeyResponse.getDefaultInstance())
@@ -50,17 +54,14 @@ public class TestOMKeyCommitResponse extends TestOMKeyResponse {
 
     // As during commit Key, entry will be already there in openKeyTable.
     // Adding it here.
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
+    addKeyToOpenKeyTable();
 
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, clientID);
+    String openKey = getOpenKeyName();
     Assert.assertTrue(omMetadataManager.getOpenKeyTable().isExist(openKey));
 
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
-    OMKeyCommitResponse omKeyCommitResponse = new OMKeyCommitResponse(
-        omResponse, omKeyInfo, ozoneKey, openKey, omBucketInfo);
+    String ozoneKey = getOzoneKey();
+    OMKeyCommitResponse omKeyCommitResponse = getOmKeyCommitResponse(
+            omKeyInfo, omResponse, openKey, ozoneKey);
 
     omKeyCommitResponse.addToDBBatch(omMetadataManager, batchOperation);
 
@@ -69,8 +70,7 @@ public class TestOMKeyCommitResponse extends TestOMKeyResponse {
 
     // On key commit, the key is deleted from openKey table and added to keyTable.
     Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
-    Assert.assertTrue(omMetadataManager.getKeyTable().isExist(
-        omMetadataManager.getOzoneKey(volumeName, bucketName, keyName)));
+    Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey));
   }
 
   @Test
@@ -78,7 +78,7 @@ public class TestOMKeyCommitResponse extends TestOMKeyResponse {
 
     OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
         bucketName, keyName, replicationType, replicationFactor);
-    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+    omBucketInfo = OmBucketInfo.newBuilder()
         .setVolumeName(volumeName).setBucketName(bucketName)
         .setCreationTime(Time.now()).build();
 
@@ -89,18 +89,15 @@ public class TestOMKeyCommitResponse extends TestOMKeyResponse {
             .setCmdType(OzoneManagerProtocolProtos.Type.CommitKey)
             .build();
 
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, clientID);
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
+    String openKey = getOpenKeyName();
+    String ozoneKey = getOzoneKey();
 
-    OMKeyCommitResponse omKeyCommitResponse = new OMKeyCommitResponse(
-        omResponse, omKeyInfo, ozoneKey, openKey, omBucketInfo);
+    OMKeyCommitResponse omKeyCommitResponse = getOmKeyCommitResponse(
+            omKeyInfo, omResponse, openKey, ozoneKey);
 
     // During commit key, the entry will already be present in openKeyTable.
     // Adding it here.
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
+    addKeyToOpenKeyTable();
 
     Assert.assertTrue(omMetadataManager.getOpenKeyTable().isExist(openKey));
 
@@ -113,7 +110,28 @@ public class TestOMKeyCommitResponse extends TestOMKeyResponse {
     // As omResponse is an error, this is a no-op. So, the entry should
     // still be in openKey table.
     Assert.assertTrue(omMetadataManager.getOpenKeyTable().isExist(openKey));
-    Assert.assertFalse(omMetadataManager.getKeyTable().isExist(
-        omMetadataManager.getOzoneKey(volumeName, bucketName, keyName)));
+    Assert.assertFalse(omMetadataManager.getKeyTable().isExist(ozoneKey));
+  }
+
+  protected void addKeyToOpenKeyTable() throws Exception {
+    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
+            clientID, replicationType, replicationFactor, omMetadataManager);
+  }
+
+  @NotNull
+  protected String getOzoneKey() {
+    Assert.assertNotNull(omBucketInfo);
+    return omMetadataManager.getOzoneKey(volumeName,
+            omBucketInfo.getBucketName(), keyName);
+  }
+
+  @NotNull
+  protected OMKeyCommitResponse getOmKeyCommitResponse(OmKeyInfo omKeyInfo,
+          OzoneManagerProtocolProtos.OMResponse omResponse, String openKey,
+          String ozoneKey) {
+    Assert.assertNotNull(omBucketInfo);
+    return new OMKeyCommitResponse(omResponse, omKeyInfo, ozoneKey, openKey,
+            omBucketInfo);
   }
 }
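
The two tests above pin down the key-commit life cycle: the entry must already exist in openKeyTable when the commit arrives, a successful commit removes it there and re-inserts it under the ozone key in keyTable, and an error response is a no-op that leaves both tables untouched. A toy model of that transition, assuming plain maps in place of the RocksDB-backed tables and batch operation:

import java.util.HashMap;
import java.util.Map;

public final class CommitFlowSketch {
  public static void main(String[] args) {
    Map<String, String> openKeyTable = new HashMap<>();
    Map<String, String> keyTable = new HashMap<>();

    String openKey = "/vol1/bucket1/key1/5555"; // open key incl. clientID
    String ozoneKey = "/vol1/bucket1/key1";

    openKeyTable.put(openKey, "keyInfo"); // written when the key is opened

    boolean responseOk = true; // Status.OK vs. an error status
    if (responseOk) {
      // Commit: atomically move the entry between the two tables.
      keyTable.put(ozoneKey, openKeyTable.remove(openKey));
    }
    // Error: nothing happens; openKeyTable still holds the entry.
  }
}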
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
new file mode 100644
index 0000000..369faa9
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.key;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Assert;
+
+/**
+ * Tests OMKeyCommitResponse layout version V1.
+ */
+public class TestOMKeyCommitResponseV1 extends TestOMKeyCommitResponse {
+
+  @NotNull
+  protected OMKeyCommitResponse getOmKeyCommitResponse(
+          OmVolumeArgs omVolumeArgs, OmKeyInfo omKeyInfo,
+          OzoneManagerProtocolProtos.OMResponse omResponse, String openKey,
+          String ozoneKey) {
+    Assert.assertNotNull(omBucketInfo);
+    return new OMKeyCommitResponseV1(
+            omResponse, omKeyInfo, ozoneKey, openKey, omVolumeArgs,
+            omBucketInfo);
+  }
+
+  @NotNull
+  @Override
+  protected OmKeyInfo getOmKeyInfo() {
+    Assert.assertNotNull(omBucketInfo);
+    return TestOMRequestUtils.createOmKeyInfo(volumeName,
+            omBucketInfo.getBucketName(), keyName, replicationType,
+            replicationFactor,
+            omBucketInfo.getObjectID() + 1,
+            omBucketInfo.getObjectID(), 100, Time.now());
+  }
+
+  @Override
+  protected void addKeyToOpenKeyTable() throws Exception {
+    Assert.assertNotNull(omBucketInfo);
+    long parentID = omBucketInfo.getObjectID();
+    long objectId = parentID + 10;
+
+    OmKeyInfo omKeyInfoV1 =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE, objectId, parentID, 100,
+                    Time.now());
+
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    TestOMRequestUtils.addFileToKeyTable(true, false,
+            fileName, omKeyInfoV1, clientID, txnLogId, omMetadataManager);
+  }
+
+  @NotNull
+  @Override
+  protected String getOpenKeyName() {
+    Assert.assertNotNull(omBucketInfo);
+    return omMetadataManager.getOpenFileName(
+            omBucketInfo.getObjectID(), keyName, clientID);
+  }
+
+  @NotNull
+  @Override
+  protected String getOzoneKey() {
+    Assert.assertNotNull(omBucketInfo);
+    return omMetadataManager.getOzonePathKey(omBucketInfo.getObjectID(),
+            keyName);
+  }
+
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    return config;
+  }
+}
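
The getOpenKeyName/getOzoneKey overrides above are where the two layouts actually diverge: V0 addresses a key by its full /volume/bucket/key path, while V1 addresses it by the parent directory's objectID plus the trailing file name. Roughly, with the exact separators being an assumption here:

public final class KeyFormatSketch {
  public static void main(String[] args) {
    // V0 (OLD_FORMAT): full path under volume and bucket.
    String v0Key = "/vol1/bucket1/a/b/file1";

    // V1 (NEW_FORMAT): parent directory objectID + file name only.
    long parentObjectID = 1027L;
    String v1Key = parentObjectID + "/file1";

    System.out.println(v0Key + "  vs.  " + v1Key);
  }
}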
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java
index 4bef2ef..7566afb 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java
@@ -20,11 +20,11 @@ package org.apache.hadoop.ozone.om.response.key;
 
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
 import org.junit.Test;
 
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .CreateKeyResponse;
@@ -40,13 +40,12 @@ public class TestOMKeyCreateResponse extends TestOMKeyResponse {
   @Test
   public void testAddToDBBatch() throws Exception {
 
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
-
-    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+    omBucketInfo = OmBucketInfo.newBuilder()
         .setVolumeName(volumeName).setBucketName(bucketName)
         .setCreationTime(Time.now()).build();
 
+    OmKeyInfo omKeyInfo = getOmKeyInfo();
+
     OMResponse omResponse = OMResponse.newBuilder().setCreateKeyResponse(
                 CreateKeyResponse.getDefaultInstance())
             .setStatus(OzoneManagerProtocolProtos.Status.OK)
@@ -54,11 +53,11 @@ public class TestOMKeyCreateResponse extends TestOMKeyResponse {
             .build();
 
     OMKeyCreateResponse omKeyCreateResponse =
-        new OMKeyCreateResponse(omResponse, omKeyInfo, null, clientID,
-            omBucketInfo);
+            getOmKeyCreateResponse(omKeyInfo, omBucketInfo,
+                    omResponse);
+
+    String openKey = getOpenKeyName();
 
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, clientID);
     Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
     omKeyCreateResponse.addToDBBatch(omMetadataManager, batchOperation);
 
@@ -70,13 +69,13 @@ public class TestOMKeyCreateResponse extends TestOMKeyResponse {
 
   @Test
   public void testAddToDBBatchWithErrorResponse() throws Exception {
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
 
-    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+    omBucketInfo = OmBucketInfo.newBuilder()
         .setVolumeName(volumeName).setBucketName(bucketName)
         .setCreationTime(Time.now()).build();
 
+    OmKeyInfo omKeyInfo = getOmKeyInfo();
+
     OMResponse omResponse = OMResponse.newBuilder().setCreateKeyResponse(
         CreateKeyResponse.getDefaultInstance())
         .setStatus(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND)
@@ -84,12 +83,11 @@ public class TestOMKeyCreateResponse extends TestOMKeyResponse {
         .build();
 
     OMKeyCreateResponse omKeyCreateResponse =
-        new OMKeyCreateResponse(omResponse, omKeyInfo, null, clientID,
-            omBucketInfo);
+            getOmKeyCreateResponse(omKeyInfo, omBucketInfo,
+                    omResponse);
 
     // Before calling addToDBBatch
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, clientID);
+    String openKey = getOpenKeyName();
     Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
 
     omKeyCreateResponse.checkAndUpdateDB(omMetadataManager, batchOperation);
@@ -101,4 +99,12 @@ public class TestOMKeyCreateResponse extends TestOMKeyResponse {
     Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
 
   }
+
+  @NotNull
+  protected OMKeyCreateResponse getOmKeyCreateResponse(OmKeyInfo keyInfo,
+      OmBucketInfo bucketInfo, OMResponse response) {
+
+    return new OMKeyCreateResponse(response, keyInfo, null, clientID,
+            bucketInfo);
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java
index 312fcaf..df7fd01 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java
@@ -21,6 +21,10 @@ package org.apache.hadoop.ozone.om.response.key;
 import java.util.Random;
 import java.util.UUID;
 
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.jetbrains.annotations.NotNull;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
@@ -50,12 +54,14 @@ public class TestOMKeyResponse {
   protected String keyName;
   protected HddsProtos.ReplicationFactor replicationFactor;
   protected HddsProtos.ReplicationType replicationType;
+  protected OmBucketInfo omBucketInfo;
   protected long clientID;
   protected Random random;
+  protected long txnLogId = 100000L;
 
   @Before
   public void setup() throws Exception {
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
+    OzoneConfiguration ozoneConfiguration = getOzoneConfiguration();
     ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
         folder.newFolder().getAbsolutePath());
     omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
@@ -70,6 +76,23 @@ public class TestOMKeyResponse {
     random = new Random();
   }
 
+  @NotNull
+  protected String getOpenKeyName() {
+    return omMetadataManager.getOpenKey(volumeName, bucketName, keyName,
+            clientID);
+  }
+
+  @NotNull
+  protected OmKeyInfo getOmKeyInfo() {
+    return TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+            replicationType, replicationFactor);
+  }
+
+  @NotNull
+  protected OzoneConfiguration getOzoneConfiguration() {
+    return new OzoneConfiguration();
+  }
+
   @After
   public void stop() {
     Mockito.framework().clearInlineMocks();




[ozone] 13/19: HDDS-4486. Feature Config: Make proper enableFSPaths and OFS optimized flag combinations (#1848)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 91733a9205b5bf6e1435f11ce8320c820abf6a64
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Thu Jan 28 15:50:48 2021 +0530

    HDDS-4486. Feature Config: Make proper enableFSPaths and OFS optimized flag combinations (#1848)
---
 .../apache/hadoop/fs/ozone/TestOzoneDirectory.java |   4 +-
 .../hadoop/fs/ozone/TestOzoneFileInterfacesV1.java |   6 +-
 .../apache/hadoop/fs/ozone/TestOzoneFileOps.java   |   5 +-
 .../hadoop/fs/ozone/TestOzoneFileSystem.java       |  13 ++-
 .../hadoop/fs/ozone/TestOzoneFileSystemV1.java     |  66 +++---------
 .../hadoop/ozone/client/rpc/TestReadRetries.java   |   9 +-
 .../ozone/freon/TestHadoopDirTreeGeneratorV1.java  |   4 +-
 .../apache/hadoop/ozone/om/TestObjectStoreV1.java  |   3 +-
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |   8 +-
 .../hadoop/ozone/om/OmMetadataManagerImpl.java     |   4 +-
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |  17 ++-
 .../om/ratis/utils/OzoneManagerRatisUtils.java     |  35 ++++---
 .../om/request/bucket/OMBucketCreateRequest.java   |  28 +++--
 .../ozone/om/request/TestOMRequestUtils.java       |  42 ++++++++
 .../ozone/om/request/bucket/TestBucketRequest.java |   1 +
 .../request/bucket/TestOMBucketCreateRequest.java  |   2 +-
 .../bucket/TestOMBucketCreateRequestV1.java        | 114 +++++++++++++++++++++
 .../file/TestOMDirectoryCreateRequestV1.java       |   3 +-
 .../om/request/file/TestOMFileCreateRequestV1.java |   4 +-
 .../request/key/TestOMAllocateBlockRequestV1.java  |   4 +-
 .../om/request/key/TestOMKeyCommitRequestV1.java   |   4 +-
 .../om/request/key/TestOMKeyCreateRequestV1.java   |   4 +-
 .../om/request/key/TestOMKeyDeleteRequestV1.java   |   4 +-
 .../response/file/TestOMFileCreateResponseV1.java  |   4 +-
 .../key/TestOMAllocateBlockResponseV1.java         |   4 +-
 .../om/response/key/TestOMKeyCommitResponseV1.java |   4 +-
 .../om/response/key/TestOMKeyCreateResponseV1.java |   4 +-
 .../om/response/key/TestOMKeyDeleteResponseV1.java |   4 +-
 .../fs/ozone/BasicOzoneClientAdapterImpl.java      |  15 ++-
 .../hadoop/fs/ozone/BasicOzoneFileSystem.java      |  10 +-
 .../ozone/BasicRootedOzoneClientAdapterImpl.java   |   5 +-
 .../apache/hadoop/fs/ozone/OzoneClientAdapter.java |   2 +-
 32 files changed, 288 insertions(+), 148 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneDirectory.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneDirectory.java
index 56c6177..22ed13e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneDirectory.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneDirectory.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.After;
 import org.junit.Assert;
@@ -149,7 +150,8 @@ public class TestOzoneDirectory {
           throws IOException, TimeoutException, InterruptedException {
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.setInt(FS_TRASH_INTERVAL_KEY, 1);
-    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    TestOMRequestUtils.configureFSOptimizedPaths(conf,
+            true, OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1);
     cluster = MiniOzoneCluster.newBuilder(conf)
             .setNumDatanodes(3)
             .build();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfacesV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfacesV1.java
index 93473be..d716457 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfacesV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfacesV1.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.ozone;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.jetbrains.annotations.NotNull;
 import org.junit.Ignore;
 import org.junit.Test;
@@ -44,9 +45,8 @@ public class TestOzoneFileInterfacesV1 extends TestOzoneFileInterfaces {
   @Override
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
-            enableFileSystemPaths);
-    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    TestOMRequestUtils.configureFSOptimizedPaths(conf,
+            enableFileSystemPaths, OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1);
     return conf;
   }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java
index d097268..12dd51e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.After;
 import org.junit.Before;
@@ -73,8 +74,8 @@ public class TestOzoneFileOps {
           throws IOException, TimeoutException, InterruptedException {
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.setInt(FS_TRASH_INTERVAL_KEY, 1);
-    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
-    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, false);
+    TestOMRequestUtils.configureFSOptimizedPaths(conf,
+            true, OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1);
     cluster = MiniOzoneCluster.newBuilder(conf)
             .setNumDatanodes(3)
             .build();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
index bf9e09f..685aade 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.TrashPolicyOzone;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 
@@ -114,6 +115,7 @@ public class TestOzoneFileSystem {
       }
     }
   }
+
   /**
    * Set a timeout for each test.
    */
@@ -124,6 +126,8 @@ public class TestOzoneFileSystem {
       LoggerFactory.getLogger(TestOzoneFileSystem.class);
 
   @SuppressWarnings("checkstyle:VisibilityModifier")
+  protected static boolean isBucketFSOptimized = false;
+  @SuppressWarnings("checkstyle:VisibilityModifier")
   protected static boolean enabledFileSystemPaths;
   @SuppressWarnings("checkstyle:VisibilityModifier")
   protected static boolean omRatisEnabled;
@@ -147,8 +151,13 @@ public class TestOzoneFileSystem {
     conf.setInt(FS_TRASH_INTERVAL_KEY, 1);
     conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, omRatisEnabled);
     conf.setBoolean(OZONE_ACL_ENABLED, true);
-    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
-            enabledFileSystemPaths);
+    if (isBucketFSOptimized) {
+      TestOMRequestUtils.configureFSOptimizedPaths(conf,
+              enabledFileSystemPaths, OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1);
+    } else {
+      conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+              enabledFileSystemPaths);
+    }
     cluster = MiniOzoneCluster.newBuilder(conf)
             .setNumDatanodes(3)
             .build();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
index e574e94..ffeb5a3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
@@ -18,26 +18,15 @@
 
 package org.apache.hadoop.fs.ozone;
 
-import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
-import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.TestDataUtil;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.junit.Assert;
 import org.junit.After;
-import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Rule;
@@ -51,9 +40,9 @@ import org.slf4j.LoggerFactory;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
 
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.fail;
@@ -66,49 +55,24 @@ import static org.junit.Assert.fail;
 @RunWith(Parameterized.class)
 public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
 
-  @Ignore("TODO:HDDS-2939")
-  @BeforeClass
-  public static void init() throws Exception {
+  @Parameterized.Parameters
+  public static Collection<Object[]> data() {
+    return Arrays.asList(
+            new Object[]{true, true},
+            new Object[]{true, false},
+            new Object[]{false, true},
+            new Object[]{false, false});
+  }
 
+  @BeforeClass
+  public static void init() {
+    isBucketFSOptimized = true;
   }
 
   public TestOzoneFileSystemV1(boolean setDefaultFs, boolean enableOMRatis) {
     super(setDefaultFs, enableOMRatis);
   }
 
-  @Before
-  public void setup() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setInt(FS_TRASH_INTERVAL_KEY, 1);
-    conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, omRatisEnabled);
-    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
-            enabledFileSystemPaths);
-    if (enabledFileSystemPaths) {
-      conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
-    }
-    cluster = MiniOzoneCluster.newBuilder(conf)
-            .setNumDatanodes(3)
-            .build();
-    cluster.waitForClusterToBeReady();
-
-    // create a volume and a bucket to be used by OzoneFileSystem
-    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster);
-    volumeName = bucket.getVolumeName();
-    bucketName = bucket.getName();
-
-    String rootPath = String.format("%s://%s.%s/",
-            OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName);
-
-    // Set the fs.defaultFS and start the filesystem
-    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
-    // Set the number of keys to be processed during batch operate.
-    conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5);
-
-    fs = FileSystem.get(conf);
-    trash = new Trash(conf);
-    o3fs = (OzoneFileSystem) fs;
-  }
-
   @After
   @Override
   public void cleanup() {
@@ -119,10 +83,6 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
       LOG.info("Failed to cleanup DB tables.", e);
       fail("Failed to cleanup DB tables." + e.getMessage());
     }
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-    IOUtils.closeQuietly(fs);
   }
 
   /**
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
index ce2f10b..36146a2 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 
 import org.junit.After;
 import org.junit.Assert;
@@ -102,7 +103,9 @@ public class TestReadRetries {
 
   @Parameterized.Parameters
   public static Collection<Object[]> data() {
-    return Arrays.asList(new Object[]{"V0"}, new Object[]{"V1"});
+    return Arrays.asList(
+            new Object[]{OMConfigKeys.OZONE_OM_LAYOUT_VERSION_DEFAULT},
+            new Object[]{OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1});
   }
 
   /**
@@ -113,8 +116,8 @@ public class TestReadRetries {
   public void init() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1);
-    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true);
-    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, layoutVersion);
+    TestOMRequestUtils.configureFSOptimizedPaths(conf,
+            true, layoutVersion);
     cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(3)
         .setScmId(SCM_ID)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGeneratorV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGeneratorV1.java
index 99d4f26..01a73bc 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGeneratorV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGeneratorV1.java
@@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.freon;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 
 /**
  * Test for HadoopDirTreeGenerator layout version V1.
@@ -26,7 +27,8 @@ public class TestHadoopDirTreeGeneratorV1 extends TestHadoopDirTreeGenerator {
 
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    TestOMRequestUtils.configureFSOptimizedPaths(conf,
+            true, OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1);
     return conf;
   }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
index d343e2c..d09020e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
@@ -88,7 +88,8 @@ public class TestObjectStoreV1 {
     clusterId = UUID.randomUUID().toString();
     scmId = UUID.randomUUID().toString();
     omId = UUID.randomUUID().toString();
-    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    TestOMRequestUtils.configureFSOptimizedPaths(conf,
+            true, OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1);
     cluster = MiniOzoneCluster.newBuilder(conf)
             .setClusterId(clusterId)
             .setScmId(scmId)
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 055ab13..604f7d2 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -657,7 +657,7 @@ public class KeyManagerImpl implements KeyManager {
         bucketName);
     OmKeyInfo value = null;
     try {
-      if (OzoneManagerRatisUtils.isOmLayoutVersionV1()) {
+      if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
         value = getOmKeyInfoV1(volumeName, bucketName, keyName);
       } else {
         value = getOmKeyInfo(volumeName, bucketName, keyName);
@@ -1802,7 +1802,7 @@ public class KeyManagerImpl implements KeyManager {
     String bucketName = args.getBucketName();
     String keyName = args.getKeyName();
 
-    if (OzoneManagerRatisUtils.isOmLayoutVersionV1()) {
+    if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
       return getOzoneFileStatusV1(volumeName, bucketName, keyName,
               args.getSortDatanodes(), clientAddress, false);
     }
@@ -2072,7 +2072,7 @@ public class KeyManagerImpl implements KeyManager {
     String bucketName = args.getBucketName();
     String keyName = args.getKeyName();
     OzoneFileStatus fileStatus;
-    if (OzoneManagerRatisUtils.isOmLayoutVersionV1()) {
+    if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
       fileStatus = getOzoneFileStatusV1(volumeName, bucketName, keyName,
               args.getSortDatanodes(), clientAddress, false);
     } else {
@@ -2180,7 +2180,7 @@ public class KeyManagerImpl implements KeyManager {
       return fileStatusList;
     }
 
-    if (OzoneManagerRatisUtils.isOmLayoutVersionV1()) {
+    if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
       return listStatusV1(args, recursive, startKey, numEntries, clientAddress);
     }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index 22bbc20..3ac234a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -240,7 +240,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
 
   @Override
   public Table<String, OmKeyInfo> getKeyTable() {
-    if (OzoneManagerRatisUtils.isOmLayoutVersionV1()) {
+    if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
       return fileTable;
     }
     return keyTable;
@@ -253,7 +253,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
 
   @Override
   public Table<String, OmKeyInfo> getOpenKeyTable() {
-    if (OzoneManagerRatisUtils.isOmLayoutVersionV1()) {
+    if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
       return openFileTable;
     }
     return openKeyTable;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 30c1d37..51775ef 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -227,6 +227,7 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_F
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_LAYOUT_VERSION;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_LAYOUT_VERSION_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_METRICS_SAVE_INTERVAL;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_METRICS_SAVE_INTERVAL_DEFAULT;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_USER_MAX_VOLUME;
@@ -1163,7 +1164,8 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
     }
 
     // TODO: Temporary workaround for OM upgrade path and will be replaced once
-    //  upgrade HDDS-3698 story reaches consensus.
+    //  upgrade HDDS-3698 story reaches consensus. Instead of a cluster-level
+    //  configuration, OM needs to check this property at the bucket level.
     getOMLayoutVersion();
 
     metadataManager.start(configuration);
@@ -3708,14 +3710,19 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
         OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT);
   }
 
-  private void getOMLayoutVersion() {
+  public String getOMLayoutVersion() {
     String version = configuration.getTrimmed(OZONE_OM_LAYOUT_VERSION,
             OZONE_OM_LAYOUT_VERSION_DEFAULT);
-    boolean omLayoutVersionV1 =
-            StringUtils.equalsIgnoreCase(version, "V1");
-    OzoneManagerRatisUtils.setOmLayoutVersionV1(omLayoutVersionV1);
+    boolean omLayoutVersionV1 = StringUtils.equalsIgnoreCase(version,
+            OZONE_OM_LAYOUT_VERSION_V1);
     LOG.info("Configured {}={} and enabled:{} optimized OM FS operations",
             OZONE_OM_LAYOUT_VERSION, version, omLayoutVersionV1);
+
+    boolean isBucketFSOptimized =
+            omLayoutVersionV1 && getEnableFileSystemPaths();
+    OzoneManagerRatisUtils.setBucketFSOptimized(isBucketFSOptimized);
+
+    return version;
   }
 
   /**
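
The getOMLayoutVersion() hunk above is the heart of HDDS-4486: previously the layout version alone flipped the optimized path, whereas now both knobs must agree before OzoneManagerRatisUtils.setBucketFSOptimized(true) is called. The combinations reduce to a single AND; a sketch with a hypothetical class name:

public final class FsoFlagSketch {
  static boolean isBucketFSOptimized(String layoutVersion,
      boolean enableFSPaths) {
    return "V1".equalsIgnoreCase(layoutVersion) && enableFSPaths;
  }

  public static void main(String[] args) {
    // V1 + true  -> true   (NEW_FORMAT, objectID-prefixed tables)
    // V1 + false -> false  (existing key format)
    // V0 + true  -> false  (path normalization only, no FSO)
    // V0 + false -> false
    System.out.println(isBucketFSOptimized("V1", true));
  }
}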
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index c469c2c..26fded2 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -92,18 +92,19 @@ public final class OzoneManagerRatisUtils {
 
   // TODO: Temporary workaround for OM upgrade path and will be replaced once
   //  upgrade HDDS-3698 story reaches consensus.
-  private static boolean omLayoutVersionV1 = false;
+  private static boolean isBucketFSOptimized = false;
 
   private OzoneManagerRatisUtils() {
   }
 
   /**
-   * Sets layout version.
+   * Sets enabled/disabled file system optimized path property. A true value
+   * represents enabled, false represents disabled.
    *
-   * @param layoutVersionV1 om layout version
+   * @param enabledFSO enabled/disabled file system optimized
    */
-  public static void setOmLayoutVersionV1(boolean layoutVersionV1) {
-    OzoneManagerRatisUtils.omLayoutVersionV1 = layoutVersionV1;
+  public static void setBucketFSOptimized(boolean enabledFSO) {
+    OzoneManagerRatisUtils.isBucketFSOptimized = enabledFSO;
   }
 
   /**
@@ -140,41 +141,41 @@ public final class OzoneManagerRatisUtils {
     case SetBucketProperty:
       return new OMBucketSetPropertyRequest(omRequest);
     case AllocateBlock:
-      if (omLayoutVersionV1) {
+      if (isBucketFSOptimized()) {
         return new OMAllocateBlockRequestV1(omRequest);
       }
       return new OMAllocateBlockRequest(omRequest);
     case CreateKey:
-      if (omLayoutVersionV1) {
+      if (isBucketFSOptimized()) {
         return new OMKeyCreateRequestV1(omRequest);
       }
       return new OMKeyCreateRequest(omRequest);
     case CommitKey:
-      if (omLayoutVersionV1) {
+      if (isBucketFSOptimized()) {
         return new OMKeyCommitRequestV1(omRequest);
       }
       return new OMKeyCommitRequest(omRequest);
     case DeleteKey:
-      if (omLayoutVersionV1) {
+      if (isBucketFSOptimized()) {
         return new OMKeyDeleteRequestV1(omRequest);
       }
       return new OMKeyDeleteRequest(omRequest);
     case DeleteKeys:
       return new OMKeysDeleteRequest(omRequest);
     case RenameKey:
-      if (omLayoutVersionV1) {
+      if (isBucketFSOptimized()) {
         return new OMKeyRenameRequestV1(omRequest);
       }
       return new OMKeyRenameRequest(omRequest);
     case RenameKeys:
       return new OMKeysRenameRequest(omRequest);
     case CreateDirectory:
-      if (omLayoutVersionV1) {
+      if (isBucketFSOptimized()) {
         return new OMDirectoryCreateRequestV1(omRequest);
       }
       return new OMDirectoryCreateRequest(omRequest);
     case CreateFile:
-      if (omLayoutVersionV1) {
+      if (isBucketFSOptimized()) {
         return new OMFileCreateRequestV1(omRequest);
       }
       return new OMFileCreateRequest(omRequest);
@@ -352,11 +353,13 @@ public final class OzoneManagerRatisUtils {
   }
 
   /**
-   * Returns layout version flag represents V1.
-   * @return
+   * Returns whether the file system optimized (FSO) path property is
+   * enabled.
+   *
+   * @return true if FSO paths are enabled, false otherwise.
    */
-  public static boolean isOmLayoutVersionV1() {
-    return omLayoutVersionV1;
+  public static boolean isBucketFSOptimized() {
+    return isBucketFSOptimized;
   }
 
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
index d3b3f61..8b11770 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
@@ -27,7 +27,7 @@ import java.util.stream.Collectors;
 
 import com.google.common.base.Optional;
 
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -160,7 +160,7 @@ public class OMBucketCreateRequest extends OMClientRequest {
     OmBucketInfo omBucketInfo = OmBucketInfo.getFromProtobuf(bucketInfo);
 
     // Add FS optimized details (layout version, fsPaths flag) to bucket info
-    addLayoutVersionToBucket(ozoneManager, omBucketInfo);
+    addFSOptimizedBucketDetails(ozoneManager, omBucketInfo);
 
     AuditLogger auditLogger = ozoneManager.getAuditLogger();
     OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
@@ -367,20 +367,30 @@ public class OMBucketCreateRequest extends OMClientRequest {
 
   }
 
-  private void addLayoutVersionToBucket(OzoneManager ozoneManager,
-                                        OmBucketInfo omBucketInfo) {
+  /**
+   * OM can support FS optimization only if both flags are TRUE
+   * (enableFSOptimized=true && enableFSPaths=true), in which case it writes
+   * table key entries in NEW_FORMAT (prefix separated format using objectID).
+   * In all other cases, it writes table key entries in OLD_FORMAT
+   * (existing format).
+   *
+   * @param ozoneManager ozone manager
+   * @param omBucketInfo bucket information
+   */
+  private void addFSOptimizedBucketDetails(OzoneManager ozoneManager,
+                                           OmBucketInfo omBucketInfo) {
     Map<String, String> metadata = omBucketInfo.getMetadata();
     if (metadata == null) {
       metadata = new HashMap<>();
     }
-    OzoneConfiguration configuration = ozoneManager.getConfiguration();
     // TODO: Many unit test cases have a null config, so a simple null
     //  check is done for now. Revisit later to avoid massive test changes.
-    if (configuration != null) {
-      String layOutVersion = configuration
-              .get(OMConfigKeys.OZONE_OM_LAYOUT_VERSION,
-                      OMConfigKeys.OZONE_OM_LAYOUT_VERSION_DEFAULT);
+    if (StringUtils.isNotBlank(ozoneManager.getOMLayoutVersion())) {
+      String layOutVersion = ozoneManager.getOMLayoutVersion();
       metadata.put(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, layOutVersion);
+      boolean fsPathsEnabled = ozoneManager.getEnableFileSystemPaths();
+      metadata.put(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+              Boolean.toString(fsPathsEnabled));
       omBucketInfo.setMetadata(metadata);
     }
   }
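
Because per-bucket behavior is the eventual goal (see the TODO in OzoneManager above), the create path now records both effective settings in the bucket's own metadata instead of only the layout version. The stored entries end up looking like this sketch, where the literal strings stand in for the OMConfigKeys constants and the values reflect a V1 cluster with FS paths disabled:

import java.util.HashMap;
import java.util.Map;

public final class BucketMetadataSketch {
  public static Map<String, String> fsoBucketMetadata() {
    Map<String, String> metadata = new HashMap<>();
    metadata.put("ozone.om.layout.version", "V1");
    metadata.put("ozone.om.enable.filesystem.paths", "false");
    return metadata;
  }
}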
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
index 61fd676..eca06f9 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
@@ -26,6 +26,7 @@ import java.util.List;
 import java.util.UUID;
 
 import com.google.common.base.Optional;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
@@ -33,6 +34,7 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
@@ -430,6 +432,25 @@ public final class TestOMRequestUtils {
         .setClientId(UUID.randomUUID().toString()).build();
   }
 
+  public static OzoneManagerProtocolProtos.OMRequest createBucketRequestV1(
+          String bucketName, String volumeName, boolean isVersionEnabled,
+          OzoneManagerProtocolProtos.StorageTypeProto storageTypeProto) {
+    OzoneManagerProtocolProtos.BucketInfo bucketInfo =
+            OzoneManagerProtocolProtos.BucketInfo.newBuilder()
+                    .setBucketName(bucketName)
+                    .setVolumeName(volumeName)
+                    .setIsVersionEnabled(isVersionEnabled)
+                    .setStorageType(storageTypeProto)
+                    .addAllMetadata(getMetadataListV1()).build();
+    OzoneManagerProtocolProtos.CreateBucketRequest.Builder req =
+            OzoneManagerProtocolProtos.CreateBucketRequest.newBuilder();
+    req.setBucketInfo(bucketInfo);
+    return OzoneManagerProtocolProtos.OMRequest.newBuilder()
+            .setCreateBucketRequest(req)
+            .setCmdType(OzoneManagerProtocolProtos.Type.CreateBucket)
+            .setClientId(UUID.randomUUID().toString()).build();
+  }
+
   public static List< HddsProtos.KeyValue> getMetadataList() {
     List<HddsProtos.KeyValue> metadataList = new ArrayList<>();
     metadataList.add(HddsProtos.KeyValue.newBuilder().setKey("key1").setValue(
@@ -439,6 +460,20 @@ public final class TestOMRequestUtils {
     return metadataList;
   }
 
+  public static List< HddsProtos.KeyValue> getMetadataListV1() {
+    List<HddsProtos.KeyValue> metadataList = new ArrayList<>();
+    metadataList.add(HddsProtos.KeyValue.newBuilder().setKey("key1").setValue(
+            "value1").build());
+    metadataList.add(HddsProtos.KeyValue.newBuilder().setKey("key2").setValue(
+            "value2").build());
+    metadataList.add(HddsProtos.KeyValue.newBuilder().setKey(
+            OMConfigKeys.OZONE_OM_LAYOUT_VERSION).setValue(
+            OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1).build());
+    metadataList.add(HddsProtos.KeyValue.newBuilder().setKey(
+            OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS).setValue(
+            "false").build());
+    return metadataList;
+  }
 
   /**
    * Add user to user table.
@@ -927,4 +962,11 @@ public final class TestOMRequestUtils {
     }
     return parentId;
   }
+
+  public static void configureFSOptimizedPaths(Configuration conf,
+      boolean enableFileSystemPaths, String version) {
+    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+            enableFileSystemPaths);
+    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, version);
+  }
 }
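
The new configureFSOptimizedPaths helper gives every V1 test a single entry point for the flag pairing; mirroring the call sites changed in this patch, a typical test setup reduces to:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;

public final class FsoConfSketch {
  public static OzoneConfiguration newV1Conf() {
    // One helper call replaces the previous pair of conf.set(...) lines.
    OzoneConfiguration conf = new OzoneConfiguration();
    TestOMRequestUtils.configureFSOptimizedPaths(conf,
        true, OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1);
    return conf;
  }
}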
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestBucketRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestBucketRequest.java
index 7ae82f8..9381746 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestBucketRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestBucketRequest.java
@@ -74,6 +74,7 @@ public class TestBucketRequest {
     auditLogger = Mockito.mock(AuditLogger.class);
     when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
     Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
+    when(ozoneManager.getOMLayoutVersion()).thenReturn(null);
   }
 
   @After
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java
index 06e140b..3615ce4 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java
@@ -186,7 +186,7 @@ public class TestOMBucketCreateRequest extends TestBucketRequest {
 
   }
 
-  private void verifyRequest(OMRequest modifiedOmRequest,
+  protected void verifyRequest(OMRequest modifiedOmRequest,
       OMRequest originalRequest) {
     OzoneManagerProtocolProtos.BucketInfo original =
         originalRequest.getCreateBucketRequest().getBucketInfo();
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestV1.java
new file mode 100644
index 0000000..a3b6ff7
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestV1.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.om.request.bucket;
+
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.StorageTypeProto;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.UUID;
+
+import static org.mockito.Mockito.when;
+
+/**
+ * Tests OMBucketCreateRequest class, which handles CreateBucket request.
+ */
+public class TestOMBucketCreateRequestV1 extends TestOMBucketCreateRequest {
+
+  @Test
+  public void testValidateAndUpdateCacheWithFSOBucket() throws Exception {
+    when(ozoneManager.getOMLayoutVersion()).thenReturn(
+            OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1);
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+
+    OMBucketCreateRequest omBucketCreateRequest = doPreExecute(volumeName,
+        bucketName);
+
+    doValidateAndUpdateCache(volumeName, bucketName,
+        omBucketCreateRequest.getOmRequest());
+  }
+
+  private OMBucketCreateRequest doPreExecute(String volumeName,
+      String bucketName) throws Exception {
+    addCreateVolumeToTable(volumeName, omMetadataManager);
+    OMRequest originalRequest =
+        TestOMRequestUtils.createBucketRequestV1(bucketName, volumeName,
+                false, StorageTypeProto.SSD);
+
+    OMBucketCreateRequest omBucketCreateRequest =
+        new OMBucketCreateRequest(originalRequest);
+
+    OMRequest modifiedRequest = omBucketCreateRequest.preExecute(ozoneManager);
+    verifyRequest(modifiedRequest, originalRequest);
+    return new OMBucketCreateRequest(modifiedRequest);
+  }
+
+  private void doValidateAndUpdateCache(String volumeName, String bucketName,
+      OMRequest modifiedRequest) throws Exception {
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+
+    // As we have not yet called validateAndUpdateCache, get() should
+    // return null.
+
+    Assert.assertNull(omMetadataManager.getBucketTable().get(bucketKey));
+    OMBucketCreateRequest omBucketCreateRequest =
+        new OMBucketCreateRequest(modifiedRequest);
+
+
+        omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 1,
+            ozoneManagerDoubleBufferHelper);
+
+    // After validateAndUpdateCache, an entry should be added to the cache,
+    // so get should return a non-null value.
+    OmBucketInfo dbBucketInfo =
+        omMetadataManager.getBucketTable().get(bucketKey);
+    Assert.assertNotNull(dbBucketInfo);
+
+    // verify table data with actual request data.
+    OmBucketInfo bucketInfoFromProto = OmBucketInfo.getFromProtobuf(
+        modifiedRequest.getCreateBucketRequest().getBucketInfo());
+
+    Assert.assertEquals(bucketInfoFromProto.getCreationTime(),
+        dbBucketInfo.getCreationTime());
+    Assert.assertEquals(bucketInfoFromProto.getModificationTime(),
+        dbBucketInfo.getModificationTime());
+    Assert.assertEquals(bucketInfoFromProto.getAcls(),
+        dbBucketInfo.getAcls());
+    Assert.assertEquals(bucketInfoFromProto.getIsVersionEnabled(),
+        dbBucketInfo.getIsVersionEnabled());
+    Assert.assertEquals(bucketInfoFromProto.getStorageType(),
+        dbBucketInfo.getStorageType());
+    Assert.assertEquals(bucketInfoFromProto.getMetadata(),
+        dbBucketInfo.getMetadata());
+    Assert.assertEquals(bucketInfoFromProto.getEncryptionKeyInfo(),
+        dbBucketInfo.getEncryptionKeyInfo());
+
+    // verify OMResponse.
+    verifySuccessCreateBucketResponse(omClientResponse.getOMResponse());
+
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java
index f0f0320..454cfbb 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java
@@ -88,7 +88,8 @@ public class TestOMDirectoryCreateRequestV1 {
     OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
     ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
             folder.newFolder().getAbsolutePath());
-    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    TestOMRequestUtils.configureFSOptimizedPaths(ozoneConfiguration,
+            true, OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1);
     omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
     when(ozoneManager.getMetrics()).thenReturn(omMetrics);
     when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestV1.java
index 046ac90..2631c91 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestV1.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.ozone.om.request.file;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
@@ -183,11 +182,10 @@ public class TestOMFileCreateRequestV1 extends TestOMFileCreateRequest {
   @Override
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration config = super.getOzoneConfiguration();
-    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
     // omLayoutVersionV1 flag will be set while invoking OzoneManager#start(),
     // which is not invoked in this test. Hence this configuration is set
     // explicitly to populate prefix tables.
-    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
     return config;
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestV1.java
index 4e74979..5d21226 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestV1.java
@@ -23,7 +23,6 @@ package org.apache.hadoop.ozone.om.request.key;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
@@ -43,11 +42,10 @@ public class TestOMAllocateBlockRequestV1 extends TestOMAllocateBlockRequest {
   @Override
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration config = super.getOzoneConfiguration();
-    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
     // The bucketFSOptimized flag is normally set by OzoneManager#start(),
     // which is not invoked in this test. Hence the test sets the flag
     // explicitly to populate the prefix tables.
-    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
     return config;
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestV1.java
index ed1e2bd..379dbcf 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestV1.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.ozone.om.request.key;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
@@ -87,11 +86,10 @@ public class TestOMKeyCommitRequestV1 extends TestOMKeyCommitRequest {
   @Override
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration config = super.getOzoneConfiguration();
-    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
     // The bucketFSOptimized flag is normally set by OzoneManager#start(),
     // which is not invoked in this test. Hence the test sets the flag
     // explicitly to populate the prefix tables.
-    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
     return config;
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestV1.java
index 83c640d..b65443d 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestV1.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.ozone.om.request.key;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
@@ -47,11 +46,10 @@ public class TestOMKeyCreateRequestV1 extends TestOMKeyCreateRequest {
   @Override
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration config = super.getOzoneConfiguration();
-    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
     // The bucketFSOptimized flag is normally set by OzoneManager#start(),
     // which is not invoked in this test. Hence the test sets the flag
     // explicitly to populate the prefix tables.
-    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
     return config;
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestV1.java
index 7527e78..2c43d51 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestV1.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.ozone.om.request.key;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
@@ -61,11 +60,10 @@ public class TestOMKeyDeleteRequestV1 extends TestOMKeyDeleteRequest {
   @Override
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration config = super.getOzoneConfiguration();
-    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
     // The bucketFSOptimized flag is normally set by OzoneManager#start(),
     // which is not invoked in this test. Hence the test sets the flag
     // explicitly to populate the prefix tables.
-    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
     return config;
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java
index bc4345e..e1549e1 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.ozone.om.response.file;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
@@ -68,11 +67,10 @@ public class TestOMFileCreateResponseV1 extends TestOMKeyCreateResponse {
   @Override
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration config = super.getOzoneConfiguration();
-    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
     // The bucketFSOptimized flag is normally set by OzoneManager#start(),
     // which is not invoked in this test. Hence the test sets the flag
     // explicitly to populate the prefix tables.
-    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
     return config;
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseV1.java
index 92b3efe..1626079 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseV1.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.ozone.om.response.key;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
@@ -73,11 +72,10 @@ public class TestOMAllocateBlockResponseV1
   @Override
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration config = super.getOzoneConfiguration();
-    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
     // The bucketFSOptimized flag is normally set by OzoneManager#start(),
     // which is not invoked in this test. Hence the test sets the flag
     // explicitly to populate the prefix tables.
-    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
     return config;
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
index 4d68a4b..7275f69 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.ozone.om.response.key;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
@@ -95,11 +94,10 @@ public class TestOMKeyCommitResponseV1 extends TestOMKeyCommitResponse {
   @Override
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration config = super.getOzoneConfiguration();
-    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
     // The bucketFSOptimized flag is normally set by OzoneManager#start(),
     // which is not invoked in this test. Hence the test sets the flag
     // explicitly to populate the prefix tables.
-    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
     return config;
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseV1.java
index e51a06b..6299639 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseV1.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.ozone.om.response.key;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
@@ -38,11 +37,10 @@ public class TestOMKeyCreateResponseV1 extends TestOMKeyCreateResponse {
   @Override
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration config = super.getOzoneConfiguration();
-    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
     // The bucketFSOptimized flag is normally set by OzoneManager#start(),
     // which is not invoked in this test. Hence the test sets the flag
     // explicitly to populate the prefix tables.
-    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
     return config;
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseV1.java
index d35c79e..d46fe72 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseV1.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.ozone.om.response.key;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
@@ -76,11 +75,10 @@ public class TestOMKeyDeleteResponseV1 extends TestOMKeyDeleteResponse {
   @Override
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration config = super.getOzoneConfiguration();
-    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
     // The bucketFSOptimized flag is normally set by OzoneManager#start(),
     // which is not invoked in this test. Hence the test sets the flag
     // explicitly to populate the prefix tables.
-    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
     return config;
   }
 }
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
index b4153f0..150108c 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
@@ -547,7 +547,18 @@ public class BasicOzoneClientAdapterImpl implements OzoneClientAdapter {
   }
 
   @Override
-  public String getBucketLayoutVersion() {
-    return bucket.getMetadata().get(OMConfigKeys.OZONE_OM_LAYOUT_VERSION);
+  public boolean isFSOptimizedBucket() {
+    // Layout version V1 represents the FS-optimized path layout.
+    boolean layoutVersionEnabled =
+            StringUtils.equalsIgnoreCase(
+                    OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1,
+                    bucket.getMetadata()
+                            .get(OMConfigKeys.OZONE_OM_LAYOUT_VERSION));
+
+    boolean fsEnabled =
+            Boolean.parseBoolean(bucket.getMetadata()
+                    .get(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS));
+
+    return layoutVersionEnabled && fsEnabled;
   }
 }
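
The adapter change above makes the bucket-level decision explicit: a bucket is treated as FS-optimized only when its metadata carries both the V1 layout version and the enable-filesystem-paths flag. A standalone sketch of the same two-flag check (the metadata key strings below are illustrative stand-ins for the OMConfigKeys constants):

    import java.util.Map;

    final class FsOptimizedBucketCheck {
      // Illustrative key names; the real code reads them from OMConfigKeys.
      private static final String LAYOUT_VERSION_KEY =
          "ozone.om.layout.version";
      private static final String ENABLE_FS_PATHS_KEY =
          "ozone.om.enable.filesystem.paths";

      static boolean isFSOptimized(Map<String, String> bucketMetadata) {
        // Both conditions must hold; either flag alone is not enough.
        boolean layoutV1 =
            "V1".equalsIgnoreCase(bucketMetadata.get(LAYOUT_VERSION_KEY));
        boolean fsPathsEnabled =
            Boolean.parseBoolean(bucketMetadata.get(ENABLE_FS_PATHS_KEY));
        return layoutV1 && fsPathsEnabled;
      }
    }

Both lookups are null-safe: a missing metadata entry simply evaluates to false, which matches the adapter's behavior of falling back to the non-optimized code path.
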
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index 2f7b589..d2b9a6d 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -42,7 +42,6 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.conf.StorageUnit;
 import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -327,9 +326,7 @@ public class BasicOzoneFileSystem extends FileSystem {
       return false;
     }
 
-    String layOutVersion = adapter.getBucketLayoutVersion();
-    if (layOutVersion != null &&
-            OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1.equals(layOutVersion)) {
+    if (adapter.isFSOptimizedBucket()) {
       return renameV1(srcPath, dstPath);
     }
 
@@ -509,10 +506,7 @@ public class BasicOzoneFileSystem extends FileSystem {
     statistics.incrementWriteOps(1);
     LOG.debug("Delete path {} - recursive {}", f, recursive);
 
-    String layOutVersion = adapter.getBucketLayoutVersion();
-    if (layOutVersion != null &&
-            OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1.equals(layOutVersion)) {
-
+    if (adapter.isFSOptimizedBucket()) {
       if (f.isRoot()) {
         LOG.warn("Cannot delete root directory.");
         return false;
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
index 84cba47..6fdac47 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
@@ -59,7 +59,6 @@ import org.apache.hadoop.ozone.client.OzoneKey;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
@@ -1049,8 +1048,8 @@ public class BasicRootedOzoneClientAdapterImpl
   }
 
   @Override
-  public String getBucketLayoutVersion() {
+  public boolean isFSOptimizedBucket() {
     // TODO: Need to refine this part.
-    return OMConfigKeys.OZONE_OM_LAYOUT_VERSION_DEFAULT;
+    return false;
   }
 }
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
index 4a4d91b..0258f69 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
@@ -77,5 +77,5 @@ public interface OzoneClientAdapter {
   FileStatusAdapter getFileStatus(String key, URI uri,
       Path qualifiedPath, String userName) throws IOException;
 
-  String getBucketLayoutVersion();
+  boolean isFSOptimizedBucket();
 }




[ozone] 06/19: HDDS-4321. Fix compilation errors : merge HDDS-4308 and HDDS-4473 changes into the branch (#1668)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit d7a38b0f266f089c15fda9ac85bd3261f113f88c
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Wed Dec 9 13:45:35 2020 +0530

    HDDS-4321. Fix compilation errors : merge HDDS-4308 and HDDS-4473 changes into the branch (#1668)
---
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java    | 19 +++++++++----------
 .../ozone/om/request/file/OMFileCreateRequestV1.java  |  2 +-
 .../ozone/om/request/key/OMKeyCommitRequestV1.java    |  3 ++-
 .../ozone/om/request/key/OMKeyDeleteRequestV1.java    |  7 ++++---
 4 files changed, 16 insertions(+), 15 deletions(-)

diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 46ffce8..dc70369 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -2427,20 +2427,19 @@ public class KeyManagerImpl implements KeyManager {
       metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName,
               bucketName);
     }
+    List<OmKeyInfo> keyInfoList = new ArrayList<>(fileStatusList.size());
     for (OzoneFileStatus fileStatus : fileStatusList) {
       if (fileStatus.isFile()) {
-        // refreshPipeline flag check has been removed as part of
-        // https://issues.apache.org/jira/browse/HDDS-3658.
-        // Please refer this jira for more details.
-        refresh(fileStatus.getKeyInfo());
-
-        // No need to check if a key is deleted or not here, this is handled
-        // when adding entries to cacheKeyMap from DB.
-        if (args.getSortDatanodes()) {
-          sortDatanodes(clientAddress, fileStatus.getKeyInfo());
-        }
+        keyInfoList.add(fileStatus.getKeyInfo());
       }
     }
+    // refreshPipeline flag check has been removed as part of
+    // https://issues.apache.org/jira/browse/HDDS-3658.
+    // Please refer to this JIRA for more details.
+    refreshPipeline(keyInfoList);
+    if (args.getSortDatanodes()) {
+      sortDatanodes(clientAddress, keyInfoList.toArray(new OmKeyInfo[0]));
+    }
     fileStatusFinalList.addAll(fileStatusList);
     return fileStatusFinalList;
   }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
index cabd407..606e15b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
@@ -214,7 +214,7 @@ public class OMFileCreateRequestV1 extends OMFileCreateRequest {
           .setOpenVersion(openVersion).build())
           .setCmdType(Type.CreateFile);
       omClientResponse = new OMFileCreateResponseV1(omResponse.build(),
-              omFileInfo, missingParentInfos, clientID, omBucketInfo);
+              omFileInfo, missingParentInfos, clientID, omBucketInfo.copyObject());
 
       result = Result.SUCCESS;
     } catch (IOException ex) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java
index d7ffa2d..66b28ae 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java
@@ -170,7 +170,8 @@ public class OMKeyCommitRequestV1 extends OMKeyCommitRequest {
       omBucketInfo.incrUsedBytes(correctedSpace);
 
       omClientResponse = new OMKeyCommitResponseV1(omResponse.build(),
-              omKeyInfo, dbFileKey, dbOpenFileKey, omVolumeArgs, omBucketInfo);
+              omKeyInfo, dbFileKey, dbOpenFileKey, omVolumeArgs,
+              omBucketInfo.copyObject());
 
       result = Result.SUCCESS;
     } catch (IOException ex) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java
index 93531bc..af3bc82 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java
@@ -147,6 +147,7 @@ public class OMKeyDeleteRequestV1 extends OMKeyDeleteRequest {
 
       omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
 
+      // TODO: HDDS-4565: consider all the sub-paths if the path is a dir.
       long quotaReleased = sumBlockLengths(omKeyInfo);
       omBucketInfo.incrUsedBytes(-quotaReleased);
       omBucketInfo.incrUsedNamespace(-1L);
@@ -157,9 +158,9 @@ public class OMKeyDeleteRequestV1 extends OMKeyDeleteRequest {
       // TODO: Revisit if we need it later.
 
       omClientResponse = new OMKeyDeleteResponseV1(omResponse
-          .setDeleteKeyResponse(DeleteKeyResponse.newBuilder()).build(),
-          omKeyInfo, ozoneManager.isRatisEnabled(),
-          omBucketInfo, keyStatus.isDirectory());
+              .setDeleteKeyResponse(DeleteKeyResponse.newBuilder()).build(),
+              omKeyInfo, ozoneManager.isRatisEnabled(),
+              omBucketInfo.copyObject(), keyStatus.isDirectory());
 
       result = Result.SUCCESS;
     } catch (IOException ex) {




[ozone] 16/19: HDDS-4781. [FSO]S3MultiPart: Implement create and commit upload part file (#1897)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit ee4fd7d8050f41b7b433b22b87e9849594a84afb
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Wed Feb 10 15:00:32 2021 +0530

    HDDS-4781. [FSO]S3MultiPart: Implement create and commit upload part file (#1897)
---
 .../apache/hadoop/fs/ozone/TestOzoneFileOps.java   |  67 ------
 .../rpc/TestOzoneClientMultipartUploadV1.java      |  93 +++++++++
 .../om/ratis/utils/OzoneManagerRatisUtils.java     |   4 +
 .../hadoop/ozone/om/request/key/OMKeyRequest.java  |  14 +-
 .../S3InitiateMultipartUploadRequest.java          |  12 +-
 .../S3InitiateMultipartUploadRequestV1.java        |  25 +--
 .../S3MultipartUploadCommitPartRequest.java        |  35 +++-
 ...a => S3MultipartUploadCommitPartRequestV1.java} | 114 ++++-------
 .../S3MultipartUploadCommitPartResponseV1.java     |  66 ++++++
 .../s3/multipart/TestS3MultipartRequest.java       |  14 +-
 .../TestS3MultipartUploadCommitPartRequest.java    |  62 ++++--
 .../TestS3MultipartUploadCommitPartRequestV1.java  | 104 ++++++++++
 .../s3/multipart/TestS3MultipartResponse.java      |  76 +++++++
 .../TestS3MultipartUploadCommitPartResponseV1.java | 226 +++++++++++++++++++++
 14 files changed, 712 insertions(+), 200 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java
index 176d0c4..147a9ce 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java
@@ -24,8 +24,6 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.TestDataUtil;
@@ -37,7 +35,6 @@ import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.StringUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Assert;
@@ -125,34 +122,6 @@ public class TestOzoneFileOps {
             omMgr);
     openFileKey = d2ObjectID + OzoneConsts.OM_KEY_PREFIX + file.getName();
 
-    // verify entries in directory table
-    TableIterator<String, ? extends
-            Table.KeyValue<String, OmDirectoryInfo>> iterator =
-            omMgr.getDirectoryTable().iterator();
-    iterator.seekToFirst();
-    int count = dirKeys.size();
-    Assert.assertEquals("Unexpected directory table entries!", 2, count);
-    while (iterator.hasNext()) {
-      count--;
-      Table.KeyValue<String, OmDirectoryInfo> value = iterator.next();
-      verifyKeyFormat(value.getKey(), dirKeys);
-    }
-    Assert.assertEquals("Unexpected directory table entries!", 0, count);
-
-    // verify entries in open key table
-    TableIterator<String, ? extends
-            Table.KeyValue<String, OmKeyInfo>> keysItr =
-            omMgr.getOpenKeyTable().iterator();
-    keysItr.seekToFirst();
-
-    while (keysItr.hasNext()) {
-      count++;
-      Table.KeyValue<String, OmKeyInfo> value = keysItr.next();
-      verifyOpenKeyFormat(value.getKey(), openFileKey);
-      verifyOMFileInfoFormat(value.getValue(), file.getName(), d2ObjectID);
-    }
-    Assert.assertEquals("Unexpected file table entries!", 1, count);
-
     // trigger CommitKeyRequest
     outputStream.close();
 
@@ -183,42 +152,6 @@ public class TestOzoneFileOps {
             omKeyInfo.getPath());
   }
 
-  /**
-   * Verify key name format and the DB key existence in the expected dirKeys
-   * list.
-   *
-   * @param key     table keyName
-   * @param dirKeys expected keyName
-   */
-  private void verifyKeyFormat(String key, ArrayList<String> dirKeys) {
-    String[] keyParts = StringUtils.split(key,
-            OzoneConsts.OM_KEY_PREFIX.charAt(0));
-    Assert.assertEquals("Invalid KeyName", 2, keyParts.length);
-    boolean removed = dirKeys.remove(key);
-    Assert.assertTrue("Key:" + key + " doesn't exists in directory table!",
-            removed);
-  }
-
-  /**
-   * Verify key name format and the DB key existence in the expected
-   * openFileKeys list.
-   *
-   * @param key          table keyName
-   * @param openFileKey expected keyName
-   */
-  private void verifyOpenKeyFormat(String key, String openFileKey) {
-    String[] keyParts = StringUtils.split(key,
-            OzoneConsts.OM_KEY_PREFIX.charAt(0));
-    Assert.assertEquals("Invalid KeyName:" + key, 3, keyParts.length);
-    String[] expectedOpenFileParts = StringUtils.split(openFileKey,
-            OzoneConsts.OM_KEY_PREFIX.charAt(0));
-    Assert.assertEquals("ParentId/Key:" + expectedOpenFileParts[0]
-                    + " doesn't exists in openFileTable!",
-            expectedOpenFileParts[0] + OzoneConsts.OM_KEY_PREFIX
-                    + expectedOpenFileParts[1],
-            keyParts[0] + OzoneConsts.OM_KEY_PREFIX + keyParts[1]);
-  }
-
   long verifyDirKey(long parentId, String dirKey, String absolutePath,
                     ArrayList<String> dirKeys, OMMetadataManager omMgr)
           throws Exception {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java
index 93e5826..af241c5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java
@@ -17,6 +17,7 @@
 
 package org.apache.hadoop.ozone.client.rpc;
 
+import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.client.ObjectStore;
@@ -24,8 +25,13 @@ import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
+
+import static org.apache.hadoop.hdds.StringUtils.string2Bytes;
+import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNotNull;
 
@@ -179,4 +185,91 @@ public class TestOzoneClientMultipartUploadV1 {
     assertNotNull(multipartInfo.getUploadID());
   }
 
+  @Test
+  public void testUploadPartWithNoOverride() throws IOException {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+    String sampleData = "sample Value";
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+    OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
+            STAND_ALONE, ONE);
+
+    assertNotNull(multipartInfo);
+    String uploadID = multipartInfo.getUploadID();
+    Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
+    Assert.assertEquals(bucketName, multipartInfo.getBucketName());
+    Assert.assertEquals(keyName, multipartInfo.getKeyName());
+    assertNotNull(multipartInfo.getUploadID());
+
+    OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
+            sampleData.length(), 1, uploadID);
+    ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length());
+    ozoneOutputStream.close();
+
+    OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream
+            .getCommitUploadPartInfo();
+
+    assertNotNull(commitUploadPartInfo);
+    assertNotNull(commitUploadPartInfo.getPartName());
+  }
+
+  @Test
+  public void testUploadPartOverrideWithRatis() throws IOException {
+
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+    String sampleData = "sample Value";
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+    OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
+            ReplicationType.RATIS, THREE);
+
+    assertNotNull(multipartInfo);
+    String uploadID = multipartInfo.getUploadID();
+    Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
+    Assert.assertEquals(bucketName, multipartInfo.getBucketName());
+    Assert.assertEquals(keyName, multipartInfo.getKeyName());
+    assertNotNull(multipartInfo.getUploadID());
+
+    int partNumber = 1;
+
+    OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
+            sampleData.length(), partNumber, uploadID);
+    ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length());
+    ozoneOutputStream.close();
+
+    OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream
+            .getCommitUploadPartInfo();
+
+    assertNotNull(commitUploadPartInfo);
+    String partName = commitUploadPartInfo.getPartName();
+    assertNotNull(commitUploadPartInfo.getPartName());
+
+    // Overwrite the part by creating a part key with the same part number.
+    sampleData = "sample Data Changed";
+    ozoneOutputStream = bucket.createMultipartKey(keyName,
+            sampleData.length(), partNumber, uploadID);
+    ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length());
+    ozoneOutputStream.close();
+
+    commitUploadPartInfo = ozoneOutputStream
+            .getCommitUploadPartInfo();
+
+    assertNotNull(commitUploadPartInfo);
+    assertNotNull(commitUploadPartInfo.getPartName());
+
+    // PartName should be different from old part Name.
+    assertNotEquals("Part names should be different", partName,
+            commitUploadPartInfo.getPartName());
+  }
+
 }
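
Taken together, the tests above exercise the client-side part-upload flow: initiate the upload, write a part, close the stream, and read back the commit info. For reference, a condensed sketch of that flow including the completion step the tests stop short of; completeMultipartUpload and its exact signature are an assumption here, and the snippet reuses the test class's imports plus java.util.Map and java.util.TreeMap:

    // Hedged sketch of a full single-part MPU against an OzoneBucket.
    OmMultipartInfo info = bucket.initiateMultipartUpload(keyName,
        ReplicationType.RATIS, THREE);
    String uploadID = info.getUploadID();

    byte[] data = string2Bytes("sample Value");
    OzoneOutputStream out = bucket.createMultipartKey(keyName,
        data.length, 1, uploadID);
    out.write(data, 0, data.length);
    out.close();
    String partName = out.getCommitUploadPartInfo().getPartName();

    // Complete the upload with the committed part names (assumed API).
    Map<Integer, String> partsMap = new TreeMap<>();
    partsMap.put(1, partName);
    bucket.completeMultipartUpload(keyName, uploadID, partsMap);
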
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index 8daa12b..40a5396 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.ozone.om.request.s3.multipart.S3InitiateMultipartUpload
 import org.apache.hadoop.ozone.om.request.s3.multipart.S3InitiateMultipartUploadRequestV1;
 import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadAbortRequest;
 import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCommitPartRequest;
+import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCommitPartRequestV1;
 import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCompleteRequest;
 import org.apache.hadoop.ozone.om.request.s3.security.S3GetSecretRequest;
 import org.apache.hadoop.ozone.om.request.security.OMCancelDelegationTokenRequest;
@@ -188,6 +189,9 @@ public final class OzoneManagerRatisUtils {
       }
       return new S3InitiateMultipartUploadRequest(omRequest);
     case CommitMultiPartUpload:
+      if (isBucketFSOptimized()) {
+        return new S3MultipartUploadCommitPartRequestV1(omRequest);
+      }
       return new S3MultipartUploadCommitPartRequest(omRequest);
     case AbortMultiPartUpload:
       return new S3MultipartUploadAbortRequest(omRequest);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
index 7319690..85b8bc3 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
@@ -692,9 +692,17 @@ public abstract class OMKeyRequest extends OMClientRequest {
     // error no such multipart upload.
     String uploadID = args.getMultipartUploadID();
     Preconditions.checkNotNull(uploadID);
-    String multipartKey = omMetadataManager
-            .getMultipartKey(args.getVolumeName(), args.getBucketName(),
-                    args.getKeyName(), uploadID);
+    String multipartKey = "";
+    if (omPathInfo != null) {
+      // FileTable metadata format
+      multipartKey = omMetadataManager.getMultipartKey(
+              omPathInfo.getLastKnownParentId(),
+              omPathInfo.getLeafNodeName(), uploadID);
+    } else {
+      multipartKey = omMetadataManager
+              .getMultipartKey(args.getVolumeName(), args.getBucketName(),
+                      args.getKeyName(), uploadID);
+    }
     OmKeyInfo partKeyInfo = omMetadataManager.getOpenKeyTable().get(
             multipartKey);
     if (partKeyInfo == null) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
index b5c9fdf..14dba3d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
@@ -216,7 +216,17 @@ public class S3InitiateMultipartUploadRequest extends OMKeyRequest {
             volumeName, bucketName);
       }
     }
+    logResult(ozoneManager, multipartInfoInitiateRequest, auditMap, volumeName,
+            bucketName, keyName, exception, result);
 
+    return omClientResponse;
+  }
+
+  @SuppressWarnings("parameternumber")
+  protected void logResult(OzoneManager ozoneManager,
+      MultipartInfoInitiateRequest multipartInfoInitiateRequest,
+      Map<String, String> auditMap, String volumeName, String bucketName,
+      String keyName, IOException exception, Result result) {
     // audit log
     auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(
         OMAction.INITIATE_MULTIPART_UPLOAD, auditMap,
@@ -238,7 +248,5 @@ public class S3InitiateMultipartUploadRequest extends OMKeyRequest {
       LOG.error("Unrecognized Result for S3InitiateMultipartUploadRequest: {}",
           multipartInfoInitiateRequest);
     }
-
-    return omClientResponse;
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestV1.java
index 3507090..d472bc1 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestV1.java
@@ -22,7 +22,6 @@ import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-import org.apache.hadoop.ozone.audit.OMAction;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
@@ -228,28 +227,8 @@ public class S3InitiateMultipartUploadRequestV1
             volumeName, bucketName);
       }
     }
-
-    // audit log
-    auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(
-        OMAction.INITIATE_MULTIPART_UPLOAD, auditMap,
-        exception, getOmRequest().getUserInfo()));
-
-    switch (result) {
-    case SUCCESS:
-      LOG.debug("S3 InitiateMultipart Upload request for Key {} in " +
-              "Volume/Bucket {}/{} is successfully completed", keyName,
-          volumeName, bucketName);
-      break;
-    case FAILURE:
-      ozoneManager.getMetrics().incNumInitiateMultipartUploadFails();
-      LOG.error("S3 InitiateMultipart Upload request for Key {} in " +
-              "Volume/Bucket {}/{} is failed", keyName, volumeName, bucketName,
-          exception);
-      break;
-    default:
-      LOG.error("Unrecognized Result for S3InitiateMultipartUploadRequest: {}",
-          multipartInfoInitiateRequest);
-    }
+    logResult(ozoneManager, multipartInfoInitiateRequest, auditMap, volumeName,
+            bucketName, keyName, exception, result);
 
     return omClientResponse;
   }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
index d529f92..d235998 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
@@ -129,16 +129,16 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
       validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
 
       String uploadID = keyArgs.getMultipartUploadID();
-      multipartKey = omMetadataManager.getMultipartKey(volumeName, bucketName,
-          keyName, uploadID);
+      multipartKey = getMultipartKey(volumeName, bucketName, keyName,
+              omMetadataManager, uploadID);
 
       multipartKeyInfo = omMetadataManager.getMultipartInfoTable()
           .get(multipartKey);
 
       long clientID = multipartCommitUploadPartRequest.getClientID();
 
-      openKey = omMetadataManager.getOpenKey(
-          volumeName, bucketName, keyName, clientID);
+      openKey = getOpenKey(volumeName, bucketName, keyName, omMetadataManager,
+              clientID);
 
       String ozoneKey = omMetadataManager.getOzoneKey(
           volumeName, bucketName, keyName);
@@ -248,6 +248,31 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
       }
     }
 
+    logResult(ozoneManager, multipartCommitUploadPartRequest, keyArgs,
+            auditMap, volumeName, bucketName, keyName, exception, partName,
+            result);
+
+    return omClientResponse;
+  }
+
+  private String getOpenKey(String volumeName, String bucketName,
+      String keyName, OMMetadataManager omMetadataManager, long clientID) {
+    return omMetadataManager.getOpenKey(volumeName, bucketName,
+            keyName, clientID);
+  }
+
+  private String getMultipartKey(String volumeName, String bucketName,
+      String keyName, OMMetadataManager omMetadataManager, String uploadID) {
+    return omMetadataManager.getMultipartKey(volumeName, bucketName,
+        keyName, uploadID);
+  }
+
+  @SuppressWarnings("parameternumber")
+  protected void logResult(OzoneManager ozoneManager,
+      MultipartCommitUploadPartRequest multipartCommitUploadPartRequest,
+      KeyArgs keyArgs, Map<String, String> auditMap, String volumeName,
+      String bucketName, String keyName, IOException exception,
+      String partName, Result result) {
     // audit log
     // Add MPU related information.
     auditMap.put(OzoneConsts.MULTIPART_UPLOAD_PART_NUMBER,
@@ -273,8 +298,6 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
       LOG.error("Unrecognized Result for S3MultipartUploadCommitPartRequest: " +
           "{}", multipartCommitUploadPartRequest);
     }
-
-    return omClientResponse;
   }
 
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequestV1.java
similarity index 75%
copy from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
copy to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequestV1.java
index d529f92..5546010 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequestV1.java
@@ -19,8 +19,8 @@
 package org.apache.hadoop.ozone.om.request.s3.multipart;
 
 import com.google.common.base.Optional;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
@@ -28,30 +28,25 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.s3.multipart
-    .S3MultipartUploadCommitPartResponse;
+import org.apache.hadoop.ozone.om.response.s3.multipart.S3MultipartUploadCommitPartResponseV1;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .MultipartCommitUploadPartRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .MultipartCommitUploadPartResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartCommitUploadPartRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartCommitUploadPartResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Iterator;
 import java.util.Map;
 import java.util.stream.Collectors;
 
@@ -61,31 +56,17 @@ import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_L
 /**
  * Handle Multipart upload commit upload part file.
  */
-public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
+public class S3MultipartUploadCommitPartRequestV1
+        extends S3MultipartUploadCommitPartRequest {
 
   private static final Logger LOG =
-      LoggerFactory.getLogger(S3MultipartUploadCommitPartRequest.class);
+      LoggerFactory.getLogger(S3MultipartUploadCommitPartRequestV1.class);
 
-  public S3MultipartUploadCommitPartRequest(OMRequest omRequest) {
+  public S3MultipartUploadCommitPartRequestV1(OMRequest omRequest) {
     super(omRequest);
   }
 
   @Override
-  public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
-    MultipartCommitUploadPartRequest multipartCommitUploadPartRequest =
-        getOmRequest().getCommitMultiPartUploadRequest();
-
-    KeyArgs keyArgs = multipartCommitUploadPartRequest.getKeyArgs();
-    return getOmRequest().toBuilder().setCommitMultiPartUploadRequest(
-        multipartCommitUploadPartRequest.toBuilder()
-            .setKeyArgs(keyArgs.toBuilder().setModificationTime(Time.now())
-                .setKeyName(validateAndNormalizeKey(
-                    ozoneManager.getEnableFileSystemPaths(),
-                    keyArgs.getKeyName()))))
-        .setUserInfo(getUserInfo()).build();
-  }
-
-  @Override
   @SuppressWarnings("methodlength")
   public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
       long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
@@ -128,26 +109,31 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
 
       validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
 
+      String fileName = OzoneFSUtils.getFileName(keyName);
+      Iterator<Path> pathComponents = Paths.get(keyName).iterator();
+      String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+      omBucketInfo = omMetadataManager.getBucketTable().get(bucketKey);
+      long bucketId = omBucketInfo.getObjectID();
+      long parentID = OMFileRequest.getParentID(bucketId, pathComponents,
+              keyName, omMetadataManager);
+
       String uploadID = keyArgs.getMultipartUploadID();
-      multipartKey = omMetadataManager.getMultipartKey(volumeName, bucketName,
-          keyName, uploadID);
+      multipartKey = omMetadataManager.getMultipartKey(parentID,
+          fileName, uploadID);
 
       multipartKeyInfo = omMetadataManager.getMultipartInfoTable()
           .get(multipartKey);
 
       long clientID = multipartCommitUploadPartRequest.getClientID();
 
-      openKey = omMetadataManager.getOpenKey(
-          volumeName, bucketName, keyName, clientID);
+      openKey = omMetadataManager.getOpenFileName(parentID, fileName, clientID);
 
-      String ozoneKey = omMetadataManager.getOzoneKey(
-          volumeName, bucketName, keyName);
-
-      omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
+      omKeyInfo = OMFileRequest.getOmKeyInfoFromFileTable(true,
+              omMetadataManager, openKey, keyName);
 
       if (omKeyInfo == null) {
         throw new OMException("Failed to commit Multipart Upload key, as " +
-            openKey + "entry is not found in the openKey table",
+            openKey + " entry is not found in the openKey table",
             KEY_NOT_FOUND);
       }
 
@@ -155,12 +141,13 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
       omKeyInfo.setDataSize(keyArgs.getDataSize());
       omKeyInfo.updateLocationInfoList(keyArgs.getKeyLocationsList().stream()
           .map(OmKeyLocationInfo::getFromProtobuf)
-          .collect(Collectors.toList()), true);
+          .collect(Collectors.toList()));
       // Set Modification time
       omKeyInfo.setModificationTime(keyArgs.getModificationTime());
       // Set the UpdateID to current transactionLogIndex
       omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
 
+      String ozoneKey = omMetadataManager.getOzonePathKey(parentID, fileName);
       partName = ozoneKey + clientID;
 
       if (multipartKeyInfo == null) {
@@ -197,8 +184,6 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
       // S3MultipartUploadCommitPartResponse before being added to
       // DeletedKeyTable.
 
-      // Add to cache.
-
       // Delete from open key table and add it to multipart info table.
       // No need to add cache entries to the delete table, as no
       // read/write requests use that info for validation.
@@ -225,7 +210,7 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
       omResponse.setCommitMultiPartUploadResponse(
           MultipartCommitUploadPartResponse.newBuilder()
               .setPartName(partName));
-      omClientResponse = new S3MultipartUploadCommitPartResponse(
+      omClientResponse = new S3MultipartUploadCommitPartResponseV1(
           omResponse.build(), multipartKey, openKey,
           multipartKeyInfo, oldPartKeyInfo, omKeyInfo,
           ozoneManager.isRatisEnabled(),
@@ -235,7 +220,7 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
     } catch (IOException ex) {
       result = Result.FAILURE;
       exception = ex;
-      omClientResponse = new S3MultipartUploadCommitPartResponse(
+      omClientResponse = new S3MultipartUploadCommitPartResponseV1(
           createErrorOMResponse(omResponse, exception), multipartKey, openKey,
           multipartKeyInfo, oldPartKeyInfo, omKeyInfo,
           ozoneManager.isRatisEnabled(), copyBucketInfo);
@@ -248,34 +233,11 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
       }
     }
 
-    // audit log
-    // Add MPU related information.
-    auditMap.put(OzoneConsts.MULTIPART_UPLOAD_PART_NUMBER,
-        String.valueOf(keyArgs.getMultipartNumber()));
-    auditMap.put(OzoneConsts.MULTIPART_UPLOAD_PART_NAME, partName);
-    auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(
-        OMAction.COMMIT_MULTIPART_UPLOAD_PARTKEY,
-        auditMap, exception,
-        getOmRequest().getUserInfo()));
-
-    switch (result) {
-    case SUCCESS:
-      LOG.debug("MultipartUpload Commit is successfully for Key:{} in " +
-          "Volume/Bucket {}/{}", keyName, volumeName, bucketName);
-      break;
-    case FAILURE:
-      ozoneManager.getMetrics().incNumCommitMultipartUploadPartFails();
-      LOG.error("MultipartUpload Commit is failed for Key:{} in " +
-          "Volume/Bucket {}/{}", keyName, volumeName, bucketName,
-          exception);
-      break;
-    default:
-      LOG.error("Unrecognized Result for S3MultipartUploadCommitPartRequest: " +
-          "{}", multipartCommitUploadPartRequest);
-    }
+    logResult(ozoneManager, multipartCommitUploadPartRequest, keyArgs,
+            auditMap, volumeName, bucketName, keyName, exception, partName,
+            result);
 
     return omClientResponse;
   }
 
 }
-
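
A quick illustration of the partName change in the hunk above: under the V1
(prefix) layout the part name is built from the parent directory's objectID
and the file name, with the clientID appended directly. Below is a minimal,
self-contained sketch; the "<parentID>/<fileName>" shape is an assumption
consistent with the test assertions later in this patch, and the
authoritative builder is OMMetadataManager#getOzonePathKey:

public class PartNameSketch {
  // Assumed shape of getOzonePathKey(parentID, fileName); the real
  // implementation lives in OmMetadataManagerImpl.
  static String ozonePathKey(long parentID, String fileName) {
    return parentID + "/" + fileName;
  }

  public static void main(String[] args) {
    long parentID = 1027L;          // e.g. objectID of "c" in "a/b/c/"
    String fileName = "part.bin";   // leaf file name
    long clientID = 1614230000000L; // client ID of the open key
    // Mirrors "partName = ozoneKey + clientID;" in the hunk above.
    String partName = ozonePathKey(parentID, fileName) + clientID;
    System.out.println(partName);   // 1027/part.bin1614230000000
  }
}
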
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponseV1.java
new file mode 100644
index 0000000..d8e5cc5
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponseV1.java
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.s3.multipart;
+
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.MULTIPARTFILEINFO_TABLE;
+
+/**
+ * Response for S3MultipartUploadCommitPart request.
+ */
+@CleanupTableInfo(cleanupTables = {OPEN_FILE_TABLE, DELETED_TABLE,
+    MULTIPARTFILEINFO_TABLE})
+public class S3MultipartUploadCommitPartResponseV1
+        extends S3MultipartUploadCommitPartResponse {
+
+  /**
+   * Regular response.
+   * 1. Update MultipartKey in MultipartInfoTable with new PartKeyInfo
+   * 2. Delete openKey from OpenKeyTable
+   * 3. If old PartKeyInfo exists, put it in DeletedKeyTable
+   * @param omResponse
+   * @param multipartKey
+   * @param openKey
+   * @param omMultipartKeyInfo
+   * @param oldPartKeyInfo
+   */
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  public S3MultipartUploadCommitPartResponseV1(@Nonnull OMResponse omResponse,
+      String multipartKey, String openKey,
+      @Nullable OmMultipartKeyInfo omMultipartKeyInfo,
+      @Nullable OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo,
+      @Nullable OmKeyInfo openPartKeyInfoToBeDeleted,
+      boolean isRatisEnabled, @Nonnull OmBucketInfo omBucketInfo) {
+
+    super(omResponse, multipartKey, openKey, omMultipartKeyInfo,
+            oldPartKeyInfo, openPartKeyInfoToBeDeleted, isRatisEnabled,
+            omBucketInfo);
+  }
+}
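
The class javadoc above enumerates the three table updates performed on a
successful commit-part. Here is a toy, self-contained model of that sequence,
using hand-rolled maps in place of the real RocksDB-backed tables (the actual
code goes through org.apache.hadoop.hdds.utils.db.Table with a batch
operation, so treat this purely as a sketch of the ordering):

import java.util.HashMap;
import java.util.Map;

public class CommitPartBatchSketch {
  public static void main(String[] args) {
    Map<String, String> openFileTable = new HashMap<>();
    Map<String, String> multipartInfoTable = new HashMap<>();
    Map<String, String> deletedTable = new HashMap<>();

    String openKey = "1027/part.bin/1614230000000"; // <parentID>/<file>/<clientID>
    String multipartKey = "1027/part.bin/upload-1"; // <parentID>/<file>/<uploadID>
    openFileTable.put(openKey, "pending part data");

    // 1. Update MultipartKey in MultipartInfoTable with the new PartKeyInfo.
    multipartInfoTable.put(multipartKey, "partKeyInfo(partNumber=1)");
    // 2. Delete openKey from the open file table.
    openFileTable.remove(openKey);
    // 3. If an old PartKeyInfo exists (the part was re-committed), move it
    //    to the deleted table so its blocks can be reclaimed.
    String oldPartKeyInfo = null; // none on a first commit
    if (oldPartKeyInfo != null) {
      deletedTable.put("oldPartName", oldPartKeyInfo);
    }
    System.out.println(multipartInfoTable + " " + openFileTable + " " + deletedTable);
  }
}
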
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
index 641ee8d..9f6cff8 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
@@ -152,8 +152,7 @@ public class TestS3MultipartRequest {
         TestOMRequestUtils.createCommitPartMPURequest(volumeName, bucketName,
             keyName, clientID, dataSize, multipartUploadID, partNumber);
     S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest =
-        new S3MultipartUploadCommitPartRequest(omRequest);
-
+            getS3MultipartUploadCommitReq(omRequest);
 
     OMRequest modifiedRequest =
         s3MultipartUploadCommitPartRequest.preExecute(ozoneManager);
@@ -247,4 +246,15 @@ public class TestS3MultipartRequest {
 
     return modifiedRequest;
   }
+
+  protected S3MultipartUploadCommitPartRequest getS3MultipartUploadCommitReq(
+          OMRequest omRequest) {
+    return new S3MultipartUploadCommitPartRequest(omRequest);
+  }
+
+  protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq(
+          OMRequest initiateMPURequest) {
+    return new S3InitiateMultipartUploadRequest(initiateMPURequest);
+  }
+
 }
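
The two protected factories added above are the hook points that let the V1
test suite reuse every scenario in this class while swapping in the V1
request implementations. A generic reduction of the pattern (class names here
are stand-ins, not Ozone types):

class BaseRequestTest {
  // Subclasses override this factory to change the implementation under test.
  protected String newCommitRequest() {
    return "S3MultipartUploadCommitPartRequest";
  }

  void runScenario() {
    // Every test body obtains the request through the hook, never directly.
    System.out.println("exercising " + newCommitRequest());
  }
}

class V1RequestTest extends BaseRequestTest {
  @Override
  protected String newCommitRequest() {
    return "S3MultipartUploadCommitPartRequestV1";
  }
}

public class FactoryHookSketch {
  public static void main(String[] args) {
    new BaseRequestTest().runScenario(); // exercising ...Request
    new V1RequestTest().runScenario();   // exercising ...RequestV1
  }
}
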
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java
index d623b17..6c8beb0 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java
@@ -41,27 +41,28 @@ public class TestS3MultipartUploadCommitPartRequest
   public void testPreExecute() throws Exception {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
 
     doPreExecuteCommitMPU(volumeName, bucketName, keyName, Time.now(),
         UUID.randomUUID().toString(), 1);
   }
 
-
   @Test
   public void testValidateAndUpdateCacheSuccess() throws Exception {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
 
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
 
+    createParentPath(volumeName, bucketName);
+
     OMRequest initiateMPURequest = doPreExecuteInitiateMPU(volumeName,
         bucketName, keyName);
 
     S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
-        new S3InitiateMultipartUploadRequest(initiateMPURequest);
+            getS3InitiateMultipartUploadReq(initiateMPURequest);
 
     OMClientResponse omClientResponse =
         s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager,
@@ -75,12 +76,10 @@ public class TestS3MultipartUploadCommitPartRequest
         bucketName, keyName, clientID, multipartUploadID, 1);
 
     S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest =
-        new S3MultipartUploadCommitPartRequest(commitMultipartRequest);
+        getS3MultipartUploadCommitReq(commitMultipartRequest);
 
     // Add key to open key table.
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName,
-        keyName, clientID, HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
+    addKeyToOpenKeyTable(volumeName, bucketName, keyName, clientID);
 
     omClientResponse =
         s3MultipartUploadCommitPartRequest.validateAndUpdateCache(ozoneManager,
@@ -90,8 +89,8 @@ public class TestS3MultipartUploadCommitPartRequest
     Assert.assertTrue(omClientResponse.getOMResponse().getStatus()
         == OzoneManagerProtocolProtos.Status.OK);
 
-    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
-        bucketName, keyName, multipartUploadID);
+    String multipartKey = getMultipartKey(volumeName, bucketName, keyName,
+            multipartUploadID);
 
     Assert.assertNotNull(
         omMetadataManager.getMultipartInfoTable().get(multipartKey));
@@ -107,11 +106,12 @@ public class TestS3MultipartUploadCommitPartRequest
   public void testValidateAndUpdateCacheMultipartNotFound() throws Exception {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
 
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
 
+    createParentPath(volumeName, bucketName);
 
     long clientID = Time.now();
     String multipartUploadID = UUID.randomUUID().toString();
@@ -120,12 +120,10 @@ public class TestS3MultipartUploadCommitPartRequest
         bucketName, keyName, clientID, multipartUploadID, 1);
 
     S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest =
-        new S3MultipartUploadCommitPartRequest(commitMultipartRequest);
+        getS3MultipartUploadCommitReq(commitMultipartRequest);
 
     // Add key to open key table.
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName,
-        keyName, clientID, HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
+    addKeyToOpenKeyTable(volumeName, bucketName, keyName, clientID);
 
     OMClientResponse omClientResponse =
         s3MultipartUploadCommitPartRequest.validateAndUpdateCache(ozoneManager,
@@ -135,8 +133,8 @@ public class TestS3MultipartUploadCommitPartRequest
     Assert.assertTrue(omClientResponse.getOMResponse().getStatus()
         == OzoneManagerProtocolProtos.Status.NO_SUCH_MULTIPART_UPLOAD_ERROR);
 
-    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
-        bucketName, keyName, multipartUploadID);
+    String multipartKey = getMultipartKey(volumeName, bucketName, keyName,
+            multipartUploadID);
 
     Assert.assertNull(
         omMetadataManager.getMultipartInfoTable().get(multipartKey));
@@ -147,7 +145,7 @@ public class TestS3MultipartUploadCommitPartRequest
   public void testValidateAndUpdateCacheKeyNotFound() throws Exception {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
 
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
@@ -163,7 +161,7 @@ public class TestS3MultipartUploadCommitPartRequest
     // part. It will fail with KEY_NOT_FOUND
 
     S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest =
-        new S3MultipartUploadCommitPartRequest(commitMultipartRequest);
+        getS3MultipartUploadCommitReq(commitMultipartRequest);
 
 
     OMClientResponse omClientResponse =
@@ -180,7 +178,7 @@ public class TestS3MultipartUploadCommitPartRequest
   public void testValidateAndUpdateCacheBucketFound() throws Exception {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
 
     TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager);
 
@@ -195,7 +193,7 @@ public class TestS3MultipartUploadCommitPartRequest
     // part. It will fail with KEY_NOT_FOUND
 
     S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest =
-        new S3MultipartUploadCommitPartRequest(commitMultipartRequest);
+        getS3MultipartUploadCommitReq(commitMultipartRequest);
 
 
     OMClientResponse omClientResponse =
@@ -206,4 +204,26 @@ public class TestS3MultipartUploadCommitPartRequest
         == OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND);
 
   }
+
+  protected void addKeyToOpenKeyTable(String volumeName, String bucketName,
+      String keyName, long clientID) throws Exception {
+    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName,
+            keyName, clientID, HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE, omMetadataManager);
+  }
+
+  protected String getKeyName() {
+    return UUID.randomUUID().toString();
+  }
+
+  protected String getMultipartKey(String volumeName, String bucketName,
+      String keyName, String multipartUploadID) {
+    return omMetadataManager.getMultipartKey(volumeName,
+            bucketName, keyName, multipartUploadID);
+  }
+
+  protected void createParentPath(String volumeName, String bucketName)
+          throws Exception {
+    // no parent hierarchy
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestV1.java
new file mode 100644
index 0000000..d0d01e1
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestV1.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.om.request.s3.multipart;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.Time;
+import org.junit.Assert;
+
+import java.util.UUID;
+
+/**
+ * Tests S3 Multipart upload commit part request.
+ */
+public class TestS3MultipartUploadCommitPartRequestV1
+    extends TestS3MultipartUploadCommitPartRequest {
+
+  private String dirName = "a/b/c/";
+
+  private long parentID;
+
+  protected S3MultipartUploadCommitPartRequest getS3MultipartUploadCommitReq(
+          OMRequest omRequest) {
+    return new S3MultipartUploadCommitPartRequestV1(omRequest);
+  }
+
+  protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq(
+          OMRequest initiateMPURequest) {
+    return new S3InitiateMultipartUploadRequestV1(initiateMPURequest);
+  }
+
+  protected String getKeyName() {
+    return dirName + UUID.randomUUID().toString();
+  }
+
+  protected void addKeyToOpenKeyTable(String volumeName, String bucketName,
+      String keyName, long clientID) throws Exception {
+    long txnLogId = 10000;
+    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
+            bucketName, keyName, HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE, parentID + 1, parentID,
+            txnLogId, Time.now());
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    TestOMRequestUtils.addFileToKeyTable(true, false,
+            fileName, omKeyInfo, clientID, txnLogId, omMetadataManager);
+  }
+
+  protected String getMultipartKey(String volumeName, String bucketName,
+      String keyName, String multipartUploadID) {
+    String fileName = StringUtils.substringAfter(keyName, dirName);
+    return omMetadataManager.getMultipartKey(parentID, fileName,
+            multipartUploadID);
+  }
+
+  protected OMRequest doPreExecuteInitiateMPU(String volumeName,
+      String bucketName, String keyName) throws Exception {
+    OMRequest omRequest =
+            TestOMRequestUtils.createInitiateMPURequest(volumeName, bucketName,
+                    keyName);
+
+    S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
+            new S3InitiateMultipartUploadRequestV1(omRequest);
+
+    OMRequest modifiedRequest =
+            s3InitiateMultipartUploadRequest.preExecute(ozoneManager);
+
+    Assert.assertNotEquals(omRequest, modifiedRequest);
+    Assert.assertTrue(modifiedRequest.hasInitiateMultiPartUploadRequest());
+    Assert.assertNotNull(modifiedRequest.getInitiateMultiPartUploadRequest()
+            .getKeyArgs().getMultipartUploadID());
+    Assert.assertTrue(modifiedRequest.getInitiateMultiPartUploadRequest()
+            .getKeyArgs().getModificationTime() > 0);
+
+    return modifiedRequest;
+  }
+
+  protected void createParentPath(String volumeName, String bucketName)
+      throws Exception {
+    // Create parent dirs for the path
+    parentID = TestOMRequestUtils.addParentsToDirTable(volumeName, bucketName,
+            dirName, omMetadataManager);
+  }
+}
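
The overrides above exist because V1 (prefix-based) DB keys are built from
the parent directory's objectID rather than from volume/bucket/key names. A
runnable sketch of the three key shapes exercised by these tests; the
open-file shape matches the "<parentID>/fileName/<clientID>" format noted in
TestObjectStoreV1 later in this mail, while the multipart shape is inferred
from getMultipartKey(parentID, fileName, uploadID) and should be treated as
an assumption:

public class V1KeyShapes {
  public static void main(String[] args) {
    long parentID = 1027L;        // objectID of "c" in dirName "a/b/c/"
    String fileName = "f1";
    String uploadID = "upload-123";
    long clientID = 99L;

    String pathKey = parentID + "/" + fileName;                       // dir/file table
    String openFileKey = parentID + "/" + fileName + "/" + clientID;  // open file table
    String multipartKey = parentID + "/" + fileName + "/" + uploadID; // multipart info table

    System.out.println(pathKey);
    System.out.println(openFileKey);
    System.out.println(multipartKey);
  }
}
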
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
index 76ceb0e..106ae61 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
@@ -19,6 +19,7 @@
 
 package org.apache.hadoop.ozone.om.response.s3.multipart;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
@@ -156,6 +157,24 @@ public class TestS3MultipartResponse {
             .setFactor(HddsProtos.ReplicationFactor.ONE).build()).build();
   }
 
+  public PartKeyInfo createPartKeyInfoV1(
+      String volumeName, String bucketName, long parentID, String fileName,
+      int partNumber) {
+    return PartKeyInfo.newBuilder()
+        .setPartNumber(partNumber)
+        .setPartName(omMetadataManager.getMultipartKey(parentID, fileName,
+                UUID.randomUUID().toString()))
+        .setPartKeyInfo(KeyInfo.newBuilder()
+            .setVolumeName(volumeName)
+            .setBucketName(bucketName)
+            .setKeyName(fileName)
+            .setDataSize(100L) // Just set dummy size for testing
+            .setCreationTime(Time.now())
+            .setModificationTime(Time.now())
+            .setParentID(parentID)
+            .setType(HddsProtos.ReplicationType.RATIS)
+            .setFactor(HddsProtos.ReplicationFactor.ONE).build()).build();
+  }
 
   public S3InitiateMultipartUploadResponse createS3InitiateMPUResponseV1(
       String volumeName, String bucketName, long parentID, String keyName,
@@ -198,4 +217,61 @@ public class TestS3MultipartResponse {
             omKeyInfo, parentDirInfos);
   }
 
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  public S3MultipartUploadCommitPartResponse createS3CommitMPUResponseV1(
+          String volumeName, String bucketName, long parentID, String keyName,
+          String multipartUploadID,
+          OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo,
+          OmMultipartKeyInfo multipartKeyInfo,
+          OzoneManagerProtocolProtos.Status status, String openKey)
+          throws IOException {
+    if (multipartKeyInfo == null) {
+      multipartKeyInfo = new OmMultipartKeyInfo.Builder()
+              .setUploadID(multipartUploadID)
+              .setCreationTime(Time.now())
+              .setReplicationType(HddsProtos.ReplicationType.RATIS)
+              .setReplicationFactor(HddsProtos.ReplicationFactor.ONE)
+              .setParentID(parentID)
+              .build();
+    }
+
+    String fileName = OzoneFSUtils.getFileName(keyName);
+
+    String multipartKey = getMultipartKey(parentID, keyName, multipartUploadID);
+    boolean isRatisEnabled = true;
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+
+    OmKeyInfo openPartKeyInfoToBeDeleted = new OmKeyInfo.Builder()
+            .setVolumeName(volumeName)
+            .setBucketName(bucketName)
+            .setKeyName(fileName)
+            .setFileName(fileName)
+            .setCreationTime(Time.now())
+            .setModificationTime(Time.now())
+            .setReplicationType(HddsProtos.ReplicationType.RATIS)
+            .setReplicationFactor(HddsProtos.ReplicationFactor.ONE)
+            .setOmKeyLocationInfos(Collections.singletonList(
+                    new OmKeyLocationInfoGroup(0, new ArrayList<>())))
+            .build();
+
+    OMResponse omResponse = OMResponse.newBuilder()
+            .setCmdType(OzoneManagerProtocolProtos.Type.CommitMultiPartUpload)
+            .setStatus(status).setSuccess(true)
+            .setCommitMultiPartUploadResponse(
+                    OzoneManagerProtocolProtos.MultipartCommitUploadPartResponse
+                            .newBuilder().setPartName(volumeName)).build();
+
+    return new S3MultipartUploadCommitPartResponseV1(omResponse, multipartKey,
+            openKey, multipartKeyInfo, oldPartKeyInfo,
+            openPartKeyInfoToBeDeleted, isRatisEnabled, omBucketInfo);
+  }
+
+  private String getMultipartKey(long parentID, String keyName,
+                                 String multipartUploadID) {
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    return omMetadataManager.getMultipartKey(parentID, fileName,
+            multipartUploadID);
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCommitPartResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCommitPartResponseV1.java
new file mode 100644
index 0000000..511ffef
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCommitPartResponseV1.java
@@ -0,0 +1,226 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.s3.multipart;
+
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo;
+import org.apache.hadoop.util.Time;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.UUID;
+
+/**
+ * Test multipart upload commit part response.
+ */
+public class TestS3MultipartUploadCommitPartResponseV1
+    extends TestS3MultipartResponse {
+
+  private String dirName = "a/b/c/";
+
+  private long parentID;
+
+  @Test
+  public void testAddDBToBatch() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
+    String multipartUploadID = UUID.randomUUID().toString();
+
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+
+    createParentPath(volumeName, bucketName);
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    String multipartKey = omMetadataManager.getMultipartKey(parentID, fileName,
+            multipartUploadID);
+    long clientId = Time.now();
+    String openKey = omMetadataManager.getOpenFileName(parentID, fileName,
+            clientId);
+
+    S3MultipartUploadCommitPartResponse s3MultipartUploadCommitPartResponse =
+        createS3CommitMPUResponseV1(volumeName, bucketName, parentID, keyName,
+            multipartUploadID, null, null,
+                OzoneManagerProtocolProtos.Status.OK, openKey);
+
+    s3MultipartUploadCommitPartResponse.addToDBBatch(omMetadataManager,
+        batchOperation);
+
+    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+
+    Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
+    Assert.assertNotNull(
+        omMetadataManager.getMultipartInfoTable().get(multipartKey));
+
+    // As no parts were created, no entries should be present in the delete table.
+    Assert.assertEquals(0, omMetadataManager.countRowsInTable(
+            omMetadataManager.getDeletedTable()));
+  }
+
+  @Test
+  public void testAddDBToBatchWithParts() throws Exception {
+
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
+
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+    createParentPath(volumeName, bucketName);
+
+    String multipartUploadID = UUID.randomUUID().toString();
+
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    String multipartKey = omMetadataManager.getMultipartKey(parentID, fileName,
+            multipartUploadID);
+
+    S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponseV1 =
+            createS3InitiateMPUResponseV1(volumeName, bucketName, parentID,
+                    keyName, multipartUploadID, new ArrayList<>());
+
+    s3InitiateMultipartUploadResponseV1.addToDBBatch(omMetadataManager,
+            batchOperation);
+
+    // Add some dummy parts for testing.
+    // No key locations are added, as this test only verifies whether entries
+    // are added to the delete table or not.
+    OmMultipartKeyInfo omMultipartKeyInfo =
+            s3InitiateMultipartUploadResponseV1.getOmMultipartKeyInfo();
+
+    PartKeyInfo part1 = createPartKeyInfoV1(volumeName, bucketName, parentID,
+        fileName, 1);
+
+    addPart(1, part1, omMultipartKeyInfo);
+
+    long clientId = Time.now();
+    String openKey = omMetadataManager.getOpenFileName(parentID, fileName,
+            clientId);
+
+    S3MultipartUploadCommitPartResponse s3MultipartUploadCommitPartResponse =
+            createS3CommitMPUResponseV1(volumeName, bucketName, parentID,
+                    keyName, multipartUploadID,
+                    omMultipartKeyInfo.getPartKeyInfo(1),
+                    omMultipartKeyInfo,
+                    OzoneManagerProtocolProtos.Status.OK,  openKey);
+
+    s3MultipartUploadCommitPartResponse.checkAndUpdateDB(omMetadataManager,
+            batchOperation);
+
+    Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
+    Assert.assertNull(
+        omMetadataManager.getMultipartInfoTable().get(multipartKey));
+
+    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+
+    // As one part was created, one entry should be present in the delete table.
+    Assert.assertEquals(1, omMetadataManager.countRowsInTable(
+        omMetadataManager.getDeletedTable()));
+
+    String part1DeletedKeyName =
+        omMultipartKeyInfo.getPartKeyInfo(1).getPartName();
+
+    Assert.assertNotNull(omMetadataManager.getDeletedTable().get(
+        part1DeletedKeyName));
+
+    RepeatedOmKeyInfo ro =
+        omMetadataManager.getDeletedTable().get(part1DeletedKeyName);
+    Assert.assertEquals(OmKeyInfo.getFromProtobuf(part1.getPartKeyInfo()),
+        ro.getOmKeyInfoList().get(0));
+  }
+
+  @Test
+  public void testWithMultipartUploadError() throws Exception {
+
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
+
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+    createParentPath(volumeName, bucketName);
+
+    String multipartUploadID = UUID.randomUUID().toString();
+
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    String multipartKey = omMetadataManager.getMultipartKey(parentID, fileName,
+            multipartUploadID);
+
+    S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponseV1 =
+            createS3InitiateMPUResponseV1(volumeName, bucketName, parentID,
+                    keyName, multipartUploadID, new ArrayList<>());
+
+    s3InitiateMultipartUploadResponseV1.addToDBBatch(omMetadataManager,
+            batchOperation);
+
+    // Add some dummy parts for testing.
+    // No key locations are added, as this test only verifies whether entries
+    // are added to the delete table or not.
+    OmMultipartKeyInfo omMultipartKeyInfo =
+            s3InitiateMultipartUploadResponseV1.getOmMultipartKeyInfo();
+
+    PartKeyInfo part1 = createPartKeyInfoV1(volumeName, bucketName, parentID,
+            fileName, 1);
+
+    addPart(1, part1, omMultipartKeyInfo);
+
+    long clientId = Time.now();
+    String openKey = omMetadataManager.getOpenFileName(parentID, fileName,
+            clientId);
+
+    S3MultipartUploadCommitPartResponse s3MultipartUploadCommitPartResponse =
+            createS3CommitMPUResponseV1(volumeName, bucketName, parentID,
+                    keyName + "invalid", multipartUploadID,
+                    omMultipartKeyInfo.getPartKeyInfo(1),
+                    omMultipartKeyInfo, OzoneManagerProtocolProtos.Status
+                            .NO_SUCH_MULTIPART_UPLOAD_ERROR, openKey);
+
+    s3MultipartUploadCommitPartResponse.checkAndUpdateDB(omMetadataManager,
+            batchOperation);
+
+    Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
+    Assert.assertNull(
+            omMetadataManager.getMultipartInfoTable().get(multipartKey));
+
+    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+
+    // The openKey entry should be present in the delete table.
+    Assert.assertEquals(1, omMetadataManager.countRowsInTable(
+            omMetadataManager.getDeletedTable()));
+
+    Assert.assertNotNull(omMetadataManager.getDeletedTable().get(
+            openKey));
+  }
+
+  private String getKeyName() {
+    return dirName + UUID.randomUUID().toString();
+  }
+
+  private void createParentPath(String volumeName, String bucketName)
+      throws Exception {
+    // Create parent dirs for the path
+    parentID = TestOMRequestUtils.addParentsToDirTable(volumeName, bucketName,
+            dirName, omMetadataManager);
+  }
+}




[ozone] 09/19: HDDS-2942. Putkey : create key table entries for intermediate directories in the key path (#1764)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 6145c24b0d91fd8047d46c49963b6990fdb860ce
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Mon Jan 11 20:03:55 2021 +0530

    HDDS-2942. Putkey : create key table entries for intermediate directories in the key path (#1764)
---
 .../apache/hadoop/ozone/om/TestObjectStoreV1.java  | 215 +++++++++++++++++++++
 .../om/ratis/utils/OzoneManagerRatisUtils.java     |   4 +
 .../om/request/file/OMFileCreateRequestV1.java     |   3 +-
 .../ozone/om/request/key/OMKeyCommitRequestV1.java |   5 +-
 .../ozone/om/request/key/OMKeyCreateRequest.java   |  22 ++-
 .../OMKeyCreateRequestV1.java}                     | 148 ++++++--------
 .../om/response/file/OMFileCreateResponseV1.java   |   3 +-
 .../om/response/key/OMAllocateBlockResponseV1.java |   1 -
 .../om/response/key/OMKeyCommitResponseV1.java     |   2 -
 ...kResponseV1.java => OMKeyCreateResponseV1.java} |  42 ++--
 .../om/request/key/TestOMKeyCreateRequest.java     |  53 ++---
 .../om/request/key/TestOMKeyCreateRequestV1.java   | 129 +++++++++++++
 .../key/TestOMAllocateBlockResponseV1.java         |   5 +-
 .../om/response/key/TestOMKeyCommitResponseV1.java |   3 +-
 ...ponseV1.java => TestOMKeyCreateResponseV1.java} |  77 +++-----
 15 files changed, 501 insertions(+), 211 deletions(-)
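
In short, this commit makes PutKey under the V1 layout materialize every
missing ancestor of the key as a directory-table entry before writing the
open-file entry for the leaf. A toy model of the walk (IDs are fabricated;
the real logic lives in the OMFileRequest/OMDirectoryCreateRequestV1 path,
and TestObjectStoreV1 below verifies the same resolution component by
component):

import java.util.HashMap;
import java.util.Map;

public class PutKeyParentsSketch {
  public static void main(String[] args) {
    Map<String, Long> dirTable = new HashMap<>(); // "<parentID>/<name>" -> objectID
    long bucketId = 1000L;
    long nextObjectId = bucketId + 1;

    String key = "a/b/c/file1";
    String[] parts = key.split("/");
    long parentId = bucketId;
    for (int i = 0; i < parts.length - 1; i++) { // every component but the leaf
      String dbKey = parentId + "/" + parts[i];
      Long id = dirTable.get(dbKey);
      if (id == null) {           // missing intermediate directory: create it
        id = nextObjectId++;
        dirTable.put(dbKey, id);
      }
      parentId = id;
    }
    // The leaf is then written to the open file table under
    // "<parentId>/file1/<clientID>".
    System.out.println("leaf parent = " + parentId + ", dirTable = " + dirTable);
  }
}
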

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
new file mode 100644
index 0000000..c6ae4ca
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
@@ -0,0 +1,215 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.util.StringUtils;
+import org.junit.Assert;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.UUID;
+
+public class TestObjectStoreV1 {
+
+  private static MiniOzoneCluster cluster = null;
+  private static OzoneConfiguration conf;
+  private static String clusterId;
+  private static String scmId;
+  private static String omId;
+
+  @Rule
+  public Timeout timeout = new Timeout(240000);
+
+  /**
+   * Create a MiniOzoneCluster for testing.
+   * <p>
+   *
+   * @throws IOException
+   */
+  @BeforeClass
+  public static void init() throws Exception {
+    conf = new OzoneConfiguration();
+    clusterId = UUID.randomUUID().toString();
+    scmId = UUID.randomUUID().toString();
+    omId = UUID.randomUUID().toString();
+    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    cluster = MiniOzoneCluster.newBuilder(conf)
+            .setClusterId(clusterId)
+            .setScmId(scmId)
+            .setOmId(omId)
+            .build();
+    cluster.waitForClusterToBeReady();
+  }
+
+  @Test
+  public void testCreateKey() throws Exception {
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    String parent = "a/b/c/";
+    String file = "key" + RandomStringUtils.randomNumeric(5);
+    String key = parent + file;
+
+    OzoneClient client = cluster.getClient();
+
+    ObjectStore objectStore = client.getObjectStore();
+    objectStore.createVolume(volumeName);
+
+    OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
+    Assert.assertTrue(ozoneVolume.getName().equals(volumeName));
+    ozoneVolume.createBucket(bucketName);
+
+    OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
+    Assert.assertTrue(ozoneBucket.getName().equals(bucketName));
+
+    Table<String, OmKeyInfo> openKeyTable =
+            cluster.getOzoneManager().getMetadataManager().getOpenKeyTable();
+
+    // before file creation
+    verifyKeyInFileTable(openKeyTable, file, 0, true);
+
+    String data = "random data";
+    OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(key,
+            data.length(), ReplicationType.RATIS, ReplicationFactor.ONE,
+            new HashMap<>());
+
+    OmDirectoryInfo dirPathC = getDirInfo(volumeName, bucketName, parent);
+    Assert.assertNotNull("Failed to find dir path: a/b/c", dirPathC);
+
+    // after file creation
+    verifyKeyInOpenFileTable(openKeyTable, file, dirPathC.getObjectID(),
+            false);
+
+    ozoneOutputStream.write(data.getBytes(), 0, data.length());
+    ozoneOutputStream.close();
+
+    Table<String, OmKeyInfo> keyTable =
+            cluster.getOzoneManager().getMetadataManager().getKeyTable();
+
+    // After closing the file. File entry should be removed from openFileTable
+    // and it should be added to fileTable.
+    verifyKeyInFileTable(keyTable, file, dirPathC.getObjectID(), false);
+    verifyKeyInOpenFileTable(openKeyTable, file, dirPathC.getObjectID(),
+            true);
+
+    ozoneBucket.deleteKey(key);
+
+    // after key delete
+    verifyKeyInFileTable(keyTable, file, dirPathC.getObjectID(), true);
+    verifyKeyInOpenFileTable(openKeyTable, file, dirPathC.getObjectID(),
+            true);
+  }
+
+  private OmDirectoryInfo getDirInfo(String volumeName, String bucketName,
+      String parentKey) throws Exception {
+    OMMetadataManager omMetadataManager =
+            cluster.getOzoneManager().getMetadataManager();
+    long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName,
+            omMetadataManager);
+    String[] pathComponents = StringUtils.split(parentKey, '/');
+    long parentId = bucketId;
+    OmDirectoryInfo dirInfo = null;
+    for (int indx = 0; indx < pathComponents.length; indx++) {
+      String pathElement = pathComponents[indx];
+      String dbKey = omMetadataManager.getOzonePathKey(parentId,
+              pathElement);
+      dirInfo =
+              omMetadataManager.getDirectoryTable().get(dbKey);
+      parentId = dirInfo.getObjectID();
+    }
+    return dirInfo;
+  }
+
+  private void verifyKeyInFileTable(Table<String, OmKeyInfo> fileTable,
+      String fileName, long parentID, boolean isEmpty) throws IOException {
+    TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> iterator
+            = fileTable.iterator();
+
+    if (isEmpty) {
+      Assert.assertTrue("Table is not empty!", fileTable.isEmpty());
+    } else {
+      Assert.assertFalse("Table is empty!", fileTable.isEmpty());
+      while (iterator.hasNext()) {
+        Table.KeyValue<String, OmKeyInfo> next = iterator.next();
+        Assert.assertEquals("Invalid Key: " + next.getKey(),
+                parentID + "/" + fileName, next.getKey());
+        OmKeyInfo omKeyInfo = next.getValue();
+        Assert.assertEquals("Invalid Key", fileName,
+                omKeyInfo.getFileName());
+        Assert.assertEquals("Invalid Key", fileName,
+                omKeyInfo.getKeyName());
+        Assert.assertEquals("Invalid Key", parentID,
+                omKeyInfo.getParentObjectID());
+      }
+    }
+  }
+
+  private void verifyKeyInOpenFileTable(Table<String, OmKeyInfo> openFileTable,
+      String fileName, long parentID, boolean isEmpty) throws IOException {
+    TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> iterator
+            = openFileTable.iterator();
+
+    if (isEmpty) {
+      Assert.assertTrue("Table is not empty!", openFileTable.isEmpty());
+    } else {
+      Assert.assertFalse("Table is empty!", openFileTable.isEmpty());
+      while (iterator.hasNext()) {
+        Table.KeyValue<String, OmKeyInfo> next = iterator.next();
+        // Used startsWith because the key format is
+        // <parentID>/fileName/<clientID> and the clientID is not visible here.
+        Assert.assertTrue("Invalid Key: " + next.getKey(),
+                next.getKey().startsWith(parentID + "/" + fileName));
+        OmKeyInfo omKeyInfo = next.getValue();
+        Assert.assertEquals("Invalid Key", fileName,
+                omKeyInfo.getFileName());
+        Assert.assertEquals("Invalid Key", fileName,
+                omKeyInfo.getKeyName());
+        Assert.assertEquals("Invalid Key", parentID,
+                omKeyInfo.getParentObjectID());
+      }
+    }
+  }
+
+  /**
+   * Shutdown the MiniOzoneCluster.
+   */
+  @AfterClass
+  public static void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+}
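
One detail worth calling out in verifyKeyInOpenFileTable above: the open-file
DB key ends with a clientID that the test never sees, so the assertion can
only match on the prefix. A minimal demonstration of why startsWith is the
right check (values are made up):

public class OpenFileKeyMatchSketch {
  public static void main(String[] args) {
    long parentID = 1027L;
    String fileName = "key123";
    long hiddenClientID = 4242L; // assigned at create time, unknown to the test

    String actualDbKey = parentID + "/" + fileName + "/" + hiddenClientID;
    String knownPrefix = parentID + "/" + fileName;

    System.out.println(actualDbKey.startsWith(knownPrefix)); // true
    System.out.println(actualDbKey.equals(knownPrefix));     // false
  }
}
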
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index 31120f1..c469c2c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.ozone.om.request.key.OMAllocateBlockRequestV1;
 import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequestV1;
 import org.apache.hadoop.ozone.om.request.key.OMKeyCreateRequest;
+import org.apache.hadoop.ozone.om.request.key.OMKeyCreateRequestV1;
 import org.apache.hadoop.ozone.om.request.key.OMKeyDeleteRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyDeleteRequestV1;
 import org.apache.hadoop.ozone.om.request.key.OMKeyPurgeRequest;
@@ -144,6 +145,9 @@ public final class OzoneManagerRatisUtils {
       }
       return new OMAllocateBlockRequest(omRequest);
     case CreateKey:
+      if (omLayoutVersionV1) {
+        return new OMKeyCreateRequestV1(omRequest);
+      }
       return new OMKeyCreateRequest(omRequest);
     case CommitKey:
       if (omLayoutVersionV1) {
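
The CreateKey case now follows the same dispatch idiom used for the other key
requests on this branch: one flag selects the V1 (prefix) implementation. A
compact model of the idiom; the flag ultimately derives from
OMConfigKeys.OZONE_OM_LAYOUT_VERSION (set to "V1" in TestObjectStoreV1 above),
though the wiring here is simplified:

public class LayoutDispatchSketch {
  interface KeyRequestFactory { String name(); }

  static KeyRequestFactory createKeyRequest(boolean omLayoutVersionV1) {
    if (omLayoutVersionV1) {
      return () -> "OMKeyCreateRequestV1"; // prefix-based layout
    }
    return () -> "OMKeyCreateRequest";     // flat key layout
  }

  public static void main(String[] args) {
    System.out.println(createKeyRequest(true).name());
    System.out.println(createKeyRequest(false).name());
  }
}
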
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
index e38908a..f35c9a5 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
@@ -217,7 +217,8 @@ public class OMFileCreateRequestV1 extends OMFileCreateRequest {
           .setOpenVersion(openVersion).build())
           .setCmdType(Type.CreateFile);
       omClientResponse = new OMFileCreateResponseV1(omResponse.build(),
-              omFileInfo, missingParentInfos, clientID, omBucketInfo.copyObject());
+              omFileInfo, missingParentInfos, clientID,
+              omBucketInfo.copyObject());
 
       result = Result.SUCCESS;
     } catch (IOException ex) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java
index 1ea36cc..3c49610 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.ozone.om.OMMetrics;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
@@ -91,7 +90,6 @@ public class OMKeyCommitRequestV1 extends OMKeyCommitRequest {
 
     IOException exception = null;
     OmKeyInfo omKeyInfo = null;
-    OmVolumeArgs omVolumeArgs = null;
     OmBucketInfo omBucketInfo = null;
     OMClientResponse omClientResponse = null;
     boolean bucketLockAcquired = false;
@@ -168,8 +166,7 @@ public class OMKeyCommitRequestV1 extends OMKeyCommitRequest {
       omBucketInfo.incrUsedBytes(correctedSpace);
 
       omClientResponse = new OMKeyCommitResponseV1(omResponse.build(),
-              omKeyInfo, dbFileKey, dbOpenFileKey, omVolumeArgs,
-              omBucketInfo.copyObject());
+              omKeyInfo, dbFileKey, dbOpenFileKey, omBucketInfo.copyObject());
 
       result = Result.SUCCESS;
     } catch (IOException ex) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
index 55f4990..68c0c36 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
@@ -349,24 +349,34 @@ public class OMKeyCreateRequest extends OMKeyRequest {
         OMAction.ALLOCATE_KEY, auditMap, exception,
         getOmRequest().getUserInfo()));
 
+    logResult(createKeyRequest, omMetrics, exception, result,
+            numMissingParents);
+
+    return omClientResponse;
+  }
+
+  protected void logResult(CreateKeyRequest createKeyRequest,
+      OMMetrics omMetrics, IOException exception, Result result,
+       int numMissingParents) {
     switch (result) {
     case SUCCESS:
       // Missing directories are created immediately, counting that here.
       // The metric for the key is incremented as part of the key commit.
       omMetrics.incNumKeys(numMissingParents);
-      LOG.debug("Key created. Volume:{}, Bucket:{}, Key:{}", volumeName,
-          bucketName, keyName);
+      LOG.debug("Key created. Volume:{}, Bucket:{}, Key:{}",
+              createKeyRequest.getKeyArgs().getVolumeName(),
+              createKeyRequest.getKeyArgs().getBucketName(),
+              createKeyRequest.getKeyArgs().getKeyName());
       break;
     case FAILURE:
       LOG.error("Key creation failed. Volume:{}, Bucket:{}, Key{}. " +
-          "Exception:{}", volumeName, bucketName, keyName, exception);
+          "Exception:{}", createKeyRequest.getKeyArgs().getVolumeName(),
+              createKeyRequest.getKeyArgs().getBucketName(),
+              createKeyRequest.getKeyArgs().getKeyName(), exception);
       break;
     default:
       LOG.error("Unrecognized Result for OMKeyCreateRequest: {}",
           createKeyRequest);
     }
-
-    return omClientResponse;
   }
-
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestV1.java
similarity index 66%
copy from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
copy to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestV1.java
index e38908a..416e462 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestV1.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.ozone.om.request.file;
+package org.apache.hadoop.ozone.om.request.key;
 
 import com.google.common.base.Optional;
 import org.apache.hadoop.ozone.audit.OMAction;
@@ -29,15 +29,15 @@ import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestV1;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponse;
-import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponseV1;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponseV1;
+import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateKeyResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
@@ -51,16 +51,19 @@ import java.util.List;
 import java.util.Map;
 import java.util.stream.Collectors;
 
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS;
+import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH;
 
 /**
- * Handles create file request layout version1.
+ * Handles CreateKey request for layout version V1.
  */
-public class OMFileCreateRequestV1 extends OMFileCreateRequest {
-
+public class OMKeyCreateRequestV1 extends OMKeyCreateRequest {
   private static final Logger LOG =
-      LoggerFactory.getLogger(OMFileCreateRequestV1.class);
-  public OMFileCreateRequestV1(OMRequest omRequest) {
+          LoggerFactory.getLogger(OMKeyCreateRequestV1.class);
+
+  public OMKeyCreateRequestV1(OMRequest omRequest) {
     super(omRequest);
   }
 
@@ -69,61 +72,42 @@ public class OMFileCreateRequestV1 extends OMFileCreateRequest {
   public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
       long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
 
-    CreateFileRequest createFileRequest = getOmRequest().getCreateFileRequest();
-    KeyArgs keyArgs = createFileRequest.getKeyArgs();
+    OzoneManagerProtocolProtos.CreateKeyRequest createKeyRequest =
+            getOmRequest().getCreateKeyRequest();
+
+    OzoneManagerProtocolProtos.KeyArgs keyArgs = createKeyRequest.getKeyArgs();
     Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
 
     String volumeName = keyArgs.getVolumeName();
     String bucketName = keyArgs.getBucketName();
     String keyName = keyArgs.getKeyName();
 
-    // if isRecursive is true, file would be created even if parent
-    // directories does not exist.
-    boolean isRecursive = createFileRequest.getIsRecursive();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("File create for : " + volumeName + "/" + bucketName + "/"
-          + keyName + ":" + isRecursive);
-    }
-
-    // if isOverWrite is true, file would be over written.
-    boolean isOverWrite = createFileRequest.getIsOverwrite();
-
     OMMetrics omMetrics = ozoneManager.getMetrics();
-    omMetrics.incNumCreateFile();
+    omMetrics.incNumKeyAllocates();
 
     OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-
-    boolean acquiredLock = false;
-
     OmBucketInfo omBucketInfo = null;
     final List<OmKeyLocationInfo> locations = new ArrayList<>();
-    List<OmDirectoryInfo> missingParentInfos;
-    int numKeysCreated = 0;
 
+    boolean acquireLock = false;
     OMClientResponse omClientResponse = null;
-    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
-        getOmRequest());
+    OzoneManagerProtocolProtos.OMResponse.Builder omResponse =
+            OmResponseUtil.getOMResponseBuilder(getOmRequest());
     IOException exception = null;
-    Result result = null;
+    Result result;
+    List<OmDirectoryInfo> missingParentInfos;
+    int numKeysCreated = 0;
     try {
       keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
       volumeName = keyArgs.getVolumeName();
       bucketName = keyArgs.getBucketName();
 
-      if (keyName.length() == 0) {
-        // Check if this is the root of the filesystem.
-        throw new OMException("Can not write to directory: " + keyName,
-                OMException.ResultCodes.NOT_A_FILE);
-      }
-
       // check Acl
       checkKeyAcls(ozoneManager, volumeName, bucketName, keyName,
-          IAccessAuthorizer.ACLType.CREATE, OzoneObj.ResourceType.KEY);
-
-      // acquire lock
-      acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
-          volumeName, bucketName);
+              IAccessAuthorizer.ACLType.CREATE, OzoneObj.ResourceType.KEY);
 
+      acquireLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
+              volumeName, bucketName);
       validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
 
       OmKeyInfo dbFileInfo = null;
@@ -144,12 +128,14 @@ public class OMFileCreateRequestV1 extends OMFileCreateRequest {
         }
       }
 
-      // check if the file or directory already existed in OM
-      checkDirectoryResult(keyName, isOverWrite,
-              pathInfoV1.getDirectoryResult());
-
-      if (!isRecursive) {
-        checkAllParentsExist(keyArgs, pathInfoV1);
+      // Check if a file or directory exists with same key name.
+      if (pathInfoV1.getDirectoryResult() == DIRECTORY_EXISTS) {
+        throw new OMException("Cannot write to " +
+                "directory. createIntermediateDirs behavior is enabled and " +
+                "hence / has special interpretation: " + keyName, NOT_A_FILE);
+      } else if (pathInfoV1.getDirectoryResult() == FILE_EXISTS_IN_GIVENPATH) {
+        throw new OMException("Can not create file: " + keyName +
+                " as there is already file in the given path", NOT_A_FILE);
       }
 
       // add all missing parents to dir table
@@ -162,7 +148,7 @@ public class OMFileCreateRequestV1 extends OMFileCreateRequest {
 
       // do open key
       OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().get(
-          omMetadataManager.getBucketKey(volumeName, bucketName));
+              omMetadataManager.getBucketKey(volumeName, bucketName));
 
       OmKeyInfo omFileInfo = prepareFileInfo(omMetadataManager, keyArgs,
               dbFileInfo, keyArgs.getDataSize(), locations,
@@ -172,15 +158,15 @@ public class OMFileCreateRequestV1 extends OMFileCreateRequest {
               ozoneManager.isRatisEnabled());
 
       long openVersion = omFileInfo.getLatestVersionLocations().getVersion();
-      long clientID = createFileRequest.getClientID();
+      long clientID = createKeyRequest.getClientID();
       String dbOpenFileName = omMetadataManager.getOpenFileName(
               pathInfoV1.getLastKnownParentId(), pathInfoV1.getLeafNodeName(),
               clientID);
 
       // Append new blocks
       List<OmKeyLocationInfo> newLocationList = keyArgs.getKeyLocationsList()
-          .stream().map(OmKeyLocationInfo::getFromProtobuf)
-          .collect(Collectors.toList());
+              .stream().map(OmKeyLocationInfo::getFromProtobuf)
+              .collect(Collectors.toList());
       omFileInfo.appendNewBlocks(newLocationList, false);
 
       omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
@@ -210,51 +196,39 @@ public class OMFileCreateRequestV1 extends OMFileCreateRequest {
 
       // Prepare response. Sets user given full key name in the 'keyName'
       // attribute in response object.
-      int clientVersion = getOmRequest().getVersion();
-      omResponse.setCreateFileResponse(CreateFileResponse.newBuilder()
-          .setKeyInfo(omFileInfo.getProtobuf(keyName, clientVersion))
-          .setID(clientID)
-          .setOpenVersion(openVersion).build())
-          .setCmdType(Type.CreateFile);
-      omClientResponse = new OMFileCreateResponseV1(omResponse.build(),
-              omFileInfo, missingParentInfos, clientID, omBucketInfo.copyObject());
+      omResponse.setCreateKeyResponse(CreateKeyResponse.newBuilder()
+              .setKeyInfo(omFileInfo.getProtobuf(keyName))
+              .setID(clientID)
+              .setOpenVersion(openVersion).build())
+              .setCmdType(Type.CreateKey);
+      omClientResponse = new OMKeyCreateResponseV1(omResponse.build(),
+              omFileInfo, missingParentInfos, clientID,
+              omBucketInfo.copyObject());
 
       result = Result.SUCCESS;
     } catch (IOException ex) {
       result = Result.FAILURE;
       exception = ex;
-      omMetrics.incNumCreateFileFails();
-      omResponse.setCmdType(Type.CreateFile);
-      omClientResponse = new OMFileCreateResponse(createErrorOMResponse(
-            omResponse, exception));
+      omMetrics.incNumKeyAllocateFails();
+      omResponse.setCmdType(Type.CreateKey);
+      omClientResponse = new OMKeyCreateResponse(
+              createErrorOMResponse(omResponse, exception));
     } finally {
       addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
-          omDoubleBufferHelper);
-      if (acquiredLock) {
+              omDoubleBufferHelper);
+      if (acquireLock) {
         omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
-            bucketName);
+                bucketName);
       }
     }
 
     // Audit Log outside the lock
     auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(
-        OMAction.CREATE_FILE, auditMap, exception,
-        getOmRequest().getUserInfo()));
-
-    switch (result) {
-    case SUCCESS:
-      omMetrics.incNumKeys(numKeysCreated);
-      LOG.debug("File created. Volume:{}, Bucket:{}, Key:{}", volumeName,
-          bucketName, keyName);
-      break;
-    case FAILURE:
-      LOG.error("File create failed. Volume:{}, Bucket:{}, Key{}.",
-          volumeName, bucketName, keyName, exception);
-      break;
-    default:
-      LOG.error("Unrecognized Result for OMFileCreateRequest: {}",
-          createFileRequest);
-    }
+            OMAction.ALLOCATE_KEY, auditMap, exception,
+            getOmRequest().getUserInfo()));
+
+    logResult(createKeyRequest, omMetrics, exception, result,
+            numKeysCreated);
 
     return omClientResponse;
   }
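
(A condensed view of the validation above, for readers skimming the diff:
the V1 create path rejects two cases before touching any table. Either the
key itself resolves to an existing directory, or some ancestor component of
the path is a file. The sketch below is hypothetical; the OMDirectoryResult
values and error codes are taken from the diff, but the helper name
validatePath is illustrative only.)

    static void validatePath(OMDirectoryResult result, String keyName)
        throws OMException {
      switch (result) {
      case DIRECTORY_EXISTS:
        // a directory occupies the exact key name
        throw new OMException("Cannot write to directory: " + keyName,
            OMException.ResultCodes.NOT_A_FILE);
      case FILE_EXISTS_IN_GIVENPATH:
        // an ancestor component of the path is a file
        throw new OMException("Cannot create file: " + keyName +
            " as a file already exists in the given path",
            OMException.ResultCodes.NOT_A_FILE);
      default:
        // NONE / DIRECTORY_EXISTS_IN_GIVENPATH: safe to create the file
        // and any missing parent directories.
        break;
      }
    }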
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseV1.java
index ccaaa6b..7325def 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseV1.java
@@ -32,12 +32,13 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE;
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
 
 /**
  * Response for create file request layout version V1.
  */
-@CleanupTableInfo(cleanupTables = OPEN_FILE_TABLE)
+@CleanupTableInfo(cleanupTables = {DIRECTORY_TABLE, OPEN_FILE_TABLE})
 public class OMFileCreateResponseV1 extends OMFileCreateResponse {
 
   private List<OmDirectoryInfo> parentDirInfos;
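
(The widened @CleanupTableInfo above is the substantive fix: the V1
response now writes missing parent entries to the directory table in
addition to the open-file entry, and a response must declare every table it
mutates so the corresponding table caches are cleaned after the
double-buffer flush. A minimal hypothetical response following the same
convention; the class below is a sketch, not code from this branch, and the
constructor signature is assumed:)

    @CleanupTableInfo(cleanupTables = {DIRECTORY_TABLE, OPEN_FILE_TABLE})
    public class ExampleFileCreateResponse extends OMClientResponse {
      ExampleFileCreateResponse(OMResponse omResponse) {
        super(omResponse);
      }
      @Override
      public void addToDBBatch(OMMetadataManager omMetadataManager,
          BatchOperation batchOperation) throws IOException {
        // 1. put each missing parent OmDirectoryInfo into DIRECTORY_TABLE
        // 2. put the open OmKeyInfo entry into OPEN_FILE_TABLE
        // Both tables are named in the annotation above.
      }
    }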
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponseV1.java
index ef8b639..138cca1 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponseV1.java
@@ -22,7 +22,6 @@ import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponseV1.java
index c0840e3..5f0a337 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponseV1.java
@@ -22,7 +22,6 @@ import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
@@ -43,7 +42,6 @@ public class OMKeyCommitResponseV1 extends OMKeyCommitResponse {
   public OMKeyCommitResponseV1(@Nonnull OMResponse omResponse,
                                @Nonnull OmKeyInfo omKeyInfo,
                                String ozoneKeyName, String openKeyName,
-                               @Nonnull OmVolumeArgs omVolumeArgs,
                                @Nonnull OmBucketInfo omBucketInfo) {
     super(omResponse, omKeyInfo, ozoneKeyName, openKeyName,
             omBucketInfo);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponseV1.java
similarity index 51%
copy from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponseV1.java
copy to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponseV1.java
index ef8b639..59c7edf 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponseV1.java
@@ -18,43 +18,31 @@
 
 package org.apache.hadoop.ozone.om.response.key;
 
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponseV1;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 
 import javax.annotation.Nonnull;
-import java.io.IOException;
+import java.util.List;
 
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE;
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
 
 /**
- * Response for AllocateBlock request layout version V1.
+ * Response for CreateKey request layout version V1.
  */
-@CleanupTableInfo(cleanupTables = {OPEN_FILE_TABLE})
-public class OMAllocateBlockResponseV1 extends OMAllocateBlockResponse {
-
-  public OMAllocateBlockResponseV1(@Nonnull OMResponse omResponse,
-      @Nonnull OmKeyInfo omKeyInfo, long clientID,
-      @Nonnull OmBucketInfo omBucketInfo) {
-    super(omResponse, omKeyInfo, clientID, omBucketInfo);
-  }
-
-  @Override
-  public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
-
-    OMFileRequest.addToOpenFileTable(omMetadataManager, batchOperation,
-            getOmKeyInfo(), getClientID());
-
-    // update bucket usedBytes.
-    omMetadataManager.getBucketTable().putWithBatch(batchOperation,
-            omMetadataManager.getBucketKey(getOmKeyInfo().getVolumeName(),
-                    getOmKeyInfo().getBucketName()), getOmBucketInfo());
+@CleanupTableInfo(cleanupTables = {DIRECTORY_TABLE, OPEN_FILE_TABLE})
+public class OMKeyCreateResponseV1 extends OMFileCreateResponseV1 {
+
+  public OMKeyCreateResponseV1(@Nonnull OMResponse omResponse,
+                               @Nonnull OmKeyInfo omKeyInfo,
+                               @Nonnull List<OmDirectoryInfo> parentDirInfos,
+                               long openKeySessionID,
+                               @Nonnull OmBucketInfo omBucketInfo) {
+    super(omResponse, omKeyInfo, parentDirInfos, openKeySessionID,
+            omBucketInfo);
   }
 }
-
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
index 7269957..3df6f38 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
@@ -74,7 +74,7 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
         doPreExecute(createKeyRequest(false, 0));
 
     OMKeyCreateRequest omKeyCreateRequest =
-        new OMKeyCreateRequest(modifiedOmRequest);
+            getOMKeyCreateRequest(modifiedOmRequest);
 
     // Add volume and bucket entries to DB.
     addVolumeAndBucketToDB(volumeName, bucketName,
@@ -82,8 +82,7 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
 
     long id = modifiedOmRequest.getCreateKeyRequest().getClientID();
 
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, id);
+    String openKey = getOpenKey(id);
 
     // Before calling
     OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
@@ -138,7 +137,7 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
         doPreExecute(createKeyRequest(true, partNumber));
 
     OMKeyCreateRequest omKeyCreateRequest =
-        new OMKeyCreateRequest(modifiedOmRequest);
+            getOMKeyCreateRequest(modifiedOmRequest);
 
     // Add volume and bucket entries to DB.
     addVolumeAndBucketToDB(volumeName, bucketName,
@@ -178,7 +177,7 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
         doPreExecute(createKeyRequest(false, 0));
 
     OMKeyCreateRequest omKeyCreateRequest =
-        new OMKeyCreateRequest(modifiedOmRequest);
+            getOMKeyCreateRequest(modifiedOmRequest);
 
 
     long id = modifiedOmRequest.getCreateKeyRequest().getClientID();
@@ -217,13 +216,12 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
             false, 0));
 
     OMKeyCreateRequest omKeyCreateRequest =
-        new OMKeyCreateRequest(modifiedOmRequest);
+            getOMKeyCreateRequest(modifiedOmRequest);
 
 
     long id = modifiedOmRequest.getCreateKeyRequest().getClientID();
 
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, id);
+    String openKey = getOpenKey(id);
 
     TestOMRequestUtils.addVolumeToDB(volumeName, OzoneConsts.OZONE,
         omMetadataManager);
@@ -248,8 +246,6 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
 
   }
 
-
-
   /**
    * This method calls preExecute and verify the modified request.
    * @param originalOMRequest
@@ -259,7 +255,7 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
   private OMRequest doPreExecute(OMRequest originalOMRequest) throws Exception {
 
     OMKeyCreateRequest omKeyCreateRequest =
-        new OMKeyCreateRequest(originalOMRequest);
+            getOMKeyCreateRequest(originalOMRequest);
 
     OMRequest modifiedOmRequest =
         omKeyCreateRequest.preExecute(ozoneManager);
@@ -349,7 +345,7 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
   @Test
   public void testKeyCreateWithFileSystemPathsEnabled() throws Exception {
 
-    OzoneConfiguration configuration = new OzoneConfiguration();
+    OzoneConfiguration configuration = getOzoneConfiguration();
     configuration.setBoolean(OZONE_OM_ENABLE_FILESYSTEM_PATHS, true);
     when(ozoneManager.getConfiguration()).thenReturn(configuration);
     when(ozoneManager.getEnableFileSystemPaths()).thenReturn(true);
@@ -367,8 +363,7 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
     createAndCheck(keyName);
 
     // Commit openKey entry.
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
-        keyName.substring(1), 0L, RATIS, THREE, omMetadataManager);
+    addToKeyTable(keyName);
 
     // Now create another file in same dir path.
     keyName = "/a/b/c/file2";
@@ -430,10 +425,15 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
 
   }
 
+  protected void addToKeyTable(String keyName) throws Exception {
+    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
+        keyName.substring(1), 0L, RATIS, THREE, omMetadataManager);
+  }
+
 
   private void checkNotAValidPath(String keyName) {
     OMRequest omRequest = createKeyRequest(false, 0, keyName);
-    OMKeyCreateRequest omKeyCreateRequest = new OMKeyCreateRequest(omRequest);
+    OMKeyCreateRequest omKeyCreateRequest = getOMKeyCreateRequest(omRequest);
 
     try {
       omKeyCreateRequest.preExecute(ozoneManager);
@@ -450,11 +450,11 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
   private void checkNotAFile(String keyName) throws Exception {
     OMRequest omRequest = createKeyRequest(false, 0, keyName);
 
-    OMKeyCreateRequest omKeyCreateRequest = new OMKeyCreateRequest(omRequest);
+    OMKeyCreateRequest omKeyCreateRequest = getOMKeyCreateRequest(omRequest);
 
     omRequest = omKeyCreateRequest.preExecute(ozoneManager);
 
-    omKeyCreateRequest = new OMKeyCreateRequest(omRequest);
+    omKeyCreateRequest = getOMKeyCreateRequest(omRequest);
 
     OMClientResponse omClientResponse =
         omKeyCreateRequest.validateAndUpdateCache(ozoneManager,
@@ -468,11 +468,11 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
   private void createAndCheck(String keyName) throws Exception {
     OMRequest omRequest = createKeyRequest(false, 0, keyName);
 
-    OMKeyCreateRequest omKeyCreateRequest = new OMKeyCreateRequest(omRequest);
+    OMKeyCreateRequest omKeyCreateRequest = getOMKeyCreateRequest(omRequest);
 
     omRequest = omKeyCreateRequest.preExecute(ozoneManager);
 
-    omKeyCreateRequest = new OMKeyCreateRequest(omRequest);
+    omKeyCreateRequest = getOMKeyCreateRequest(omRequest);
 
     OMClientResponse omClientResponse =
         omKeyCreateRequest.validateAndUpdateCache(ozoneManager,
@@ -483,7 +483,7 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
     checkCreatedPaths(omKeyCreateRequest, omRequest, keyName);
   }
 
-  private void checkCreatedPaths(OMKeyCreateRequest omKeyCreateRequest,
+  protected void checkCreatedPaths(OMKeyCreateRequest omKeyCreateRequest,
       OMRequest omRequest, String keyName) throws Exception {
     keyName = omKeyCreateRequest.validateAndNormalizeKey(true, keyName);
     // Check intermediate directories created or not.
@@ -497,9 +497,7 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
     Assert.assertNotNull(omKeyInfo);
   }
 
-
-
-  private void checkIntermediatePaths(Path keyPath) throws Exception {
+  protected long checkIntermediatePaths(Path keyPath) throws Exception {
     // Check intermediate paths are created
     keyPath = keyPath.getParent();
     while(keyPath != null) {
@@ -508,6 +506,15 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
               keyPath.toString())));
       keyPath = keyPath.getParent();
     }
+    return -1;
+  }
+
+  protected String getOpenKey(long id) throws IOException {
+    return omMetadataManager.getOpenKey(volumeName, bucketName,
+            keyName, id);
   }
 
+  protected OMKeyCreateRequest getOMKeyCreateRequest(OMRequest omRequest) {
+    return new OMKeyCreateRequest(omRequest);
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestV1.java
new file mode 100644
index 0000000..83c640d
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestV1.java
@@ -0,0 +1,129 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.key;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Assert;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Iterator;
+
+/**
+ * Tests OMKeyCreateRequestV1 class.
+ */
+public class TestOMKeyCreateRequestV1 extends TestOMKeyCreateRequest {
+
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    // The omLayoutVersionV1 flag is normally set when OzoneManager#start()
+    // is invoked, which does not happen in this test. Hence the flag is set
+    // explicitly here so that the prefix tables are populated.
+    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
+    return config;
+  }
+
+  protected void addToKeyTable(String keyName) throws Exception {
+    Path keyPath = Paths.get(keyName);
+    long parentId = checkIntermediatePaths(keyPath);
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    OmKeyInfo omKeyInfo =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, fileName,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE,
+                    parentId + 1,
+                    parentId, 100, Time.now());
+    TestOMRequestUtils.addFileToKeyTable(false, false,
+            fileName, omKeyInfo, -1, 50, omMetadataManager);
+  }
+
+  protected void checkCreatedPaths(OMKeyCreateRequest omKeyCreateRequest,
+      OMRequest omRequest, String keyName) throws Exception {
+    keyName = omKeyCreateRequest.validateAndNormalizeKey(true, keyName);
+    // Check whether the intermediate directories were created.
+    Path keyPath = Paths.get(keyName);
+    long parentID = checkIntermediatePaths(keyPath);
+
+    // Check open key entry
+    String fileName = keyPath.getFileName().toString();
+    String openKey = omMetadataManager.getOpenFileName(parentID, fileName,
+            omRequest.getCreateKeyRequest().getClientID());
+    OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
+    Assert.assertNotNull(omKeyInfo);
+  }
+
+  protected long checkIntermediatePaths(Path keyPath) throws Exception {
+    // Check intermediate paths are created
+    keyPath = keyPath.getParent(); // skip the file name
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long lastKnownParentId = omBucketInfo.getObjectID();
+
+    Iterator<Path> elements = keyPath.iterator();
+    StringBuilder fullKeyPath = new StringBuilder(bucketKey);
+    while (elements.hasNext()) {
+      String fileName = elements.next().toString();
+      fullKeyPath.append(OzoneConsts.OM_KEY_PREFIX);
+      fullKeyPath.append(fileName);
+      String dbNodeName = omMetadataManager.getOzonePathKey(
+              lastKnownParentId, fileName);
+      OmDirectoryInfo omDirInfo = omMetadataManager.getDirectoryTable().
+              get(dbNodeName);
+
+      Assert.assertNotNull("Parent key path:" + fullKeyPath +
+              " doesn't exist", omDirInfo);
+      lastKnownParentId = omDirInfo.getObjectID();
+    }
+
+    return lastKnownParentId;
+  }
+
+  protected String getOpenKey(long id) throws IOException {
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    if (omBucketInfo != null) {
+      return omMetadataManager.getOpenFileName(omBucketInfo.getObjectID(),
+              keyName, id);
+    } else {
+      return omMetadataManager.getOpenFileName(1000, keyName, id);
+    }
+  }
+
+  protected OMKeyCreateRequest getOMKeyCreateRequest(OMRequest omRequest) {
+    return new OMKeyCreateRequestV1(omRequest);
+  }
+}
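
(checkIntermediatePaths() above encodes the V1 lookup rule: each path
component is resolved against the directory table using a key built from
the parent object id and the component name. Below is a hedged standalone
sketch of the same walk; the omMetadataManager API names are taken from the
test, while the volume, bucket and path values are illustrative:)

    // Resolve dir1/dir2 under a bucket by walking the directory table.
    String bucketKey = omMetadataManager.getBucketKey("vol1", "bucket1");
    long parentId = omMetadataManager.getBucketTable()
            .get(bucketKey).getObjectID();
    for (String component : new String[]{"dir1", "dir2"}) {
      String dbKey = omMetadataManager.getOzonePathKey(parentId, component);
      OmDirectoryInfo omDirInfo =
              omMetadataManager.getDirectoryTable().get(dbKey);
      if (omDirInfo == null) {
        throw new IOException("Missing intermediate dir: " + component);
      }
      parentId = omDirInfo.getObjectID(); // descend one level
    }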
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseV1.java
index e105a37..92b3efe 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseV1.java
@@ -24,7 +24,6 @@ import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
@@ -64,8 +63,8 @@ public class TestOMAllocateBlockResponseV1
 
   @NotNull
   protected OMAllocateBlockResponse getOmAllocateBlockResponse(
-          OmKeyInfo omKeyInfo, OmVolumeArgs omVolumeArgs,
-          OmBucketInfo omBucketInfo, OMResponse omResponse) {
+          OmKeyInfo omKeyInfo, OmBucketInfo omBucketInfo,
+          OMResponse omResponse) {
     return new OMAllocateBlockResponseV1(omResponse, omKeyInfo, clientID,
             omBucketInfo);
   }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
index 1e59ce8..4d68a4b 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
@@ -42,8 +42,7 @@ public class TestOMKeyCommitResponseV1 extends TestOMKeyCommitResponse {
           OzoneManagerProtocolProtos.OMResponse omResponse, String openKey,
           String ozoneKey) {
     Assert.assertNotNull(omBucketInfo);
-    return new OMKeyCommitResponseV1(
-            omResponse, omKeyInfo, ozoneKey, openKey, omVolumeArgs,
+    return new OMKeyCommitResponseV1(omResponse, omKeyInfo, ozoneKey, openKey,
             omBucketInfo);
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseV1.java
similarity index 60%
copy from hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
copy to hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseV1.java
index 1e59ce8..e51a06b 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseV1.java
@@ -19,61 +19,31 @@
 package org.apache.hadoop.ozone.om.response.key;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.apache.hadoop.util.Time;
 import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
 
 /**
- * Tests OMKeyCommitResponse layout version V1.
+ * Tests OMKeyCreateResponseV1.
  */
-public class TestOMKeyCommitResponseV1 extends TestOMKeyCommitResponse {
-
-  @NotNull
-  protected OMKeyCommitResponse getOmKeyCommitResponse(
-          OmVolumeArgs omVolumeArgs, OmKeyInfo omKeyInfo,
-          OzoneManagerProtocolProtos.OMResponse omResponse, String openKey,
-          String ozoneKey) {
-    Assert.assertNotNull(omBucketInfo);
-    return new OMKeyCommitResponseV1(
-            omResponse, omKeyInfo, ozoneKey, openKey, omVolumeArgs,
-            omBucketInfo);
-  }
+public class TestOMKeyCreateResponseV1 extends TestOMKeyCreateResponse {
 
   @NotNull
   @Override
-  protected OmKeyInfo getOmKeyInfo() {
-    Assert.assertNotNull(omBucketInfo);
-    return TestOMRequestUtils.createOmKeyInfo(volumeName,
-            omBucketInfo.getBucketName(), keyName, replicationType,
-            replicationFactor,
-            omBucketInfo.getObjectID() + 1,
-            omBucketInfo.getObjectID(), 100, Time.now());
-  }
-
-  @NotNull
-  @Override
-  protected void addKeyToOpenKeyTable() throws Exception {
-    Assert.assertNotNull(omBucketInfo);
-    long parentID = omBucketInfo.getObjectID();
-    long objectId = parentID + 10;
-
-    OmKeyInfo omKeyInfoV1 =
-            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
-                    HddsProtos.ReplicationType.RATIS,
-                    HddsProtos.ReplicationFactor.ONE, objectId, parentID, 100,
-                    Time.now());
-
-    String fileName = OzoneFSUtils.getFileName(keyName);
-    TestOMRequestUtils.addFileToKeyTable(true, false,
-            fileName, omKeyInfoV1, clientID, txnLogId, omMetadataManager);
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    // The omLayoutVersionV1 flag is normally set when OzoneManager#start()
+    // is invoked, which does not happen in this test. Hence the flag is set
+    // explicitly here so that the prefix tables are populated.
+    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
+    return config;
   }
 
   @NotNull
@@ -86,21 +56,20 @@ public class TestOMKeyCommitResponseV1 extends TestOMKeyCommitResponse {
 
   @NotNull
   @Override
-  protected String getOzoneKey() {
+  protected OmKeyInfo getOmKeyInfo() {
     Assert.assertNotNull(omBucketInfo);
-    return omMetadataManager.getOzonePathKey(omBucketInfo.getObjectID(),
-            keyName);
+    return TestOMRequestUtils.createOmKeyInfo(volumeName,
+            omBucketInfo.getBucketName(), keyName, replicationType,
+            replicationFactor,
+            omBucketInfo.getObjectID() + 1,
+            omBucketInfo.getObjectID(), 100, Time.now());
   }
 
   @NotNull
-  @Override
-  protected OzoneConfiguration getOzoneConfiguration() {
-    OzoneConfiguration config = super.getOzoneConfiguration();
-    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
-    // omLayoutVersionV1 flag will be set while invoking OzoneManager#start()
-    // and its not invoked in this test. Hence it is explicitly setting
-    // this configuration to populate prefix tables.
-    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
-    return config;
+  protected OMKeyCreateResponse getOmKeyCreateResponse(OmKeyInfo keyInfo,
+      OmBucketInfo bucketInfo, OMResponse response) {
+
+    return new OMKeyCreateResponseV1(response, keyInfo, null, clientID,
+            bucketInfo);
   }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 07/19: HDDS-4596. Directory table, fileTable and openFile Table is missing from the OM DB Definition. (#1724)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit f1fc06002f1dac135f450da7652559d2380ce61b
Author: Mukul Kumar Singh <ms...@apache.org>
AuthorDate: Sat Dec 19 23:36:59 2020 +0530

    HDDS-4596. Directory table, fileTable and openFile Table is missing from the OM DB Definition. (#1724)
---
 .../ozone/om/codec/OmDirectoryInfoCodec.java       |  0
 .../hadoop/ozone/om/codec/OMDBDefinition.java      | 31 +++++++++++++++++++++-
 2 files changed, 30 insertions(+), 1 deletion(-)

diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OmDirectoryInfoCodec.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmDirectoryInfoCodec.java
similarity index 100%
rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OmDirectoryInfoCodec.java
rename to hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmDirectoryInfoCodec.java
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
index b1c5096..d025948 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
 import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 
 import org.apache.hadoop.ozone.om.ratis.OMTransactionInfo;
 import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
@@ -142,6 +143,33 @@ public class OMDBDefinition implements DBDefinition {
                     OMTransactionInfo.class,
                     new OMTransactionInfoCodec());
 
+  public static final DBColumnFamilyDefinition<String, OmDirectoryInfo>
+            DIRECTORY_TABLE =
+            new DBColumnFamilyDefinition<>(
+                    OmMetadataManagerImpl.DIRECTORY_TABLE,
+                    String.class,
+                    new StringCodec(),
+                    OmDirectoryInfo.class,
+                    new OmDirectoryInfoCodec());
+
+  public static final DBColumnFamilyDefinition<String, OmKeyInfo>
+            FILE_TABLE =
+            new DBColumnFamilyDefinition<>(
+                    OmMetadataManagerImpl.FILE_TABLE,
+                    String.class,
+                    new StringCodec(),
+                    OmKeyInfo.class,
+                    new OmKeyInfoCodec(true));
+
+  public static final DBColumnFamilyDefinition<String, OmKeyInfo>
+            OPEN_FILE_TABLE =
+            new DBColumnFamilyDefinition<>(
+                  OmMetadataManagerImpl.OPEN_FILE_TABLE,
+                  String.class,
+                  new StringCodec(),
+                  OmKeyInfo.class,
+                  new OmKeyInfoCodec(true));
+
   @Override
   public String getName() {
     return OzoneConsts.OM_DB_NAME;
@@ -157,7 +185,8 @@ public class OMDBDefinition implements DBDefinition {
     return new DBColumnFamilyDefinition[] {DELETED_TABLE, USER_TABLE,
         VOLUME_TABLE, OPEN_KEY_TABLE, KEY_TABLE,
         BUCKET_TABLE, MULTIPART_INFO_TABLE, PREFIX_TABLE, DTOKEN_TABLE,
-        S3_SECRET_TABLE, TRANSACTION_INFO_TABLE};
+        S3_SECRET_TABLE, TRANSACTION_INFO_TABLE, DIRECTORY_TABLE,
+        FILE_TABLE, OPEN_FILE_TABLE};
   }
 }
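
(Registering a table in OMDBDefinition is a two-step pattern, as the fix
above shows: declare a DBColumnFamilyDefinition that pairs the column
family name with key and value codecs, then return it from
getColumnFamilies(). A hedged sketch for a hypothetical new table follows;
the name and codecs are illustrative, mirroring the DIRECTORY_TABLE
pattern:)

    public static final DBColumnFamilyDefinition<String, OmKeyInfo>
            EXAMPLE_TABLE =
            new DBColumnFamilyDefinition<>(
                    "exampleTable",            // column family name in om.db
                    String.class,
                    new StringCodec(),         // key codec
                    OmKeyInfo.class,
                    new OmKeyInfoCodec(true)); // value codec

    // EXAMPLE_TABLE must also be added to the array returned by
    // getColumnFamilies(); a definition that is declared but not returned
    // is exactly the omission this commit fixes.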
 


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 05/19: HDDS-4358: Delete : make delete an atomic operation (#1607)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 0cae5ccbfcafce797ee53fcc19dad865bcff02f4
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Mon Dec 7 13:16:36 2020 +0530

    HDDS-4358: Delete : make delete an atomic operation (#1607)
---
 .../apache/hadoop/ozone/client/OzoneBucket.java    |  16 +-
 .../ozone/client/protocol/ClientProtocol.java      |   5 +-
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |   3 +-
 .../hadoop/ozone/om/exceptions/OMException.java    |   4 +-
 .../apache/hadoop/ozone/om/helpers/OmKeyArgs.java  |  16 +-
 ...OzoneManagerProtocolClientSideTranslatorPB.java |   3 +-
 .../hadoop/fs/ozone/TestOzoneFileSystem.java       |  21 ++-
 .../hadoop/fs/ozone/TestOzoneFileSystemV1.java     | 182 +++----------------
 .../src/main/proto/OmClientProtocol.proto          |   4 +
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  10 +-
 .../om/ratis/utils/OzoneManagerRatisUtils.java     |   4 +
 .../ozone/om/request/file/OMFileRequest.java       |  92 ++++++++++
 .../ozone/om/request/key/OMKeyDeleteRequestV1.java | 202 +++++++++++++++++++++
 .../ozone/om/request/key/OMKeyRenameRequestV1.java |   3 +-
 .../ozone/om/response/key/OMKeyDeleteResponse.java |   8 +
 ...eteResponse.java => OMKeyDeleteResponseV1.java} |  49 ++---
 .../ozone/om/request/TestOMRequestUtils.java       |   3 +
 .../om/request/key/TestOMKeyDeleteRequest.java     |  40 ++--
 .../om/request/key/TestOMKeyDeleteRequestV1.java   |  57 ++++++
 .../om/response/key/TestOMKeyDeleteResponse.java   |  77 ++++----
 .../om/response/key/TestOMKeyDeleteResponseV1.java |  70 +++++++
 .../fs/ozone/BasicOzoneClientAdapterImpl.java      |  26 ++-
 .../hadoop/fs/ozone/BasicOzoneFileSystem.java      |  26 ++-
 .../ozone/BasicRootedOzoneClientAdapterImpl.java   |  19 +-
 .../apache/hadoop/fs/ozone/OzoneClientAdapter.java |   4 +-
 25 files changed, 693 insertions(+), 251 deletions(-)

diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index f688a66..c1877b4 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
@@ -553,7 +553,21 @@ public class OzoneBucket extends WithMetadata {
    * @throws IOException
    */
   public void deleteKey(String key) throws IOException {
-    proxy.deleteKey(volumeName, name, key);
+    proxy.deleteKey(volumeName, name, key, false);
+  }
+
+  /**
+   * Ozone FS API to delete a directory. Sub-directories are deleted when
+   * the recursive flag is true; otherwise the call is non-recursive.
+   *
+   * @param key       Name of the key to be deleted.
+   * @param recursive if true, delete all sub-path keys recursively;
+   *                  otherwise non-recursive
+   * @throws IOException
+   */
+  public void deleteDirectory(String key, boolean recursive)
+      throws IOException {
+    proxy.deleteKey(volumeName, name, key, recursive);
   }
 
   /**
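
(A hedged usage sketch of the new API; the client setup, volume and bucket
names are illustrative:)

    OzoneClient client = OzoneClientFactory.getRpcClient(conf);
    OzoneBucket bucket = client.getObjectStore()
            .getVolume("vol1").getBucket("bucket1");
    bucket.deleteKey("dir1/file1.txt");          // single key delete
    bucket.deleteDirectory("dir1/subdir", true); // dir plus all children
    bucket.deleteDirectory("dir1", false);       // fails if dir1 is non-empty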
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
index 863a109..2f1c5af 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
@@ -289,9 +289,12 @@ public interface ClientProtocol {
    * @param volumeName Name of the Volume
    * @param bucketName Name of the Bucket
    * @param keyName Name of the Key
+   * @param recursive if true, delete all sub-path keys recursively;
+   *                  otherwise non-recursive
    * @throws IOException
    */
-  void deleteKey(String volumeName, String bucketName, String keyName)
+  void deleteKey(String volumeName, String bucketName, String keyName,
+                 boolean recursive)
       throws IOException;
 
   /**
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index ed85a32..ed678c2 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -749,7 +749,7 @@ public class RpcClient implements ClientProtocol {
 
   @Override
   public void deleteKey(
-      String volumeName, String bucketName, String keyName)
+      String volumeName, String bucketName, String keyName, boolean recursive)
       throws IOException {
     verifyVolumeName(volumeName);
     verifyBucketName(bucketName);
@@ -758,6 +758,7 @@ public class RpcClient implements ClientProtocol {
         .setVolumeName(volumeName)
         .setBucketName(bucketName)
         .setKeyName(keyName)
+        .setRecursive(recursive)
         .build();
     ozoneManagerClient.deleteKey(keyArgs);
   }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
index b676bca..bba97cd 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
@@ -233,7 +233,9 @@ public class OMException extends IOException {
 
     QUOTA_EXCEEDED,
 
-    QUOTA_ERROR
+    QUOTA_ERROR,
+
+    DIRECTORY_NOT_EMPTY
 
   }
 }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java
index c08c988..f8c7c23 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java
@@ -48,6 +48,7 @@ public final class OmKeyArgs implements Auditable {
   private boolean refreshPipeline;
   private boolean sortDatanodesInPipeline;
   private List<OzoneAcl> acls;
+  private boolean recursive;
 
   @SuppressWarnings("parameternumber")
   private OmKeyArgs(String volumeName, String bucketName, String keyName,
@@ -55,7 +56,7 @@ public final class OmKeyArgs implements Auditable {
       List<OmKeyLocationInfo> locationInfoList, boolean isMultipart,
       String uploadID, int partNumber,
       Map<String, String> metadataMap, boolean refreshPipeline,
-      List<OzoneAcl> acls, boolean sortDatanode) {
+      List<OzoneAcl> acls, boolean sortDatanode, boolean recursive) {
     this.volumeName = volumeName;
     this.bucketName = bucketName;
     this.keyName = keyName;
@@ -70,6 +71,7 @@ public final class OmKeyArgs implements Auditable {
     this.refreshPipeline = refreshPipeline;
     this.acls = acls;
     this.sortDatanodesInPipeline = sortDatanode;
+    this.recursive = recursive;
   }
 
   public boolean getIsMultipartKey() {
@@ -140,6 +142,10 @@ public final class OmKeyArgs implements Auditable {
     return sortDatanodesInPipeline;
   }
 
+  public boolean isRecursive() {
+    return recursive;
+  }
+
   @Override
   public Map<String, String> toAuditMap() {
     Map<String, String> auditMap = new LinkedHashMap<>();
@@ -198,6 +204,7 @@ public final class OmKeyArgs implements Auditable {
     private boolean refreshPipeline;
     private boolean sortDatanodesInPipeline;
     private List<OzoneAcl> acls;
+    private boolean recursive;
 
     public Builder setVolumeName(String volume) {
       this.volumeName = volume;
@@ -274,11 +281,16 @@ public final class OmKeyArgs implements Auditable {
       return this;
     }
 
+    public Builder setRecursive(boolean isRecursive) {
+      this.recursive = isRecursive;
+      return this;
+    }
+
     public OmKeyArgs build() {
       return new OmKeyArgs(volumeName, bucketName, keyName, dataSize, type,
           factor, locationInfoList, isMultipartKey, multipartUploadID,
           multipartUploadPartNumber, metadata, refreshPipeline, acls,
-          sortDatanodesInPipeline);
+          sortDatanodesInPipeline, recursive);
     }
 
   }
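
(The recursive flag rides inside OmKeyArgs from the client to the OM; here
is a hedged sketch of building delete args with it, using only builder
methods that appear in this diff. The volume, bucket and key names are
illustrative:)

    OmKeyArgs deleteArgs = new OmKeyArgs.Builder()
            .setVolumeName("vol1")
            .setBucketName("bucket1")
            .setKeyName("dir1/subdir")
            .setRecursive(true) // delete all sub-path keys as well
            .build();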
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
index a2b2f48..acc95ad 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
@@ -746,7 +746,8 @@ public final class OzoneManagerProtocolClientSideTranslatorPB
     KeyArgs keyArgs = KeyArgs.newBuilder()
         .setVolumeName(args.getVolumeName())
         .setBucketName(args.getBucketName())
-        .setKeyName(args.getKeyName()).build();
+        .setKeyName(args.getKeyName())
+        .setRecursive(args.isRecursive()).build();
     req.setKeyArgs(keyArgs);
 
     OMRequest omRequest = createOMRequest(Type.DeleteKey)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
index 394abf9..60a28ce 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.fs.TrashPolicy;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
@@ -377,6 +378,24 @@ public class TestOzoneFileSystem {
       ContractTestUtils.touch(fs, child);
     }
 
+    // try to delete a non-empty child dir without the recursive flag
+    try {
+      FileStatus[] parents = fs.listStatus(grandparent);
+      Assert.assertTrue(parents.length > 0);
+      fs.delete(parents[0].getPath(), false);
+      Assert.fail("Must throw exception as dir is not empty!");
+    } catch (PathIsNotEmptyDirectoryException pde) {
+      // expected
+    }
+
+    // try to delete the non-empty grandparent dir without the recursive flag
+    try {
+      fs.delete(grandparent, false);
+      Assert.fail("Must throw exception as dir is not empty!");
+    } catch (PathIsNotEmptyDirectoryException pde) {
+      // expected
+    }
+
     // Delete the grandparent, which should delete all keys.
     fs.delete(grandparent, true);
 
@@ -785,7 +804,7 @@ public class TestOzoneFileSystem {
 
     // Add a sub-directory '/b/a' to '/b'. This is to verify that rename
     // throws exception as new destin /b/a already exists.
-    final Path baPath = new Path(fs.getUri().toString() + "/b/a");
+    final Path baPath = new Path(fs.getUri().toString() + "/b/a/c");
     fs.mkdirs(baPath);
 
     Assert.assertFalse("New destin sub-path /b/a already exists",
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
index e2b7887..212080b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.fs.ozone;
 
 import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -30,19 +29,12 @@ import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.TestDataUtil;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.junit.Assert;
 import org.junit.After;
 import org.junit.Before;
@@ -59,8 +51,6 @@ import org.slf4j.LoggerFactory;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.Map;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
@@ -124,7 +114,7 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
   public void cleanup() {
     super.cleanup();
     try {
-      tableCleanup();
+      deleteRootDir();
     } catch (IOException e) {
       LOG.info("Failed to cleanup DB tables.", e);
       fail("Failed to cleanup DB tables." + e.getMessage());
@@ -381,153 +371,8 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
   }
 
   /**
-   * Case-5) If new destin '/dst/source' exists then throws exception.
-   * If destination is a directory then rename source as sub-path of it.
-   * <p>
-   * For example: rename /a to /b will lead to /b/a. This new path should
-   * not exist.
-   */
-  @Test
-  public void testRenameToNewSubDirShouldNotExist() throws Exception {
-    // Case-5.a) Rename directory from /a to /b.
-    // created /a
-    final Path aSourcePath = new Path(fs.getUri().toString() + "/a");
-    fs.mkdirs(aSourcePath);
-
-    // created /b
-    final Path bDestinPath = new Path(fs.getUri().toString() + "/b");
-    fs.mkdirs(bDestinPath);
-
-    // Add a sub-directory '/b/a' to '/b'. This is to verify that rename
-    // throws exception as new destin /b/a already exists.
-    final Path baPath = new Path(fs.getUri().toString() + "/b/a");
-    fs.mkdirs(baPath);
-
-    try {
-      fs.rename(aSourcePath, bDestinPath);
-      Assert.fail("Should fail as new destination dir exists!");
-    } catch (OMException ome) {
-      // expected as new sub-path /b/a already exists.
-      assertEquals(ome.getResult(), OMException.ResultCodes.KEY_ALREADY_EXISTS);
-    }
-
-    // Case-5.b) Rename file from /a/b/c/file1 to /a.
-    // Should be failed since /a/file1 exists.
-    final Path abcPath = new Path(fs.getUri().toString() + "/a/b/c");
-    fs.mkdirs(abcPath);
-    Path abcFile1 = new Path(abcPath, "/file1");
-    ContractTestUtils.touch(fs, abcFile1);
-
-    final Path aFile1 = new Path(fs.getUri().toString() + "/a/file1");
-    ContractTestUtils.touch(fs, aFile1);
-
-    final Path aDestinPath = new Path(fs.getUri().toString() + "/a");
-
-    try {
-      fs.rename(abcFile1, aDestinPath);
-      Assert.fail("Should fail as new destination file exists!");
-    } catch (OMException ome) {
-      // expected as new sub-path /b/a already exists.
-      assertEquals(ome.getResult(), OMException.ResultCodes.KEY_ALREADY_EXISTS);
-    }
-  }
-
-  /**
-   * Case-6) Rename directory to an existed file, should be failed.
-   */
-  @Test
-  public void testRenameDirToFile() throws Exception {
-    final String root = "/root";
-    Path rootPath = new Path(fs.getUri().toString() + root);
-    fs.mkdirs(rootPath);
-
-    Path file1Destin = new Path(fs.getUri().toString() + root + "/file1");
-    ContractTestUtils.touch(fs, file1Destin);
-    Path abcRootPath = new Path(fs.getUri().toString() + "/a/b/c");
-    fs.mkdirs(abcRootPath);
-    try {
-      fs.rename(abcRootPath, file1Destin);
-      Assert.fail("key already exists /root_dir/file1");
-    } catch (OMException ome) {
-      // expected
-      assertEquals(ome.getResult(), OMException.ResultCodes.KEY_ALREADY_EXISTS);
-    }
-  }
-
-  /**
    * Cleanup keyTable and directoryTable explicitly as FS delete operation
    * is not yet supported.
-   *
-   * @throws IOException DB failure
-   */
-  private void tableCleanup() throws IOException {
-    OMMetadataManager metadataMgr = cluster.getOzoneManager()
-            .getMetadataManager();
-    TableIterator<String, ? extends
-            Table.KeyValue<String, OmDirectoryInfo>> dirTableIterator =
-            metadataMgr.getDirectoryTable().iterator();
-    dirTableIterator.seekToFirst();
-    ArrayList <String> dirList = new ArrayList<>();
-    while (dirTableIterator.hasNext()) {
-      String key = dirTableIterator.key();
-      if (StringUtils.isNotBlank(key)) {
-        dirList.add(key);
-      }
-      dirTableIterator.next();
-    }
-
-    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmDirectoryInfo>>>
-            cacheIterator = metadataMgr.getDirectoryTable().cacheIterator();
-    while(cacheIterator.hasNext()){
-      cacheIterator.next();
-      cacheIterator.remove();
-    }
-
-    for (String dirKey : dirList) {
-      metadataMgr.getDirectoryTable().delete(dirKey);
-      Assert.assertNull("Unexpected entry!",
-              metadataMgr.getDirectoryTable().get(dirKey));
-    }
-
-    Assert.assertTrue("DirTable is not empty",
-            metadataMgr.getDirectoryTable().isEmpty());
-
-    Assert.assertFalse(metadataMgr.getDirectoryTable().cacheIterator()
-            .hasNext());
-
-    TableIterator<String, ? extends
-            Table.KeyValue<String, OmKeyInfo>> keyTableIterator =
-            metadataMgr.getKeyTable().iterator();
-    keyTableIterator.seekToFirst();
-    ArrayList <String> fileList = new ArrayList<>();
-    while (keyTableIterator.hasNext()) {
-      String key = keyTableIterator.key();
-      if (StringUtils.isNotBlank(key)) {
-        fileList.add(key);
-      }
-      keyTableIterator.next();
-    }
-
-    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>>
-            keyCacheIterator = metadataMgr.getKeyTable().cacheIterator();
-    while(keyCacheIterator.hasNext()){
-      keyCacheIterator.next();
-      keyCacheIterator.remove();
-    }
-
-    for (String fileKey : fileList) {
-      metadataMgr.getKeyTable().delete(fileKey);
-      Assert.assertNull("Unexpected entry!",
-              metadataMgr.getKeyTable().get(fileKey));
-    }
-
-    Assert.assertTrue("KeyTable is not empty",
-            metadataMgr.getKeyTable().isEmpty());
-
-    rootItemCount = 0;
-  }
-
-  /**
    * Fails if the (a) parent of dst does not exist or (b) parent is a file.
    */
   @Test
@@ -562,6 +407,31 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
     }
   }
 
+  /**
+   * Cleanup files and directories.
+   *
+   * @throws IOException DB failure
+   */
+  protected void deleteRootDir() throws IOException {
+    Path root = new Path("/");
+    FileStatus[] fileStatuses = fs.listStatus(root);
+
+    rootItemCount = 0; // reset to zero
+
+    if (fileStatuses == null) {
+      return;
+    }
+
+    for (FileStatus fStatus : fileStatuses) {
+      fs.delete(fStatus.getPath(), true);
+    }
+
+    fileStatuses = fs.listStatus(root);
+    if (fileStatuses != null) {
+      Assert.assertEquals("Delete root failed!", 0, fileStatuses.length);
+    }
+  }
+
   @Override
   @Test
   @Ignore("TODO:HDDS-2939")
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index 6707e08..f429276 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -321,6 +321,7 @@ enum Status {
 
     QUOTA_ERROR = 67;
 
+    DIRECTORY_NOT_EMPTY = 68;
 }
 
 /**
@@ -731,6 +732,9 @@ message KeyArgs {
 
     // This will be set by leader OM in HA and update the original request.
     optional FileEncryptionInfoProto fileEncryptionInfo = 15;
+
+    // This will be set when user performs delete directory recursively.
+    optional bool recursive = 16;
 }
 
 message KeyLocation {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 8aaca5f..46ffce8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -2462,7 +2462,8 @@ public class KeyManagerImpl implements KeyManager {
 
     while (iterator.hasNext() && numEntries - countEntries > 0) {
       OmDirectoryInfo dirInfo = iterator.value().getValue();
-      if (!isImmediateChild(dirInfo.getParentObjectID(), prefixKeyInDB)) {
+      if (!OMFileRequest.isImmediateChild(dirInfo.getParentObjectID(),
+              prefixKeyInDB)) {
         break;
       }
 
@@ -2497,7 +2498,8 @@ public class KeyManagerImpl implements KeyManager {
     while (iterator.hasNext() && numEntries - countEntries > 0) {
       OmKeyInfo keyInfo = iterator.value().getValue();
 
-      if (!isImmediateChild(keyInfo.getParentObjectID(), prefixKeyInDB)) {
+      if (!OMFileRequest.isImmediateChild(keyInfo.getParentObjectID(),
+              prefixKeyInDB)) {
         break;
       }
 
@@ -2512,10 +2514,6 @@ public class KeyManagerImpl implements KeyManager {
     return countEntries;
   }
 
-  private boolean isImmediateChild(long parentId, long ancestorId) {
-    return parentId == ancestorId;
-  }
-
   /**
    * Helper function for listStatus to find key in FileTableCache.
    */
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index d2dd5c7..7b5988c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequestV1;
 import org.apache.hadoop.ozone.om.request.key.OMKeyCreateRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyDeleteRequest;
+import org.apache.hadoop.ozone.om.request.key.OMKeyDeleteRequestV1;
 import org.apache.hadoop.ozone.om.request.key.OMKeyPurgeRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyRenameRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyRenameRequestV1;
@@ -146,6 +147,9 @@ public final class OzoneManagerRatisUtils {
       }
       return new OMKeyCommitRequest(omRequest);
     case DeleteKey:
+      if (omLayoutVersionV1) {
+        return new OMKeyDeleteRequestV1(omRequest);
+      }
       return new OMKeyDeleteRequest(omRequest);
     case DeleteKeys:
       return new OMKeysDeleteRequest(omRequest);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index e7b43d6..fc9bab0 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -25,11 +25,14 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 
 import com.google.common.base.Optional;
 import com.google.common.base.Strings;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.OzoneAcl;
@@ -759,4 +762,93 @@ public final class OMFileRequest {
     }
     return toKeyParentDirStatus.getKeyInfo().getObjectID();
   }
+
+  /**
+   * Checks whether any sub-paths exist under the given user key path.
+   *
+   * @param omKeyInfo om key path
+   * @param metaMgr   OMMetadataManager
+   * @return true if any sub-paths exist, false otherwise
+   * @throws IOException DB exception
+   */
+  public static boolean hasChildren(OmKeyInfo omKeyInfo,
+      OMMetadataManager metaMgr) throws IOException {
+    return checkSubDirectoryExists(omKeyInfo, metaMgr) ||
+            checkSubFileExists(omKeyInfo, metaMgr);
+  }
+
+  private static boolean checkSubDirectoryExists(OmKeyInfo omKeyInfo,
+      OMMetadataManager metaMgr) throws IOException {
+    // Check the dirTable cache for any sub-paths.
+    Table dirTable = metaMgr.getDirectoryTable();
+    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmDirectoryInfo>>>
+            cacheIter = dirTable.cacheIterator();
+
+    while (cacheIter.hasNext()) {
+      Map.Entry<CacheKey<String>, CacheValue<OmDirectoryInfo>> entry =
+              cacheIter.next();
+      OmDirectoryInfo cacheOmDirInfo = entry.getValue().getCacheValue();
+      if (cacheOmDirInfo == null) {
+        continue;
+      }
+      if (isImmediateChild(cacheOmDirInfo.getParentObjectID(),
+              omKeyInfo.getObjectID())) {
+        return true; // found a sub path directory
+      }
+    }
+
+    // Check persisted dirTable entries for any sub-paths.
+    String seekDirInDB = metaMgr.getOzonePathKey(omKeyInfo.getObjectID(), "");
+    TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
+            iterator = dirTable.iterator();
+
+    iterator.seek(seekDirInDB);
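+    // Sub-directories are keyed by this directory's objectID prefix, so the
+    // first record at or after the prefix is enough to detect a child.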
+
+    if (iterator.hasNext()) {
+      OmDirectoryInfo dirInfo = iterator.value().getValue();
+      return isImmediateChild(dirInfo.getParentObjectID(),
+              omKeyInfo.getObjectID());
+    }
+    return false; // no sub paths found
+  }
+
+  private static boolean checkSubFileExists(OmKeyInfo omKeyInfo,
+      OMMetadataManager metaMgr) throws IOException {
+    // Check the fileTable (keyTable) cache for any sub-paths.
+    Table fileTable = metaMgr.getKeyTable();
+    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>>
+            cacheIter = fileTable.cacheIterator();
+
+    while (cacheIter.hasNext()) {
+      Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>> entry =
+              cacheIter.next();
+      OmKeyInfo cacheOmFileInfo = entry.getValue().getCacheValue();
+      if (cacheOmFileInfo == null) {
+        continue;
+      }
+      if (isImmediateChild(cacheOmFileInfo.getParentObjectID(),
+              omKeyInfo.getObjectID())) {
+        return true; // found a sub path file
+      }
+    }
+
+    // Check persisted fileTable entries for any sub-paths.
+    String seekFileInDB = metaMgr.getOzonePathKey(
+            omKeyInfo.getObjectID(), "");
+    TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+            iterator = fileTable.iterator();
+
+    iterator.seek(seekFileInDB);
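+    // Same prefix-seek approach as the sub-directory check above.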
+
+    if (iterator.hasNext()) {
+      OmKeyInfo fileInfo = iterator.value().getValue();
+      return isImmediateChild(fileInfo.getParentObjectID(),
+              omKeyInfo.getObjectID()); // found a sub path file
+    }
+    return false; // no sub paths found
+  }
+
+  public static boolean isImmediateChild(long parentId, long ancestorId) {
+    return parentId == ancestorId;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java
new file mode 100644
index 0000000..93531bc
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java
@@ -0,0 +1,202 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.key;
+
+import com.google.common.base.Optional;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.audit.AuditLogger;
+import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OMMetrics;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.key.OMKeyDeleteResponseV1;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_EMPTY;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+
+/**
+ * Handles the DeleteKey request for layout version V1.
+ */
+public class OMKeyDeleteRequestV1 extends OMKeyDeleteRequest {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OMKeyDeleteRequestV1.class);
+
+  public OMKeyDeleteRequestV1(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override
+  @SuppressWarnings("methodlength")
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+    DeleteKeyRequest deleteKeyRequest = getOmRequest().getDeleteKeyRequest();
+
+    OzoneManagerProtocolProtos.KeyArgs keyArgs =
+        deleteKeyRequest.getKeyArgs();
+    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
+
+    String volumeName = keyArgs.getVolumeName();
+    String bucketName = keyArgs.getBucketName();
+    String keyName = keyArgs.getKeyName();
+    boolean recursive = keyArgs.getRecursive();
+
+    OMMetrics omMetrics = ozoneManager.getMetrics();
+    omMetrics.incNumKeyDeletes();
+
+    AuditLogger auditLogger = ozoneManager.getAuditLogger();
+    OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
+
+    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
+        getOmRequest());
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+    IOException exception = null;
+    boolean acquiredLock = false;
+    OMClientResponse omClientResponse = null;
+    Result result = null;
+    OmVolumeArgs omVolumeArgs = null;
+    OmBucketInfo omBucketInfo = null;
+    try {
+      keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
+      volumeName = keyArgs.getVolumeName();
+      bucketName = keyArgs.getBucketName();
+
+      // check Acl
+      checkKeyAcls(ozoneManager, volumeName, bucketName, keyName,
+          IAccessAuthorizer.ACLType.DELETE, OzoneObj.ResourceType.KEY);
+
+      acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
+          volumeName, bucketName);
+
+      // Validate that the bucket and volume exist.
+      validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
+
+      OzoneFileStatus keyStatus =
+              OMFileRequest.getOMKeyInfoIfExists(omMetadataManager, volumeName,
+                      bucketName, keyName, 0);
+
+      if (keyStatus == null) {
+        throw new OMException("Key not found. Key:" + keyName, KEY_NOT_FOUND);
+      }
+
+      OmKeyInfo omKeyInfo = keyStatus.getKeyInfo();
+
+      // Set the UpdateID to current transactionLogIndex
+      omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
+
+      String ozonePathKey = omMetadataManager.getOzonePathKey(
+              omKeyInfo.getParentObjectID(), omKeyInfo.getFileName());
+
+      if (keyStatus.isDirectory()) {
+        // Check whether any sub-paths exist under the user requested path.
+        if (!recursive && OMFileRequest.hasChildren(omKeyInfo,
+                omMetadataManager)) {
+          throw new OMException("Directory is not empty. Key:" + keyName,
+                  DIRECTORY_NOT_EMPTY);
+        }
+
+        // Update dir cache.
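+        // An absent CacheValue acts as a tombstone until the double buffer
+        // flushes this response and removes the DB record.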
+        omMetadataManager.getDirectoryTable().addCacheEntry(
+                new CacheKey<>(ozonePathKey),
+                new CacheValue<>(Optional.absent(), trxnLogIndex));
+      } else {
+        // Update table cache.
+        omMetadataManager.getKeyTable().addCacheEntry(
+                new CacheKey<>(ozonePathKey),
+                new CacheValue<>(Optional.absent(), trxnLogIndex));
+      }
+
+      omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
+
+      long quotaReleased = sumBlockLengths(omKeyInfo);
+      omBucketInfo.incrUsedBytes(-quotaReleased);
+      omBucketInfo.incrUsedNamespace(-1L);
+
+      // No need to add cache entries to the delete table: it is consumed
+      // only by the DeleteKeyService and is never used to validate client
+      // responses, so there is nothing to gain from caching it.
+      // TODO: Revisit if we need it later.
+
+      omClientResponse = new OMKeyDeleteResponseV1(omResponse
+          .setDeleteKeyResponse(DeleteKeyResponse.newBuilder()).build(),
+          omKeyInfo, ozoneManager.isRatisEnabled(),
+          omBucketInfo, keyStatus.isDirectory());
+
+      result = Result.SUCCESS;
+    } catch (IOException ex) {
+      result = Result.FAILURE;
+      exception = ex;
+      omClientResponse = new OMKeyDeleteResponseV1(
+          createErrorOMResponse(omResponse, exception));
+    } finally {
+      addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
+            omDoubleBufferHelper);
+      if (acquiredLock) {
+        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
+            bucketName);
+      }
+    }
+
+    // Perform audit logging outside the lock.
+    auditLog(auditLogger, buildAuditMessage(OMAction.DELETE_KEY, auditMap,
+        exception, userInfo));
+
+    switch (result) {
+    case SUCCESS:
+      omMetrics.decNumKeys();
+      LOG.debug("Key deleted. Volume:{}, Bucket:{}, Key:{}", volumeName,
+          bucketName, keyName);
+      break;
+    case FAILURE:
+      omMetrics.incNumKeyDeleteFails();
+      LOG.error("Key delete failed. Volume:{}, Bucket:{}, Key:{}.",
+          volumeName, bucketName, keyName, exception);
+      break;
+    default:
+      LOG.error("Unrecognized Result for OMKeyDeleteRequest: {}",
+          deleteKeyRequest);
+    }
+
+    return omClientResponse;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestV1.java
index 74e53fe..ba022c5 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestV1.java
@@ -90,7 +90,6 @@ public class OMKeyRenameRequestV1 extends OMKeyRenameRequest {
     OMClientResponse omClientResponse = null;
     IOException exception = null;
     OmKeyInfo fromKeyValue;
-    String fromKey = null;
     Result result;
     try {
       if (toKeyName.length() == 0 || fromKeyName.length() == 0) {
@@ -122,7 +121,7 @@ public class OMKeyRenameRequestV1 extends OMKeyRenameRequest {
       // case-1) fromKeyName should exist, otherwise throws exception
       if (fromKeyFileStatus == null) {
         // TODO: Add support for renaming open key
-        throw new OMException("Key not found " + fromKey, KEY_NOT_FOUND);
+        throw new OMException("Key not found " + fromKeyName, KEY_NOT_FOUND);
       }
 
       // source existed
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
index 58785c0..868d8c9 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
@@ -75,4 +75,12 @@ public class OMKeyDeleteResponse extends AbstractOMKeyDeleteResponse {
         omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(),
             omBucketInfo.getBucketName()), omBucketInfo);
   }
+
+  protected OmKeyInfo getOmKeyInfo() {
+    return omKeyInfo;
+  }
+
+  protected OmBucketInfo getOmBucketInfo() {
+    return omBucketInfo;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponseV1.java
similarity index 60%
copy from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
copy to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponseV1.java
index 58785c0..15c1ba6 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponseV1.java
@@ -18,43 +18,42 @@
 
 package org.apache.hadoop.ozone.om.response.key;
 
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 
-import java.io.IOException;
 import javax.annotation.Nonnull;
+import java.io.IOException;
 
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE;
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE;
 
 /**
  * Response for DeleteKey request.
  */
-@CleanupTableInfo(cleanupTables = {KEY_TABLE, DELETED_TABLE})
-public class OMKeyDeleteResponse extends AbstractOMKeyDeleteResponse {
+@CleanupTableInfo(cleanupTables = {FILE_TABLE, DIRECTORY_TABLE, DELETED_TABLE})
+public class OMKeyDeleteResponseV1 extends OMKeyDeleteResponse {
 
-  private OmKeyInfo omKeyInfo;
-  private OmBucketInfo omBucketInfo;
+  private boolean isDeleteDirectory;
 
-  public OMKeyDeleteResponse(@Nonnull OMResponse omResponse,
+  public OMKeyDeleteResponseV1(@Nonnull OMResponse omResponse,
       @Nonnull OmKeyInfo omKeyInfo, boolean isRatisEnabled,
-      @Nonnull OmBucketInfo omBucketInfo) {
-    super(omResponse, isRatisEnabled);
-    this.omKeyInfo = omKeyInfo;
-    this.omBucketInfo = omBucketInfo;
+      @Nonnull OmBucketInfo omBucketInfo,
+      boolean isDeleteDirectory) {
+    super(omResponse, omKeyInfo, isRatisEnabled, omBucketInfo);
+    this.isDeleteDirectory = isDeleteDirectory;
   }
 
   /**
    * For when the request is not successful.
    * For a successful request, the other constructor should be used.
    */
-  public OMKeyDeleteResponse(@Nonnull OMResponse omResponse) {
+  public OMKeyDeleteResponseV1(@Nonnull OMResponse omResponse) {
     super(omResponse);
   }
 
@@ -64,15 +63,21 @@ public class OMKeyDeleteResponse extends AbstractOMKeyDeleteResponse {
 
     // For OmResponse with failure, this should do nothing. This method is
     // not called in failure scenario in OM code.
-    String ozoneKey = omMetadataManager.getOzoneKey(omKeyInfo.getVolumeName(),
-        omKeyInfo.getBucketName(), omKeyInfo.getKeyName());
-    Table<String, OmKeyInfo> keyTable = omMetadataManager.getKeyTable();
-    addDeletionToBatch(omMetadataManager, batchOperation, keyTable, ozoneKey,
-        omKeyInfo);
+    String ozoneDbKey = omMetadataManager.getOzonePathKey(
+            getOmKeyInfo().getParentObjectID(), getOmKeyInfo().getFileName());
+
+    if (isDeleteDirectory) {
+      omMetadataManager.getDirectoryTable().deleteWithBatch(batchOperation,
+              ozoneDbKey);
+    } else {
+      Table<String, OmKeyInfo> keyTable = omMetadataManager.getKeyTable();
+      addDeletionToBatch(omMetadataManager, batchOperation, keyTable,
+              ozoneDbKey, getOmKeyInfo());
+    }
 
     // update bucket usedBytes.
     omMetadataManager.getBucketTable().putWithBatch(batchOperation,
-        omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(),
-            omBucketInfo.getBucketName()), omBucketInfo);
+            omMetadataManager.getBucketKey(getOmBucketInfo().getVolumeName(),
+                    getOmBucketInfo().getBucketName()), getOmBucketInfo());
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
index 2d4b5cb..61fd676 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
@@ -910,6 +910,9 @@ public final class TestOMRequestUtils {
           throws Exception {
     long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName,
             omMetaMgr);
+    if (org.apache.commons.lang3.StringUtils.isBlank(key)) {
+      return bucketId;
+    }
     String[] pathComponents = StringUtils.split(key, '/');
     long objectId = bucketId + 10;
     long parentId = bucketId;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java
index b8e5603..b5af354 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java
@@ -46,27 +46,23 @@ public class TestOMKeyDeleteRequest extends TestOMKeyRequest {
 
   @Test
   public void testValidateAndUpdateCache() throws Exception {
-    OMRequest modifiedOmRequest =
-        doPreExecute(createDeleteKeyRequest());
-
-    OMKeyDeleteRequest omKeyDeleteRequest =
-        new OMKeyDeleteRequest(modifiedOmRequest);
-
     // Add volume, bucket and key entries to OM DB.
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
 
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
-
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
+    String ozoneKey = addKeyToTable();
 
     OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
 
     // As we added manually to key table.
     Assert.assertNotNull(omKeyInfo);
 
+    OMRequest modifiedOmRequest =
+            doPreExecute(createDeleteKeyRequest());
+
+    OMKeyDeleteRequest omKeyDeleteRequest =
+            getOmKeyDeleteRequest(modifiedOmRequest);
+
     OMClientResponse omClientResponse =
         omKeyDeleteRequest.validateAndUpdateCache(ozoneManager,
         100L, ozoneManagerDoubleBufferHelper);
@@ -86,7 +82,7 @@ public class TestOMKeyDeleteRequest extends TestOMKeyRequest {
         doPreExecute(createDeleteKeyRequest());
 
     OMKeyDeleteRequest omKeyDeleteRequest =
-        new OMKeyDeleteRequest(modifiedOmRequest);
+            getOmKeyDeleteRequest(modifiedOmRequest);
 
     // Add only volume and bucket entry to DB.
     // In actual implementation we don't check for bucket/volume exists
@@ -108,7 +104,7 @@ public class TestOMKeyDeleteRequest extends TestOMKeyRequest {
         doPreExecute(createDeleteKeyRequest());
 
     OMKeyDeleteRequest omKeyDeleteRequest =
-        new OMKeyDeleteRequest(modifiedOmRequest);
+            getOmKeyDeleteRequest(modifiedOmRequest);
 
     OMClientResponse omClientResponse =
         omKeyDeleteRequest.validateAndUpdateCache(ozoneManager,
@@ -124,7 +120,7 @@ public class TestOMKeyDeleteRequest extends TestOMKeyRequest {
         doPreExecute(createDeleteKeyRequest());
 
     OMKeyDeleteRequest omKeyDeleteRequest =
-        new OMKeyDeleteRequest(modifiedOmRequest);
+            getOmKeyDeleteRequest(modifiedOmRequest);
 
     TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager);
 
@@ -145,7 +141,7 @@ public class TestOMKeyDeleteRequest extends TestOMKeyRequest {
   private OMRequest doPreExecute(OMRequest originalOmRequest) throws Exception {
 
     OMKeyDeleteRequest omKeyDeleteRequest =
-        new OMKeyDeleteRequest(originalOmRequest);
+            getOmKeyDeleteRequest(originalOmRequest);
 
     OMRequest modifiedOmRequest = omKeyDeleteRequest.preExecute(ozoneManager);
 
@@ -170,4 +166,18 @@ public class TestOMKeyDeleteRequest extends TestOMKeyRequest {
         .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey)
         .setClientId(UUID.randomUUID().toString()).build();
   }
+
+  protected String addKeyToTable() throws Exception {
+    TestOMRequestUtils.addKeyToTable(false, volumeName,
+            bucketName, keyName, clientID, replicationType, replicationFactor,
+            omMetadataManager);
+
+    return omMetadataManager.getOzoneKey(volumeName, bucketName,
+            keyName);
+  }
+
+  protected OMKeyDeleteRequest getOmKeyDeleteRequest(
+      OMRequest modifiedOmRequest) {
+    return new OMKeyDeleteRequest(modifiedOmRequest);
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestV1.java
new file mode 100644
index 0000000..dbba143
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestV1.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.key;
+
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.Time;
+
+/**
+ * Tests OMKeyDeleteRequestV1, the layout version V1 delete request.
+ */
+public class TestOMKeyDeleteRequestV1 extends TestOMKeyDeleteRequest {
+
+  protected OMKeyDeleteRequest getOmKeyDeleteRequest(
+      OMRequest modifiedOmRequest) {
+    return new OMKeyDeleteRequestV1(modifiedOmRequest);
+  }
+
+  protected String addKeyToTable() throws Exception {
+    String parentDir = "c/d/e";
+    String fileName = "file1";
+    String key = parentDir + "/" + fileName;
+    keyName = key; // updated key name
+
+    // Create parent dirs for the path
+    long parentId = TestOMRequestUtils.addParentsToDirTable(volumeName,
+            bucketName, parentDir, omMetadataManager);
+
+    OmKeyInfo omKeyInfo =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, key,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE,
+                    parentId + 1,
+                    parentId, 100, Time.now());
+    TestOMRequestUtils.addFileToKeyTable(false, false,
+            fileName, omKeyInfo, -1, 50, omMetadataManager);
+    return omKeyInfo.getPath();
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java
index 3c22832..712a4ec 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java
@@ -39,14 +39,15 @@ import java.util.List;
  */
 public class TestOMKeyDeleteResponse extends TestOMKeyResponse {
 
+  private OmBucketInfo omBucketInfo;
+
   @Test
   public void testAddToDBBatch() throws Exception {
+    omBucketInfo = OmBucketInfo.newBuilder()
+            .setVolumeName(volumeName).setBucketName(bucketName)
+            .setCreationTime(Time.now()).build();
 
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
-    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
-        .setVolumeName(volumeName).setBucketName(bucketName)
-        .setCreationTime(Time.now()).build();
+    OmKeyInfo omKeyInfo = getOmKeyInfo();
 
     OzoneManagerProtocolProtos.OMResponse omResponse =
         OzoneManagerProtocolProtos.OMResponse.newBuilder().setDeleteKeyResponse(
@@ -55,14 +56,10 @@ public class TestOMKeyDeleteResponse extends TestOMKeyResponse {
             .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey)
             .build();
 
-    OMKeyDeleteResponse omKeyDeleteResponse = new OMKeyDeleteResponse(
-        omResponse, omKeyInfo, true, omBucketInfo);
+    OMKeyDeleteResponse omKeyDeleteResponse = getOmKeyDeleteResponse(omKeyInfo,
+            omResponse);
 
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
-
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
+    String ozoneKey = addKeyToTable();
 
     Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey));
     omKeyDeleteResponse.addToDBBatch(omMetadataManager, batchOperation);
@@ -80,12 +77,11 @@ public class TestOMKeyDeleteResponse extends TestOMKeyResponse {
 
   @Test
   public void testAddToDBBatchWithNonEmptyBlocks() throws Exception {
+    omBucketInfo = OmBucketInfo.newBuilder()
+            .setVolumeName(volumeName).setBucketName(bucketName)
+            .setCreationTime(Time.now()).build();
 
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
-    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
-        .setVolumeName(volumeName).setBucketName(bucketName)
-        .setCreationTime(Time.now()).build();
+    OmKeyInfo omKeyInfo = getOmKeyInfo();
 
     // Add block to key.
     List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
@@ -108,10 +104,7 @@ public class TestOMKeyDeleteResponse extends TestOMKeyResponse {
 
     omKeyInfo.appendNewBlocks(omKeyLocationInfoList, false);
 
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
-
-    omMetadataManager.getKeyTable().put(ozoneKey, omKeyInfo);
+    String ozoneKey = addKeyToTable();
 
     OzoneManagerProtocolProtos.OMResponse omResponse =
         OzoneManagerProtocolProtos.OMResponse.newBuilder().setDeleteKeyResponse(
@@ -120,8 +113,8 @@ public class TestOMKeyDeleteResponse extends TestOMKeyResponse {
             .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey)
             .build();
 
-    OMKeyDeleteResponse omKeyDeleteResponse = new OMKeyDeleteResponse(
-        omResponse, omKeyInfo, true, omBucketInfo);
+    OMKeyDeleteResponse omKeyDeleteResponse = getOmKeyDeleteResponse(omKeyInfo,
+            omResponse);
 
     Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey));
     omKeyDeleteResponse.addToDBBatch(omMetadataManager, batchOperation);
@@ -139,12 +132,10 @@ public class TestOMKeyDeleteResponse extends TestOMKeyResponse {
 
   @Test
   public void testAddToDBBatchWithErrorResponse() throws Exception {
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
-
-    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
-        .setVolumeName(volumeName).setBucketName(bucketName)
-        .setCreationTime(Time.now()).build();
+    omBucketInfo = OmBucketInfo.newBuilder()
+            .setVolumeName(volumeName).setBucketName(bucketName)
+            .setCreationTime(Time.now()).build();
+    OmKeyInfo omKeyInfo = getOmKeyInfo();
 
     OzoneManagerProtocolProtos.OMResponse omResponse =
         OzoneManagerProtocolProtos.OMResponse.newBuilder().setDeleteKeyResponse(
@@ -153,14 +144,10 @@ public class TestOMKeyDeleteResponse extends TestOMKeyResponse {
             .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey)
             .build();
 
-    OMKeyDeleteResponse omKeyDeleteResponse = new OMKeyDeleteResponse(
-        omResponse, omKeyInfo, true, omBucketInfo);
+    OMKeyDeleteResponse omKeyDeleteResponse = getOmKeyDeleteResponse(omKeyInfo,
+            omResponse);
 
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
-
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
+    String ozoneKey = addKeyToTable();
 
     Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey));
 
@@ -174,4 +161,22 @@ public class TestOMKeyDeleteResponse extends TestOMKeyResponse {
     Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey));
 
   }
+
+  protected String addKeyToTable() throws Exception {
+    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
+            keyName);
+
+    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName,
+            clientID, replicationType, replicationFactor, omMetadataManager);
+    return ozoneKey;
+  }
+
+  protected OMKeyDeleteResponse getOmKeyDeleteResponse(OmKeyInfo omKeyInfo,
+      OzoneManagerProtocolProtos.OMResponse omResponse) {
+    return new OMKeyDeleteResponse(omResponse, omKeyInfo, true, omBucketInfo);
+  }
+
+  protected OmBucketInfo getOmBucketInfo() {
+    return omBucketInfo;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseV1.java
new file mode 100644
index 0000000..3cfec38
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseV1.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.key;
+
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.util.Time;
+import org.junit.Assert;
+
+/**
+ * Tests OMKeyDeleteResponseV1, the layout version V1 delete response.
+ */
+public class TestOMKeyDeleteResponseV1 extends TestOMKeyDeleteResponse {
+
+  @Override
+  protected OMKeyDeleteResponse getOmKeyDeleteResponse(OmKeyInfo omKeyInfo,
+      OzoneManagerProtocolProtos.OMResponse omResponse) {
+    return new OMKeyDeleteResponseV1(omResponse, omKeyInfo,
+            true, getOmBucketInfo(), false);
+  }
+
+  @Override
+  protected String addKeyToTable() throws Exception {
+    // Add volume, bucket and key entries to OM DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+
+    // Create parent dirs for the path
+    long parentId = TestOMRequestUtils.addParentsToDirTable(volumeName,
+            bucketName, "", omMetadataManager);
+
+    OmKeyInfo omKeyInfo =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE,
+                    parentId + 1,
+                    parentId, 100, Time.now());
+    TestOMRequestUtils.addFileToKeyTable(false, false,
+            keyName, omKeyInfo, -1, 50, omMetadataManager);
+    return omKeyInfo.getPath();
+  }
+
+  @Override
+  protected OmKeyInfo getOmKeyInfo() {
+    Assert.assertNotNull(getOmBucketInfo());
+    return TestOMRequestUtils.createOmKeyInfo(volumeName,
+            getOmBucketInfo().getBucketName(), keyName, replicationType,
+            replicationFactor,
+            getOmBucketInfo().getObjectID() + 1,
+            getOmBucketInfo().getObjectID(), 100, Time.now());
+  }
+}
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
index 56c7e10..b4153f0 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
@@ -255,6 +256,7 @@ public class BasicOzoneClientAdapterImpl implements OzoneClientAdapter {
     return true;
   }
 
+
   /**
    * Helper method to delete an object specified by key name in bucket.
    *
@@ -262,12 +264,32 @@ public class BasicOzoneClientAdapterImpl implements OzoneClientAdapter {
    * @return true if the key is deleted, false otherwise
    */
   @Override
-  public boolean deleteObject(String keyName) {
+  public boolean deleteObject(String keyName) throws IOException {
+    return deleteObject(keyName, false);
+  }
+
+  /**
+   * Helper method to delete an object specified by key name in bucket.
+   *
+   * @param keyName key name to be deleted
+   * @param recursive if true, delete all sub-path keys as well; if false,
+   *                  the delete fails when the key has sub-paths
+   * @return true if the key is deleted, false otherwise
+   */
+  @Override
+  public boolean deleteObject(String keyName, boolean recursive)
+      throws IOException {
     LOG.trace("issuing delete for key {}", keyName);
     try {
       incrementCounter(Statistic.OBJECTS_DELETED, 1);
-      bucket.deleteKey(keyName);
+      bucket.deleteDirectory(keyName, recursive);
       return true;
+    } catch (OMException ome) {
+      LOG.error("delete key failed {}", ome.getMessage());
+      if (OMException.ResultCodes.DIRECTORY_NOT_EMPTY == ome.getResult()) {
+        throw new PathIsNotEmptyDirectoryException(ome.getMessage());
+      }
+      return false;
     } catch (IOException ioe) {
       LOG.error("delete key failed {}", ioe.getMessage());
       return false;
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index d217da8..2f7b589 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -414,7 +414,17 @@ public class BasicOzoneFileSystem extends FileSystem {
   }
 
   private boolean renameV1(String srcPath, String dstPath) throws IOException {
-    adapter.renameKey(srcPath, dstPath);
+    try {
+      adapter.renameKey(srcPath, dstPath);
+    } catch (OMException ome) {
+      LOG.error("rename key failed: {}. source:{}, destin:{}",
+              ome.getMessage(), srcPath, dstPath);
+      if (OMException.ResultCodes.KEY_ALREADY_EXISTS == ome.getResult()) {
+        return false;
+      } else {
+        throw ome;
+      }
+    }
     return true;
   }
 
@@ -498,6 +508,20 @@ public class BasicOzoneFileSystem extends FileSystem {
     incrementCounter(Statistic.INVOCATION_DELETE, 1);
     statistics.incrementWriteOps(1);
     LOG.debug("Delete path {} - recursive {}", f, recursive);
+
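+    // Layout V1 buckets resolve recursive deletes atomically on the OM side,
+    // so the listing-based fallback below can be skipped.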
+    String layOutVersion = adapter.getBucketLayoutVersion();
+    if (layOutVersion != null &&
+            OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1.equals(layOutVersion)) {
+
+      if (f.isRoot()) {
+        LOG.warn("Cannot delete root directory.");
+        return false;
+      }
+
+      String key = pathToKey(f);
+      return adapter.deleteObject(key, recursive);
+    }
+
     FileStatus status;
     try {
       status = getFileStatus(f);
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
index 4ea08f2..84cba47 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
@@ -439,10 +440,13 @@ public class BasicRootedOzoneClientAdapterImpl
    * Helper method to delete an object specified by key name in bucket.
    *
    * @param path path to a key to be deleted
+   * @param recursive if true, delete all sub-path keys as well; if false,
+   *                  the delete fails when the key has sub-paths
    * @return true if the key is deleted, false otherwise
    */
   @Override
-  public boolean deleteObject(String path) {
+  public boolean deleteObject(String path, boolean recursive)
+      throws IOException {
     LOG.trace("issuing delete for path to key: {}", path);
     incrementCounter(Statistic.OBJECTS_DELETED, 1);
     OFSPath ofsPath = new OFSPath(path);
@@ -452,14 +456,25 @@ public class BasicRootedOzoneClientAdapterImpl
     }
     try {
       OzoneBucket bucket = getBucket(ofsPath, false);
-      bucket.deleteKey(keyName);
+      bucket.deleteDirectory(keyName, recursive);
       return true;
+    } catch (OMException ome) {
+      LOG.error("delete key failed {}", ome.getMessage());
+      if (OMException.ResultCodes.DIRECTORY_NOT_EMPTY == ome.getResult()) {
+        throw new PathIsNotEmptyDirectoryException(ome.getMessage());
+      }
+      return false;
     } catch (IOException ioe) {
       LOG.error("delete key failed " + ioe.getMessage());
       return false;
     }
   }
 
+  @Override
+  public boolean deleteObject(String path) throws IOException {
+    return deleteObject(path, false);
+  }
+
   /**
    * Helper function to check if the list of key paths are in the same volume
    * and same bucket.
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
index 5b65a0e..4a4d91b 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
@@ -51,7 +51,9 @@ public interface OzoneClientAdapter {
 
   boolean createDirectory(String keyName) throws IOException;
 
-  boolean deleteObject(String keyName);
+  boolean deleteObject(String keyName) throws IOException;
+
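+  // With recursive=true all sub-paths are removed as well; otherwise the
+  // delete of a non-empty directory fails.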
+  boolean deleteObject(String keyName, boolean recursive) throws IOException;
 
   boolean deleteObjects(List<String> keyName);
 




[ozone] 04/19: HDDS-4357: Rename : make rename an atomic ops by updating key path entry in dir/file table (#1557)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit aa70ea568704143bad4f9fda7369bee408475db6
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Thu Nov 19 21:23:45 2020 +0530

    HDDS-4357: Rename : make rename an atomic ops by updating key path entry in dir/file table (#1557)
    
    HDDS-4357: Rename : make rename an atomic ops by updating key path entry in dir/file table
---
 .../org/apache/hadoop/ozone/om/OMConfigKeys.java   |   2 +
 .../apache/hadoop/ozone/om/helpers/OmKeyInfo.java  |   4 +
 .../hadoop/ozone/om/helpers/OzoneFSUtils.java      |  43 +++
 .../hadoop/fs/ozone/TestOzoneFileSystem.java       | 278 ++++++++++++++++++++
 .../hadoop/fs/ozone/TestOzoneFileSystemV1.java     | 173 +++++++++++-
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  27 +-
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |   2 +
 .../om/ratis/utils/OzoneManagerRatisUtils.java     |   4 +
 .../om/request/bucket/OMBucketCreateRequest.java   |  24 ++
 .../ozone/om/request/file/OMFileRequest.java       |  94 +++++++
 .../ozone/om/request/key/OMKeyRenameRequestV1.java | 292 +++++++++++++++++++++
 .../ozone/om/response/key/OMKeyRenameResponse.java |  11 +
 ...ameResponse.java => OMKeyRenameResponseV1.java} |  68 +++--
 .../fs/ozone/BasicOzoneClientAdapterImpl.java      |   5 +
 .../hadoop/fs/ozone/BasicOzoneFileSystem.java      |  13 +
 .../ozone/BasicRootedOzoneClientAdapterImpl.java   |   7 +
 .../apache/hadoop/fs/ozone/OzoneClientAdapter.java |   1 +
 17 files changed, 1001 insertions(+), 47 deletions(-)

diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index dcba74b..8dda04e 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -241,4 +241,6 @@ public final class OMConfigKeys {
   public static final String OZONE_OM_LAYOUT_VERSION =
           "ozone.om.layout.version";
   public static final String OZONE_OM_LAYOUT_VERSION_DEFAULT = "V0";
+
+  public static final String OZONE_OM_LAYOUT_VERSION_V1 = "V1";
 }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
index b6fb404..d097714 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
@@ -286,6 +286,10 @@ public final class OmKeyInfo extends WithObjectID {
     return OzoneAclUtil.setAcl(acls, newAcls);
   }
 
+  public void setParentObjectID(long parentObjectID) {
+    this.parentObjectID = parentObjectID;
+  }
+
   /**
    * Builder of OmKeyInfo.
    */
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
index 63bfd8f..e9d4cf9 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
@@ -162,4 +162,47 @@ public final class OzoneFSUtils {
 
     return parentPath.equals(childParent);
   }
+
+  /**
+   * Returns the parent directory of the given key path. For example, for
+   * the key path '/a/b/c/d/e/file1' it returns the parent path
+   * '/a/b/c/d/e'.
+   *
+   * @param keyName key name
+   * @return the parent path, or keyName itself if it has no parent
+   */
+  public static String getParentDir(@Nonnull String keyName) {
+    java.nio.file.Path fileName = Paths.get(keyName).getParent();
+    if (fileName != null) {
+      return fileName.toString();
+    }
+    // failed to find a parent directory.
+    return keyName;
+  }
+
+  /**
+   * This function appends the given file name to the given key name path.
+   *
+   * @param keyName key name
+   * @param fileName  file name
+   * @return full path
+   */
+  public static String appendFileNameToKeyPath(String keyName,
+                                               String fileName) {
+    StringBuilder newToKeyName = new StringBuilder(keyName);
+    newToKeyName.append(OZONE_URI_DELIMITER);
+    newToKeyName.append(fileName);
+    return newToKeyName.toString();
+  }
+
+  /**
+   * Returns the number of path components in the given keyName.
+   *
+   * @param keyName key name
+   * @return path components count
+   */
+  public static int getFileCount(String keyName) {
+    java.nio.file.Path keyPath = Paths.get(keyName);
+    return keyPath.getNameCount();
+  }
 }
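
For reference, the new OzoneFSUtils helpers behave as follows. The values are
illustrative only and follow the java.nio.file semantics of the
implementations above:

    OzoneFSUtils.getParentDir("a/b/c/file1");             // -> "a/b/c"
    OzoneFSUtils.getParentDir("file1");                   // -> "file1"
    OzoneFSUtils.appendFileNameToKeyPath("a/b/c", "f1");  // -> "a/b/c/f1"
    OzoneFSUtils.getFileCount("a/b/c/file1");             // -> 4
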
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
index ab0865d..394abf9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
@@ -629,6 +629,16 @@ public class TestOzoneFileSystem {
       stream.seek(fileLength);
       assertEquals(-1, stream.read());
     }
+
+    // non-existent file
+    Path fileNotExists = new Path("/file_notexist");
+    try {
+      fs.open(fileNotExists);
+      Assert.fail("Should throw FILE_NOT_FOUND error as file doesn't exist!");
+    } catch (FileNotFoundException fnfe) {
+      Assert.assertTrue("Expected FILE_NOT_FOUND error",
+              fnfe.getMessage().contains("FILE_NOT_FOUND"));
+    }
   }
 
   @Test
@@ -663,9 +673,276 @@ public class TestOzoneFileSystem {
         interimPath.getName(), fileStatus.getPath().getName());
   }
 
+  /**
+   * Case-1) fromKeyName should exist; otherwise the rename fails.
+   */
+  @Test
+  public void testRenameWithNonExistentSource() throws Exception {
+    final String root = "/root";
+    final String dir1 = root + "/dir1";
+    final String dir2 = root + "/dir2";
+    final Path source = new Path(fs.getUri().toString() + dir1);
+    final Path destin = new Path(fs.getUri().toString() + dir2);
+
+    // creates destin
+    fs.mkdirs(destin);
+    LOG.info("Created destin dir: {}", destin);
+
+    LOG.info("Rename op-> source:{} to destin:{}}", source, destin);
+    assertFalse("Expected to fail rename as src doesn't exist",
+            fs.rename(source, destin));
+  }
+
+  /**
+   * Case-2) Cannot rename a directory to its own subdirectory.
+   */
+  @Test
+  public void testRenameDirToItsOwnSubDir() throws Exception {
+    final String root = "/root";
+    final String dir1 = root + "/dir1";
+    final Path dir1Path = new Path(fs.getUri().toString() + dir1);
+    // Add a sub-dir1 to the directory to be moved.
+    final Path subDir1 = new Path(dir1Path, "sub_dir1");
+    fs.mkdirs(subDir1);
+    LOG.info("Created dir1 {}", subDir1);
+
+    final Path sourceRoot = new Path(fs.getUri().toString() + root);
+    LOG.info("Rename op-> source:{} to destin:{}", sourceRoot, subDir1);
+    try {
+      fs.rename(sourceRoot, subDir1);
+      Assert.fail("Should throw exception : Cannot rename a directory to" +
+              " its own subdirectory");
+    } catch (IllegalArgumentException iae) {
+      // expected
+    }
+  }
+
+  /**
+   * Case-3) If src == destin, source and destin must be of the same type.
+   */
+  @Test
+  public void testRenameSourceAndDestinAreSame() throws Exception {
+    final String root = "/root";
+    final String dir1 = root + "/dir1";
+    final String dir2 = dir1 + "/dir2";
+    final Path dir2Path = new Path(fs.getUri().toString() + dir2);
+    fs.mkdirs(dir2Path);
+
+    // File rename
+    Path file1 = new Path(fs.getUri().toString() + dir2 + "/file1");
+    ContractTestUtils.touch(fs, file1);
+
+    assertTrue(fs.rename(file1, file1));
+    assertTrue(fs.rename(dir2Path, dir2Path));
+  }
+
+  /**
+   * Case-4) Rename from /a to /b.
+   * <p>
+   * Expected Result: After rename the directory structure will be /b/a.
+   */
+  @Test
+  public void testRenameToExistingDir() throws Exception {
+    // created /a
+    final Path aSourcePath = new Path(fs.getUri().toString() + "/a");
+    fs.mkdirs(aSourcePath);
+
+    // created /b
+    final Path bDestinPath = new Path(fs.getUri().toString() + "/b");
+    fs.mkdirs(bDestinPath);
+
+    // Add a sub-directory '/a/c' to '/a'. This is to verify that the
+    // sub-directory is moved along with the rename.
+    final Path acPath = new Path(fs.getUri().toString() + "/a/c");
+    fs.mkdirs(acPath);
+
+    // Rename from /a to /b.
+    assertTrue("Rename failed", fs.rename(aSourcePath, bDestinPath));
+
+    final Path baPath = new Path(fs.getUri().toString() + "/b/a");
+    final Path bacPath = new Path(fs.getUri().toString() + "/b/a/c");
+    assertTrue("Rename failed", fs.exists(baPath));
+    assertTrue("Rename failed", fs.exists(bacPath));
+  }
+
+  /**
+   * Case-5) If the new destin '/dst/source' already exists, the rename fails.
+   * If destination is a directory then rename source as sub-path of it.
+   * <p>
+   * For example: rename /a to /b will lead to /b/a. This new path should
+   * not exist.
+   */
+  @Test
+  public void testRenameToNewSubDirShouldNotExist() throws Exception {
+    // Case-5.a) Rename directory from /a to /b.
+    // created /a
+    final Path aSourcePath = new Path(fs.getUri().toString() + "/a");
+    fs.mkdirs(aSourcePath);
+
+    // created /b
+    final Path bDestinPath = new Path(fs.getUri().toString() + "/b");
+    fs.mkdirs(bDestinPath);
+
+    // Add a sub-directory '/b/a' to '/b'. This is to verify that the rename
+    // fails as the new destin /b/a already exists.
+    final Path baPath = new Path(fs.getUri().toString() + "/b/a");
+    fs.mkdirs(baPath);
+
+    Assert.assertFalse("New destin sub-path /b/a already exists",
+            fs.rename(aSourcePath, bDestinPath));
+
+    // Case-5.b) Rename file from /a/b/c/file1 to /a.
+    // Should fail since /a/file1 already exists.
+    final Path abcPath = new Path(fs.getUri().toString() + "/a/b/c");
+    fs.mkdirs(abcPath);
+    Path abcFile1 = new Path(abcPath, "/file1");
+    ContractTestUtils.touch(fs, abcFile1);
+
+    final Path aFile1 = new Path(fs.getUri().toString() + "/a/file1");
+    ContractTestUtils.touch(fs, aFile1);
+
+    final Path aDestinPath = new Path(fs.getUri().toString() + "/a");
+
+    Assert.assertFalse("New destin sub-path /b/a already exists",
+            fs.rename(abcFile1, aDestinPath));
+  }
+
+  /**
+   * Case-6) Renaming a directory to an existing file should fail.
+   */
+  @Test
+  public void testRenameDirToFile() throws Exception {
+    final String root = "/root";
+    Path rootPath = new Path(fs.getUri().toString() + root);
+    fs.mkdirs(rootPath);
+
+    Path file1Destin = new Path(fs.getUri().toString() + root + "/file1");
+    ContractTestUtils.touch(fs, file1Destin);
+    Path abcRootPath = new Path(fs.getUri().toString() + "/a/b/c");
+    fs.mkdirs(abcRootPath);
+    Assert.assertFalse("key already exists /root_dir/file1",
+            fs.rename(abcRootPath, file1Destin));
+  }
+
+  /**
+   * Rename file to a non-existent destin file.
+   */
+  @Test
+  public void testRenameFile() throws Exception {
+    final String root = "/root";
+    Path rootPath = new Path(fs.getUri().toString() + root);
+    fs.mkdirs(rootPath);
+
+    Path file1Source = new Path(fs.getUri().toString() + root
+            + "/file1_Copy");
+    ContractTestUtils.touch(fs, file1Source);
+    Path file1Destin = new Path(fs.getUri().toString() + root + "/file1");
+    assertTrue("Renamed failed", fs.rename(file1Source, file1Destin));
+    assertTrue("Renamed failed: /root/file1", fs.exists(file1Destin));
+
+    /*
+     * Read back several times to verify that the cached OmKeyInfo#keyName
+     * entry is not modified. On each read, OmKeyInfo#keyName is rebuilt
+     * and assigned the full key path.
+     */
+    for (int i = 0; i < 10; i++) {
+      FileStatus[] fStatus = fs.listStatus(rootPath);
+      assertEquals("Renamed failed", 1, fStatus.length);
+      assertEquals("Wrong path name!", file1Destin, fStatus[0].getPath());
+    }
+  }
+
+  /**
+   * Rename file to an existing directory.
+   */
+  @Test
+  public void testRenameFileToDir() throws Exception {
+    final String root = "/root";
+    Path rootPath = new Path(fs.getUri().toString() + root);
+    fs.mkdirs(rootPath);
+
+    Path file1Destin = new Path(fs.getUri().toString() + root + "/file1");
+    ContractTestUtils.touch(fs, file1Destin);
+    Path abcRootPath = new Path(fs.getUri().toString() + "/a/b/c");
+    fs.mkdirs(abcRootPath);
+    assertTrue("Renamed failed", fs.rename(file1Destin, abcRootPath));
+    assertTrue("Renamed filed: /a/b/c/file1", fs.exists(new Path(abcRootPath,
+            "file1")));
+  }
+
+  /**
+   * Fails if the (a) parent of dst does not exist or (b) parent is a file.
+   */
+  @Test
+  public void testRenameDestinationParentDoesntExist() throws Exception {
+    final String root = "/root_dir";
+    final String dir1 = root + "/dir1";
+    final String dir2 = dir1 + "/dir2";
+    final Path dir2SourcePath = new Path(fs.getUri().toString() + dir2);
+    fs.mkdirs(dir2SourcePath);
+
+    // (a) parent of dst does not exist.  /root_dir/b/c
+    final Path destinPath = new Path(fs.getUri().toString() + root + "/b/c");
+    try {
+      fs.rename(dir2SourcePath, destinPath);
+      Assert.fail("Should fail as parent of dst does not exist!");
+    } catch (FileNotFoundException fnfe) {
+      // expected
+    }
+
+    // (b) parent of dst is a file. /root_dir/file1/c
+    Path filePath = new Path(fs.getUri().toString() + root + "/file1");
+    ContractTestUtils.touch(fs, filePath);
+
+    Path newDestinPath = new Path(filePath, "c");
+    try {
+      fs.rename(dir2SourcePath, newDestinPath);
+      Assert.fail("Should fail as parent of dst is a file!");
+    } catch (IOException ioe) {
+      // expected
+    }
+  }
+
+  /**
+   * Renaming to the source's parent directory will succeed.
+   * 1. Rename from /root_dir/dir1/dir2 to /root_dir.
+   * Expected result : /root_dir/dir2
+   * <p>
+   * 2. Rename from /root_dir/dir1/file1 to /root_dir.
+   * Expected result : /root_dir/file1.
+   */
+  @Test
+  public void testRenameToParentDir() throws Exception {
+    final String root = "/root_dir";
+    final String dir1 = root + "/dir1";
+    final String dir2 = dir1 + "/dir2";
+    final Path dir2SourcePath = new Path(fs.getUri().toString() + dir2);
+    fs.mkdirs(dir2SourcePath);
+    final Path destRootPath = new Path(fs.getUri().toString() + root);
+
+    Path file2Source = new Path(fs.getUri().toString() + dir1 + "/file2");
+    ContractTestUtils.touch(fs, file2Source);
+
+    // rename source directory to its parent directory (destination).
+    assertTrue("Rename failed", fs.rename(dir2SourcePath, destRootPath));
+    final Path expectedPathAfterRename =
+            new Path(fs.getUri().toString() + root + "/dir2");
+    assertTrue("Rename failed",
+            fs.exists(expectedPathAfterRename));
+
+    // rename source file to its parent directory (destination).
+    assertTrue("Rename failed", fs.rename(file2Source, destRootPath));
+    final Path expectedFilePathAfterRename =
+            new Path(fs.getUri().toString() + root + "/file2");
+    assertTrue("Rename failed",
+            fs.exists(expectedFilePathAfterRename));
+  }
+
   @Test
   public void testRenameDir() throws Exception {
     final String dir = "/root_dir/dir1";
+    Path rootDir = new Path(fs.getUri().toString() + "/root_dir");
     final Path source = new Path(fs.getUri().toString() + dir);
     final Path dest = new Path(source.toString() + ".renamed");
     // Add a sub-dir to the directory to be moved.
@@ -684,6 +961,7 @@ public class TestOzoneFileSystem {
     LambdaTestUtils.intercept(IllegalArgumentException.class, "Wrong FS",
         () -> fs.rename(new Path(fs.getUri().toString() + "fake" + dir), dest));
   }
+
   private OzoneKeyDetails getKey(Path keyPath, boolean isDirectory)
       throws IOException {
     String key = o3fs.pathToKey(keyPath);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
index 6868040..e2b7887 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.Trash;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.TableIterator;
@@ -39,6 +40,7 @@ import org.apache.hadoop.ozone.TestDataUtil;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.junit.Assert;
@@ -329,12 +331,136 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
   }
 
   /**
+   * Case-1) fromKeyName should exist, otherwise an exception is thrown.
+   */
+  @Test
+  public void testRenameWithNonExistentSource() throws Exception {
+    final String root = "/root";
+    final String dir1 = root + "/dir1";
+    final String dir2 = root + "/dir2";
+    final Path source = new Path(fs.getUri().toString() + dir1);
+    final Path destin = new Path(fs.getUri().toString() + dir2);
+
+    // creates destin
+    fs.mkdirs(destin);
+    LOG.info("Created destin dir: {}", destin);
+
+    LOG.info("Rename op-> source:{} to destin:{}}", source, destin);
+    try {
+      fs.rename(source, destin);
+      Assert.fail("Should throw exception : Source doesn't exist!");
+    } catch (OMException ome) {
+      // expected
+      assertEquals(OMException.ResultCodes.KEY_NOT_FOUND, ome.getResult());
+    }
+  }
+
+  /**
+   * Case-2) Cannot rename a directory to its own subdirectory.
+   */
+  @Test
+  public void testRenameDirToItsOwnSubDir() throws Exception {
+    final String root = "/root";
+    final String dir1 = root + "/dir1";
+    final Path dir1Path = new Path(fs.getUri().toString() + dir1);
+    // Add a sub-dir1 to the directory to be moved.
+    final Path subDir1 = new Path(dir1Path, "sub_dir1");
+    fs.mkdirs(subDir1);
+    LOG.info("Created dir1 {}", subDir1);
+
+    final Path sourceRoot = new Path(fs.getUri().toString() + root);
+    LOG.info("Rename op-> source:{} to destin:{}", sourceRoot, subDir1);
+    try {
+      fs.rename(sourceRoot, subDir1);
+      Assert.fail("Should throw exception : Cannot rename a directory to" +
+              " its own subdirectory");
+    } catch (OMException ome) {
+      // expected
+      assertEquals(OMException.ResultCodes.KEY_RENAME_ERROR, ome.getResult());
+    }
+  }
+
+  /**
+   * Case-5) If new destin '/dst/source' exists then throws exception.
+   * If destination is a directory then rename source as sub-path of it.
+   * <p>
+   * For example: rename /a to /b will lead to /b/a. This new path should
+   * not exist.
+   */
+  @Test
+  public void testRenameToNewSubDirShouldNotExist() throws Exception {
+    // Case-5.a) Rename directory from /a to /b.
+    // created /a
+    final Path aSourcePath = new Path(fs.getUri().toString() + "/a");
+    fs.mkdirs(aSourcePath);
+
+    // created /b
+    final Path bDestinPath = new Path(fs.getUri().toString() + "/b");
+    fs.mkdirs(bDestinPath);
+
+    // Add a sub-directory '/b/a' to '/b'. This is to verify that the rename
+    // throws an exception as the new destin /b/a already exists.
+    final Path baPath = new Path(fs.getUri().toString() + "/b/a");
+    fs.mkdirs(baPath);
+
+    try {
+      fs.rename(aSourcePath, bDestinPath);
+      Assert.fail("Should fail as new destination dir exists!");
+    } catch (OMException ome) {
+      // expected as new sub-path /b/a already exists.
+      assertEquals(OMException.ResultCodes.KEY_ALREADY_EXISTS, ome.getResult());
+    }
+
+    // Case-5.b) Rename file from /a/b/c/file1 to /a.
+    // Should fail since /a/file1 already exists.
+    final Path abcPath = new Path(fs.getUri().toString() + "/a/b/c");
+    fs.mkdirs(abcPath);
+    Path abcFile1 = new Path(abcPath, "/file1");
+    ContractTestUtils.touch(fs, abcFile1);
+
+    final Path aFile1 = new Path(fs.getUri().toString() + "/a/file1");
+    ContractTestUtils.touch(fs, aFile1);
+
+    final Path aDestinPath = new Path(fs.getUri().toString() + "/a");
+
+    try {
+      fs.rename(abcFile1, aDestinPath);
+      Assert.fail("Should fail as new destination file exists!");
+    } catch (OMException ome) {
+      // expected as the destination file /a/file1 already exists.
+      assertEquals(OMException.ResultCodes.KEY_ALREADY_EXISTS, ome.getResult());
+    }
+  }
+
+  /**
+   * Case-6) Renaming a directory to an existing file should fail.
+   */
+  @Test
+  public void testRenameDirToFile() throws Exception {
+    final String root = "/root";
+    Path rootPath = new Path(fs.getUri().toString() + root);
+    fs.mkdirs(rootPath);
+
+    Path file1Destin = new Path(fs.getUri().toString() + root + "/file1");
+    ContractTestUtils.touch(fs, file1Destin);
+    Path abcRootPath = new Path(fs.getUri().toString() + "/a/b/c");
+    fs.mkdirs(abcRootPath);
+    try {
+      fs.rename(abcRootPath, file1Destin);
+      Assert.fail("key already exists /root_dir/file1");
+    } catch (OMException ome) {
+      // expected
+      assertEquals(OMException.ResultCodes.KEY_ALREADY_EXISTS, ome.getResult());
+    }
+  }
+
+  /**
    * Cleanup keyTable and directoryTable explicitly as FS delete operation
    * is not yet supported.
    *
    * @throws IOException DB failure
    */
-  protected void tableCleanup() throws IOException {
+  private void tableCleanup() throws IOException {
     OMMetadataManager metadataMgr = cluster.getOzoneManager()
             .getMetadataManager();
     TableIterator<String, ? extends
@@ -382,8 +508,8 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
       keyTableIterator.next();
     }
 
-    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmDirectoryInfo>>>
-            keyCacheIterator = metadataMgr.getDirectoryTable().cacheIterator();
+    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>>
+            keyCacheIterator = metadataMgr.getKeyTable().cacheIterator();
     while(keyCacheIterator.hasNext()){
       keyCacheIterator.next();
       keyCacheIterator.remove();
@@ -401,6 +527,41 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
     rootItemCount = 0;
   }
 
+  /**
+   * Fails if the (a) parent of dst does not exist or (b) parent is a file.
+   */
+  @Test
+  public void testRenameDestinationParentDoesntExist() throws Exception {
+    final String root = "/root_dir";
+    final String dir1 = root + "/dir1";
+    final String dir2 = dir1 + "/dir2";
+    final Path dir2SourcePath = new Path(fs.getUri().toString() + dir2);
+    fs.mkdirs(dir2SourcePath);
+
+    // (a) parent of dst does not exist.  /root_dir/b/c
+    final Path destinPath = new Path(fs.getUri().toString() + root + "/b/c");
+    try {
+      fs.rename(dir2SourcePath, destinPath);
+      Assert.fail("Should fail as parent of dst does not exist!");
+    } catch (OMException ome) {
+      // expected
+      assertEquals(OMException.ResultCodes.KEY_RENAME_ERROR, ome.getResult());
+    }
+
+    // (b) parent of dst is a file. /root_dir/file1/c
+    Path filePath = new Path(fs.getUri().toString() + root + "/file1");
+    ContractTestUtils.touch(fs, filePath);
+
+    Path newDestinPath = new Path(filePath, "c");
+    try {
+      fs.rename(dir2SourcePath, newDestinPath);
+      Assert.fail("Should fail as parent of dst is a file!");
+    } catch (OMException ome) {
+      // expected
+      assertEquals(OMException.ResultCodes.KEY_RENAME_ERROR, ome.getResult());
+    }
+  }
+
   @Override
   @Test
   @Ignore("TODO:HDDS-2939")
@@ -412,4 +573,10 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
   @Ignore("TODO:HDDS-2939")
   public void testRenameToTrashEnabled() throws Exception {
   }
+
+  @Override
+  @Test
+  @Ignore("TODO:HDDS-2939")
+  public void testListStatusWithIntermediateDir() throws Exception {
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 4a4327d..8aaca5f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -2048,10 +2048,16 @@ public class KeyManagerImpl implements KeyManager {
     String volumeName = args.getVolumeName();
     String bucketName = args.getBucketName();
     String keyName = args.getKeyName();
-    OzoneFileStatus fileStatus = getOzoneFileStatus(volumeName, bucketName,
-            keyName, args.getRefreshPipeline(), args.getSortDatanodes(),
-            clientAddress);
-      //if key is not of type file or if key is not found we throw an exception
+    OzoneFileStatus fileStatus;
+    if (OzoneManagerRatisUtils.isOmLayoutVersionV1()) {
+      fileStatus = getOzoneFileStatusV1(volumeName, bucketName, keyName,
+              args.getSortDatanodes(), clientAddress, false);
+    } else {
+      fileStatus = getOzoneFileStatus(volumeName, bucketName,
+              keyName, args.getRefreshPipeline(), args.getSortDatanodes(),
+              clientAddress);
+    }
+    //if key is not of type file or if key is not found we throw an exception
     if (fileStatus.isFile()) {
       // add block token for read.
       addBlockToken4Read(fileStatus.getKeyInfo());
@@ -2534,13 +2540,18 @@ public class KeyManagerImpl implements KeyManager {
         continue;
       }
 
-      cacheOmKeyInfo.setFileName(cacheOmKeyInfo.getKeyName());
+      // make OmKeyInfo local copy to reset keyname to "fullKeyPath".
+      // In DB keyName stores only the leaf node but the list
+      // returning to the user should have full path.
+      OmKeyInfo omKeyInfo = cacheOmKeyInfo.copyObject();
+
+      omKeyInfo.setFileName(omKeyInfo.getKeyName());
       String fullKeyPath = OMFileRequest.getAbsolutePath(prefixKeyPath,
-              cacheOmKeyInfo.getKeyName());
-      cacheOmKeyInfo.setKeyName(fullKeyPath);
+              omKeyInfo.getKeyName());
+      omKeyInfo.setKeyName(fullKeyPath);
 
       countEntries = addKeyInfoToFileStatusList(fileStatusList, prefixKeyInDB,
-              seekKeyInDB, startKey, countEntries, cacheKey, cacheOmKeyInfo,
+              seekKeyInDB, startKey, countEntries, cacheKey, omKeyInfo,
               false);
     }
     return countEntries;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index b59eedb..30c1d37 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -3714,6 +3714,8 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
     boolean omLayoutVersionV1 =
             StringUtils.equalsIgnoreCase(version, "V1");
     OzoneManagerRatisUtils.setOmLayoutVersionV1(omLayoutVersionV1);
+    LOG.info("Configured {}={} and enabled:{} optimized OM FS operations",
+            OZONE_OM_LAYOUT_VERSION, version, omLayoutVersionV1);
   }
 
   /**
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index d4c0f17..d2dd5c7 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.ozone.om.request.key.OMKeyCreateRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyDeleteRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyPurgeRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyRenameRequest;
+import org.apache.hadoop.ozone.om.request.key.OMKeyRenameRequestV1;
 import org.apache.hadoop.ozone.om.request.key.OMKeysRenameRequest;
 import org.apache.hadoop.ozone.om.request.key.OMTrashRecoverRequest;
 import org.apache.hadoop.ozone.om.request.key.acl.OMKeyAddAclRequest;
@@ -149,6 +150,9 @@ public final class OzoneManagerRatisUtils {
     case DeleteKeys:
       return new OMKeysDeleteRequest(omRequest);
     case RenameKey:
+      if (omLayoutVersionV1) {
+        return new OMKeyRenameRequestV1(omRequest);
+      }
       return new OMKeyRenameRequest(omRequest);
     case RenameKeys:
       return new OMKeysRenameRequest(omRequest);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
index 1130f94..d3b3f61 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
@@ -20,14 +20,18 @@ package org.apache.hadoop.ozone.om.request.bucket;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.stream.Collectors;
 
 import com.google.common.base.Optional;
 
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
@@ -155,6 +159,9 @@ public class OMBucketCreateRequest extends OMClientRequest {
         getOmRequest());
     OmBucketInfo omBucketInfo = OmBucketInfo.getFromProtobuf(bucketInfo);
 
+    // Add the configured layout version to the bucket metadata.
+    addLayoutVersionToBucket(ozoneManager, omBucketInfo);
+
     AuditLogger auditLogger = ozoneManager.getAuditLogger();
     OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
 
@@ -360,4 +367,21 @@ public class OMBucketCreateRequest extends OMClientRequest {
 
   }
 
+  private void addLayoutVersionToBucket(OzoneManager ozoneManager,
+                                        OmBucketInfo omBucketInfo) {
+    Map<String, String> metadata = omBucketInfo.getMetadata();
+    if (metadata == null) {
+      metadata = new HashMap<>();
+    }
+    OzoneConfiguration configuration = ozoneManager.getConfiguration();
+    // TODO: Many unit tests have a null config, so a simple null check is
+    //  done for now. This can be revisited later to avoid massive test
+    //  code changes.
+    if (configuration != null) {
+      String layOutVersion = configuration
+              .get(OMConfigKeys.OZONE_OM_LAYOUT_VERSION,
+                      OMConfigKeys.OZONE_OM_LAYOUT_VERSION_DEFAULT);
+      metadata.put(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, layOutVersion);
+      omBucketInfo.setMetadata(metadata);
+    }
+  }
 }
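
The metadata stamped by addLayoutVersionToBucket() above is what the
filesystem client later reads to pick its rename code path. A minimal sketch
of the read side (volume/bucket names are hypothetical, and an OzoneClient
instance named 'client' is assumed):

    OzoneVolume volume = client.getObjectStore().getVolume("vol1");
    OzoneBucket bucket = volume.getBucket("bucket1");
    // The bucket-create request stored the configured layout version here.
    String layout = bucket.getMetadata()
        .get(OMConfigKeys.OZONE_OM_LAYOUT_VERSION);   // e.g. "V1"
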
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index 5225d82..e7b43d6 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
@@ -665,4 +666,97 @@ public final class OMFileRequest {
     }
     return prefixName.concat(OzoneConsts.OZONE_URI_DELIMITER).concat(fileName);
   }
+
+  /**
+   * Build DirectoryInfo from OmKeyInfo.
+   *
+   * @param keyInfo omKeyInfo
+   * @return omDirectoryInfo object
+   */
+  public static OmDirectoryInfo getDirectoryInfo(OmKeyInfo keyInfo) {
+    OmDirectoryInfo.Builder builder = new OmDirectoryInfo.Builder();
+    builder.setParentObjectID(keyInfo.getParentObjectID());
+    builder.setAcls(keyInfo.getAcls());
+    builder.addAllMetadata(keyInfo.getMetadata());
+    builder.setCreationTime(keyInfo.getCreationTime());
+    builder.setModificationTime(keyInfo.getModificationTime());
+    builder.setObjectID(keyInfo.getObjectID());
+    builder.setUpdateID(keyInfo.getUpdateID());
+    builder.setName(OzoneFSUtils.getFileName(keyInfo.getKeyName()));
+    return builder.build();
+  }
+
+  /**
+   * Verify that the given toKey directory is not a sub-directory of the
+   * fromKey directory.
+   * <p>
+   * For example, special case of renaming a directory to its own
+   * sub-directory is not allowed.
+   *
+   * @param fromKeyName source path
+   * @param toKeyName   destination path
+   * @param isDir       true represents a directory type, otherwise a file type
+   * @throws OMException if the dest dir is a sub-dir of source dir.
+   */
+  public static void verifyToDirIsASubDirOfFromDirectory(String fromKeyName,
+      String toKeyName, boolean isDir) throws OMException {
+    if (!isDir) {
+      return;
+    }
+    Path dstParent = Paths.get(toKeyName).getParent();
+    while (dstParent != null) {
+      if (Paths.get(fromKeyName).equals(dstParent)) {
+        throw new OMException("Cannot rename a directory to its own " +
+                "subdirectory", OMException.ResultCodes.KEY_RENAME_ERROR);
+        // TODO: Existing rename throws java.lang.IllegalArgumentException.
+        //       Should we throw same exception ?
+      }
+      dstParent = dstParent.getParent();
+    }
+  }
+
+  /**
+   * Verify parent exists for the destination path and return destination
+   * path parent Id.
+   * <p>
+   * Check whether dst parent dir exists or not. If the parent exists, then the
+   * source can be renamed to dst path.
+   *
+   * @param volumeName  volume name
+   * @param bucketName  bucket name
+   * @param toKeyName   destination path
+   * @param fromKeyName source path
+   * @param metaMgr     metadata manager
+   * @throws IOException if the destination parent dir doesn't exist.
+   */
+  public static long getToKeyNameParentId(String volumeName,
+      String bucketName, String toKeyName, String fromKeyName,
+      OMMetadataManager metaMgr) throws IOException {
+
+    int totalDirsCount = OzoneFSUtils.getFileCount(toKeyName);
+    // if the parent is the root '/', return the bucket's object id
+    if (totalDirsCount <= 1) {
+      String bucketKey = metaMgr.getBucketKey(volumeName, bucketName);
+      OmBucketInfo omBucketInfo =
+              metaMgr.getBucketTable().get(bucketKey);
+      return omBucketInfo.getObjectID();
+    }
+
+    String toKeyParentDir = OzoneFSUtils.getParentDir(toKeyName);
+
+    OzoneFileStatus toKeyParentDirStatus = getOMKeyInfoIfExists(metaMgr,
+            volumeName, bucketName, toKeyParentDir, 0);
+    // check if the immediate parent exists
+    if (toKeyParentDirStatus == null) {
+      throw new OMException(String.format(
+              "Failed to rename %s to %s, %s doesn't exist", fromKeyName,
+              toKeyName, toKeyParentDir),
+              OMException.ResultCodes.KEY_RENAME_ERROR);
+    } else if (toKeyParentDirStatus.isFile()) {
+      throw new OMException(String.format(
+              "Failed to rename %s to %s, %s is a file", fromKeyName, toKeyName,
+              toKeyParentDir), OMException.ResultCodes.KEY_RENAME_ERROR);
+    }
+    return toKeyParentDirStatus.getKeyInfo().getObjectID();
+  }
 }
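
To make the parent-walk in verifyToDirIsASubDirOfFromDirectory() concrete,
a few illustrative calls (the paths are hypothetical):

    // Walks up from the destination's parent; a match with the source
    // raises KEY_RENAME_ERROR.
    OMFileRequest.verifyToDirIsASubDirOfFromDirectory(
        "root/dir1", "root/dir1/sub_dir1", true);   // throws: dst inside src
    OMFileRequest.verifyToDirIsASubDirOfFromDirectory(
        "root/dir1", "root/dir2", true);            // returns normally
    OMFileRequest.verifyToDirIsASubDirOfFromDirectory(
        "root/dir1", "root/dir1/sub_dir1", false);  // returns: files skip check
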
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestV1.java
new file mode 100644
index 0000000..74e53fe
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestV1.java
@@ -0,0 +1,292 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.key;
+
+import com.google.common.base.Optional;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.audit.AuditLogger;
+import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OMMetrics;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.key.OMKeyRenameResponse;
+import org.apache.hadoop.ozone.om.response.key.OMKeyRenameResponseV1;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.*;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+
+/**
+ * Handles rename key request layout version V1.
+ */
+public class OMKeyRenameRequestV1 extends OMKeyRenameRequest {
+
+  private static final Logger LOG =
+          LoggerFactory.getLogger(OMKeyRenameRequestV1.class);
+
+  public OMKeyRenameRequestV1(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override
+  @SuppressWarnings("methodlength")
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+
+    RenameKeyRequest renameKeyRequest = getOmRequest().getRenameKeyRequest();
+    KeyArgs keyArgs = renameKeyRequest.getKeyArgs();
+    Map<String, String> auditMap = buildAuditMap(keyArgs, renameKeyRequest);
+
+    String volumeName = keyArgs.getVolumeName();
+    String bucketName = keyArgs.getBucketName();
+    String fromKeyName = keyArgs.getKeyName();
+    String toKeyName = renameKeyRequest.getToKeyName();
+
+    OMMetrics omMetrics = ozoneManager.getMetrics();
+    omMetrics.incNumKeyRenames();
+
+    AuditLogger auditLogger = ozoneManager.getAuditLogger();
+
+    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
+            getOmRequest());
+
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+    boolean acquiredLock = false;
+    OMClientResponse omClientResponse = null;
+    IOException exception = null;
+    OmKeyInfo fromKeyValue;
+    Result result;
+    try {
+      if (toKeyName.length() == 0 || fromKeyName.length() == 0) {
+        throw new OMException("Key name is empty",
+                OMException.ResultCodes.INVALID_KEY_NAME);
+      }
+
+      keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
+      volumeName = keyArgs.getVolumeName();
+      bucketName = keyArgs.getBucketName();
+
+      // check Acls to see if user has access to perform delete operation on
+      // old key and create operation on new key
+      checkKeyAcls(ozoneManager, volumeName, bucketName, fromKeyName,
+              IAccessAuthorizer.ACLType.DELETE, OzoneObj.ResourceType.KEY);
+      checkKeyAcls(ozoneManager, volumeName, bucketName, toKeyName,
+              IAccessAuthorizer.ACLType.CREATE, OzoneObj.ResourceType.KEY);
+
+      acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
+              volumeName, bucketName);
+
+      // Validate bucket and volume exists or not.
+      validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
+
+      // Check if fromKey exists
+      OzoneFileStatus fromKeyFileStatus =
+              OMFileRequest.getOMKeyInfoIfExists(omMetadataManager, volumeName,
+                      bucketName, fromKeyName, 0);
+      // case-1) fromKeyName should exist, otherwise throw an exception
+      if (fromKeyFileStatus == null) {
+        // TODO: Add support for renaming open key
+        throw new OMException("Key not found " + fromKey, KEY_NOT_FOUND);
+      }
+
+      // source existed
+      fromKeyValue = fromKeyFileStatus.getKeyInfo();
+      boolean isRenameDirectory = fromKeyFileStatus.isDirectory();
+
+      // case-2) Cannot rename a directory to its own subdirectory
+      OMFileRequest.verifyToDirIsASubDirOfFromDirectory(fromKeyName,
+              toKeyName, fromKeyFileStatus.isDirectory());
+
+      OzoneFileStatus toKeyFileStatus =
+              OMFileRequest.getOMKeyInfoIfExists(omMetadataManager,
+                      volumeName, bucketName, toKeyName, 0);
+
+      // Check if toKey exists.
+      if (toKeyFileStatus != null) {
+        // Destination exists and following are different cases:
+        OmKeyInfo toKeyValue = toKeyFileStatus.getKeyInfo();
+
+        if (fromKeyValue.getKeyName().equals(toKeyValue.getKeyName())) {
+          // case-3) If src == destin then check source and destin of same type
+          // (a) If dst is a file then return true.
+          // (b) Otherwise throw an exception.
+          // TODO: Discuss whether we need to throw an exception for files too.
+          if (toKeyFileStatus.isFile()) {
+            result = Result.SUCCESS;
+          } else {
+            throw new OMException("Key already exists " + toKeyName,
+                    OMException.ResultCodes.KEY_ALREADY_EXISTS);
+          }
+        } else if (toKeyFileStatus.isDirectory()) {
+          // case-4) If dst is a directory then rename source as sub-path of it
+          // For example: rename /source to /dst will lead to /dst/source
+          String fromFileName = OzoneFSUtils.getFileName(fromKeyName);
+          String newToKeyName = OzoneFSUtils.appendFileNameToKeyPath(toKeyName,
+                  fromFileName);
+          OzoneFileStatus newToOzoneFileStatus =
+                  OMFileRequest.getOMKeyInfoIfExists(omMetadataManager,
+                          volumeName, bucketName, newToKeyName, 0);
+
+          if (newToOzoneFileStatus != null) {
+            // case-5) If new destin '/dst/source' exists then throws exception
+            throw new OMException(String.format(
+                    "Failed to rename %s to %s, file already exists or not " +
+                            "empty!", fromKeyName, newToKeyName),
+                    OMException.ResultCodes.KEY_ALREADY_EXISTS);
+          }
+
+          omClientResponse = renameKey(toKeyValue.getObjectID(), trxnLogIndex,
+                  fromKeyValue, isRenameDirectory, newToKeyName,
+                  keyArgs.getModificationTime(), omResponse, ozoneManager);
+          result = Result.SUCCESS;
+        } else {
+          // case-6) If the destination is an existing file then throw a
+          // key already exists exception.
+          throw new OMException("Failed to rename, key already exists "
+                  + toKeyName, OMException.ResultCodes.KEY_ALREADY_EXISTS);
+        }
+      } else {
+        // Destination doesn't exist and the cases are:
+        // case-7) Check whether the dst parent dir exists or not. If the
+        // parent doesn't exist then throw an exception, otherwise the source
+        // can be renamed to the destination path.
+        long toKeyParentId = OMFileRequest.getToKeyNameParentId(volumeName,
+                bucketName, toKeyName, fromKeyName, omMetadataManager);
+
+        omClientResponse = renameKey(toKeyParentId, trxnLogIndex,
+                fromKeyValue, isRenameDirectory, toKeyName,
+                keyArgs.getModificationTime(), omResponse, ozoneManager);
+
+        result = Result.SUCCESS;
+      }
+    } catch (IOException ex) {
+      result = Result.FAILURE;
+      exception = ex;
+      omClientResponse = new OMKeyRenameResponse(createErrorOMResponse(
+              omResponse, exception));
+    } finally {
+      addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
+              omDoubleBufferHelper);
+      if (acquiredLock) {
+        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
+                bucketName);
+      }
+    }
+
+    auditLog(auditLogger, buildAuditMessage(OMAction.RENAME_KEY, auditMap,
+            exception, getOmRequest().getUserInfo()));
+
+    switch (result) {
+    case SUCCESS:
+      LOG.debug("Rename Key is successfully completed for volume:{} bucket:{}" +
+                      " fromKey:{} toKey:{}. ", volumeName, bucketName,
+              fromKeyName, toKeyName);
+      break;
+    case FAILURE:
+      ozoneManager.getMetrics().incNumKeyRenameFails();
+      LOG.error("Rename key failed for volume:{} bucket:{} fromKey:{} " +
+                      "toKey:{}. Key: {} not found.", volumeName, bucketName,
+              fromKeyName, toKeyName, fromKeyName);
+      break;
+    default:
+      LOG.error("Unrecognized Result for OMKeyRenameRequest: {}",
+              renameKeyRequest);
+    }
+    return omClientResponse;
+  }
+
+  @SuppressWarnings("parameternumber")
+  private OMClientResponse renameKey(long toKeyParentId,
+      long trxnLogIndex, OmKeyInfo fromKeyValue, boolean isRenameDirectory,
+      String toKeyName, long modificationTime, OMResponse.Builder omResponse,
+      OzoneManager ozoneManager) {
+
+    String dbFromKey = fromKeyValue.getPath();
+    String toKeyFileName = OzoneFSUtils.getFileName(toKeyName);
+
+    fromKeyValue.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
+    // Set toFileName
+    fromKeyValue.setKeyName(toKeyFileName);
+    fromKeyValue.setFileName(toKeyFileName);
+    // Set toKeyObjectId
+    fromKeyValue.setParentObjectID(toKeyParentId);
+    // Set modification time
+    fromKeyValue.setModificationTime(modificationTime);
+
+    // destination dbKeyName
+    String dbToKey = fromKeyValue.getPath();
+
+    // Update the table cache: dbFromKey is removed and dbToKey is added
+    // with the updated omKeyInfo.
+    OMMetadataManager metadataMgr = ozoneManager.getMetadataManager();
+    if (isRenameDirectory) {
+      Table<String, OmDirectoryInfo> dirTable = metadataMgr.getDirectoryTable();
+      dirTable.addCacheEntry(new CacheKey<>(dbFromKey),
+              new CacheValue<>(Optional.absent(), trxnLogIndex));
+
+      dirTable.addCacheEntry(new CacheKey<>(dbToKey),
+              new CacheValue<>(Optional.of(OMFileRequest.
+                              getDirectoryInfo(fromKeyValue)), trxnLogIndex));
+    } else {
+      Table<String, OmKeyInfo> keyTable = metadataMgr.getKeyTable();
+
+      keyTable.addCacheEntry(new CacheKey<>(dbFromKey),
+              new CacheValue<>(Optional.absent(), trxnLogIndex));
+
+      keyTable.addCacheEntry(new CacheKey<>(dbToKey),
+              new CacheValue<>(Optional.of(fromKeyValue), trxnLogIndex));
+    }
+
+    OMClientResponse omClientResponse = new OMKeyRenameResponseV1(omResponse
+            .setRenameKeyResponse(RenameKeyResponse.newBuilder()).build(),
+            dbFromKey, dbToKey, fromKeyValue, isRenameDirectory);
+    return omClientResponse;
+  }
+
+  private Map<String, String> buildAuditMap(
+          KeyArgs keyArgs, RenameKeyRequest renameKeyRequest) {
+    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
+    auditMap.remove(OzoneConsts.KEY);
+    auditMap.put(OzoneConsts.SRC_KEY, keyArgs.getKeyName());
+    auditMap.put(OzoneConsts.DST_KEY, renameKeyRequest.getToKeyName());
+    return auditMap;
+  }
+}
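
For readers tracing the branches in validateAndUpdateCache() above, a compact
restatement of the rename decision table (an editorial summary of the cases
as implemented in this request):

    // src missing                          -> KEY_NOT_FOUND
    // dst is a sub-dir of src              -> KEY_RENAME_ERROR
    // dst == src, src is a file            -> success (no-op)
    // dst == src, src is a directory       -> KEY_ALREADY_EXISTS
    // dst is a dir, dst/srcName missing    -> rename src under dst
    // dst is a dir, dst/srcName exists     -> KEY_ALREADY_EXISTS
    // dst is an existing file              -> KEY_ALREADY_EXISTS
    // dst missing, parent(dst) missing     -> KEY_RENAME_ERROR
    // dst missing, parent(dst) is a file   -> KEY_RENAME_ERROR
    // dst missing, parent(dst) is a dir    -> rename src to dst
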
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java
index 7470b37..3b7edf1 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java
@@ -70,4 +70,15 @@ public class OMKeyRenameResponse extends OMClientResponse {
         renameKeyInfo);
   }
 
+  public OmKeyInfo getRenameKeyInfo() {
+    return renameKeyInfo;
+  }
+
+  public String getFromKeyName() {
+    return fromKeyName;
+  }
+
+  public String getToKeyName() {
+    return toKeyName;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponseV1.java
similarity index 50%
copy from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java
copy to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponseV1.java
index 7470b37..7a9b159 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponseV1.java
@@ -18,56 +18,52 @@
 
 package org.apache.hadoop.ozone.om.response.key;
 
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 
-import java.io.IOException;
 import javax.annotation.Nonnull;
+import java.io.IOException;
 
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE;
 
 /**
- * Response for RenameKey request.
+ * Response for RenameKey request layout version V1.
  */
-@CleanupTableInfo(cleanupTables = {KEY_TABLE})
-public class OMKeyRenameResponse extends OMClientResponse {
-
-  private String fromKeyName;
-  private String toKeyName;
-  private OmKeyInfo renameKeyInfo;
+@CleanupTableInfo(cleanupTables = {FILE_TABLE, DIRECTORY_TABLE})
+public class OMKeyRenameResponseV1 extends OMKeyRenameResponse {
 
-  public OMKeyRenameResponse(@Nonnull OMResponse omResponse,
-      String fromKeyName, String toKeyName, @Nonnull OmKeyInfo renameKeyInfo) {
-    super(omResponse);
-    this.fromKeyName = fromKeyName;
-    this.toKeyName = toKeyName;
-    this.renameKeyInfo = renameKeyInfo;
-  }
+  private boolean isRenameDirectory;
 
-  /**
-   * For when the request is not successful.
-   * For a successful request, the other constructor should be used.
-   */
-  public OMKeyRenameResponse(@Nonnull OMResponse omResponse) {
-    super(omResponse);
-    checkStatusNotOK();
+  public OMKeyRenameResponseV1(@Nonnull OMResponse omResponse,
+      String fromKeyName, String toKeyName, @Nonnull OmKeyInfo renameKeyInfo,
+      boolean isRenameDirectory) {
+    super(omResponse, fromKeyName, toKeyName, renameKeyInfo);
+    this.isRenameDirectory = isRenameDirectory;
   }
 
   @Override
   public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
-    String volumeName = renameKeyInfo.getVolumeName();
-    String bucketName = renameKeyInfo.getBucketName();
-    omMetadataManager.getKeyTable().deleteWithBatch(batchOperation,
-        omMetadataManager.getOzoneKey(volumeName, bucketName, fromKeyName));
-    omMetadataManager.getKeyTable().putWithBatch(batchOperation,
-        omMetadataManager.getOzoneKey(volumeName, bucketName, toKeyName),
-        renameKeyInfo);
-  }
+                           BatchOperation batchOperation) throws IOException {
+
+    if (isRenameDirectory) {
+      omMetadataManager.getDirectoryTable().deleteWithBatch(batchOperation,
+              getFromKeyName());
 
+      OmDirectoryInfo renameDirInfo =
+              OMFileRequest.getDirectoryInfo(getRenameKeyInfo());
+      omMetadataManager.getDirectoryTable().putWithBatch(batchOperation,
+              getToKeyName(), renameDirInfo);
+
+    } else {
+      omMetadataManager.getKeyTable().deleteWithBatch(batchOperation,
+              getFromKeyName());
+      omMetadataManager.getKeyTable().putWithBatch(batchOperation,
+              getToKeyName(), getRenameKeyInfo());
+    }
+  }
 }
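
Both the delete and the put above land in a single batch, which is what makes
the V1 rename atomic at the DB level. A minimal sketch of that pattern outside
the double-buffer machinery (accessors as used in this patch; dbFromKey,
dbToKey and keyInfo are assumed to be in scope; error handling omitted):

    try (BatchOperation batch = omMetadataManager.getStore()
        .initBatchOperation()) {
      // Delete the old key and put the renamed key in one atomic batch.
      omMetadataManager.getKeyTable().deleteWithBatch(batch, dbFromKey);
      omMetadataManager.getKeyTable().putWithBatch(batch, dbToKey, keyInfo);
      omMetadataManager.getStore().commitBatchOperation(batch);
    }
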
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
index 6784dea..56c7e10 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.OzoneKey;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
@@ -523,4 +524,8 @@ public class BasicOzoneClientAdapterImpl implements OzoneClientAdapter {
     return blockLocations;
   }
 
+  @Override
+  public String getBucketLayoutVersion() {
+    return bucket.getMetadata().get(OMConfigKeys.OZONE_OM_LAYOUT_VERSION);
+  }
 }
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index ffe3aef..d217da8 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.conf.StorageUnit;
 import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -314,6 +315,7 @@ public class BasicOzoneFileSystem extends FileSystem {
 
     String srcPath = src.toUri().getPath();
     String dstPath = dst.toUri().getPath();
+    // TODO: Discuss whether we need to throw an exception here.
     if (srcPath.equals(dstPath)) {
       return true;
     }
@@ -325,6 +327,12 @@ public class BasicOzoneFileSystem extends FileSystem {
       return false;
     }
 
+    String layOutVersion = adapter.getBucketLayoutVersion();
+    if (layOutVersion != null &&
+            OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1.equals(layOutVersion)) {
+      return renameV1(srcPath, dstPath);
+    }
+
     // Check if the source exists
     FileStatus srcStatus;
     try {
@@ -405,6 +413,11 @@ public class BasicOzoneFileSystem extends FileSystem {
     return result;
   }
 
+  private boolean renameV1(String srcPath, String dstPath) throws IOException {
+    adapter.renameKey(srcPath, dstPath);
+    return true;
+  }
+
   /**
    * Intercept rename to trash calls from TrashPolicyDefault.
    */
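
The effect of the layout check above: for a V1 bucket the whole rename becomes
a single OM round trip instead of the client-side key walk. A sketch (paths
are hypothetical; adapter as wired above):

    String layout = adapter.getBucketLayoutVersion();
    if (OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1.equals(layout)) {
      // The OM resolves both paths and updates dir/file tables atomically.
      adapter.renameKey("/dir1/file1", "/dir2/file1");
    }
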
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
index 74f5557..4ea08f2 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.ozone.client.OzoneKey;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
@@ -1031,4 +1032,10 @@ public class BasicRootedOzoneClientAdapterImpl
         null, null, null, new BlockLocation[0]
     );
   }
+
+  @Override
+  public String getBucketLayoutVersion() {
+    // TODO: Need to refine this part.
+    return OMConfigKeys.OZONE_OM_LAYOUT_VERSION_DEFAULT;
+  }
 }
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
index b9e2881..5b65a0e 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
@@ -75,4 +75,5 @@ public interface OzoneClientAdapter {
   FileStatusAdapter getFileStatus(String key, URI uri,
       Path qualifiedPath, String userName) throws IOException;
 
+  /**
+   * Returns the layout version stored in the bucket metadata, if any.
+   */
+  String getBucketLayoutVersion();
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 03/19: HDDS-4332: ListFileStatus - do lookup in directory and file tables (#1503)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 78605ddc878a137f21f47ecf3931818dd5d90674
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Sat Oct 31 06:32:23 2020 +0530

    HDDS-4332: ListFileStatus - do lookup in directory and file tables (#1503)
---
 .../hadoop/ozone/om/helpers/OzoneFSUtils.java      |  31 ++
 .../hadoop/fs/ozone/TestOzoneFileInterfaces.java   |  18 +-
 .../hadoop/fs/ozone/TestOzoneFileInterfacesV1.java |  66 ++++
 .../hadoop/fs/ozone/TestOzoneFileSystem.java       |  49 ++-
 .../hadoop/fs/ozone/TestOzoneFileSystemV1.java     | 415 +++++++++++++++++++++
 .../ozone/freon/TestHadoopDirTreeGenerator.java    |  22 +-
 .../ozone/freon/TestHadoopDirTreeGeneratorV1.java  |  33 ++
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 398 ++++++++++++++++++++
 .../request/file/OMDirectoryCreateRequestV1.java   |   2 -
 .../ozone/om/request/file/OMFileRequest.java       | 117 +++++-
 10 files changed, 1133 insertions(+), 18 deletions(-)

diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
index 96df56f..63bfd8f 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.util.StringUtils;
 import javax.annotation.Nonnull;
 import java.nio.file.Paths;
 
+import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
 
 /**
@@ -131,4 +132,34 @@ public final class OzoneFSUtils {
     // failed to converts a path key
     return keyName;
   }
+
+  /**
+   * Verifies whether the childKey is an immediate path under the given
+   * parentKey.
+   *
+   * @param parentKey parent key name
+   * @param childKey  child key name
+   * @return true if childKey is an immediate path under the given parentKey
+   */
+  public static boolean isImmediateChild(String parentKey, String childKey) {
+
+    // Empty childKey has no parent, so just returning false.
+    if (org.apache.commons.lang3.StringUtils.isBlank(childKey)) {
+      return false;
+    }
+    java.nio.file.Path parentPath = Paths.get(parentKey);
+    java.nio.file.Path childPath = Paths.get(childKey);
+
+    java.nio.file.Path childParent = childPath.getParent();
+    // Following are the valid parentKey formats:
+    // parentKey="" or parentKey="/" or parentKey="/a" or parentKey="a"
+    // Following are the valid childKey formats:
+    // childKey="/" or childKey="/a/b" or childKey="a/b"
+    if (org.apache.commons.lang3.StringUtils.isBlank(parentKey)) {
+      return childParent == null ||
+              OM_KEY_PREFIX.equals(childParent.toString());
+    }
+
+    return parentPath.equals(childParent);
+  }
 }
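
A quick reference for the helper above, matching the valid formats listed in
its comments (illustrative values):

    OzoneFSUtils.isImmediateChild("a", "a/b");     // true
    OzoneFSUtils.isImmediateChild("", "/a");       // true: parent is the root
    OzoneFSUtils.isImmediateChild("a", "a/b/c");   // false: grandchild
    OzoneFSUtils.isImmediateChild("a/b", "a");     // false
    OzoneFSUtils.isImmediateChild("a", "");        // false: blank childKey
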
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
index fbdd5d4..4118c95 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
@@ -56,6 +56,8 @@ import static java.nio.charset.StandardCharsets.UTF_8;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang3.RandomStringUtils;
 import static org.apache.hadoop.fs.ozone.Constants.OZONE_DEFAULT_USER;
+
+import org.jetbrains.annotations.NotNull;
 import org.junit.After;
 import org.junit.Assert;
 
@@ -119,7 +121,8 @@ public class TestOzoneFileInterfaces {
 
   private OMMetrics omMetrics;
 
-  private boolean enableFileSystemPaths;
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  protected boolean enableFileSystemPaths;
 
   public TestOzoneFileInterfaces(boolean setDefaultFs,
       boolean useAbsolutePath, boolean enabledFileSystemPaths) {
@@ -134,9 +137,8 @@ public class TestOzoneFileInterfaces {
     volumeName = RandomStringUtils.randomAlphabetic(10).toLowerCase();
     bucketName = RandomStringUtils.randomAlphabetic(10).toLowerCase();
 
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
-        enableFileSystemPaths);
+    OzoneConfiguration conf = getOzoneConfiguration();
+
     cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(3)
         .build();
@@ -161,6 +163,14 @@ public class TestOzoneFileInterfaces {
     omMetrics = cluster.getOzoneManager().getMetrics();
   }
 
+  @NotNull
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+        enableFileSystemPaths);
+    return conf;
+  }
+
   @After
   public void teardown() throws IOException {
     if (cluster != null) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfacesV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfacesV1.java
new file mode 100644
index 0000000..93473be
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfacesV1.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+/**
+ * Test OzoneFileSystem interfaces for layout version V1.
+ *
+ * This test exercises the various interfaces, i.e.
+ * create, read, write, getFileStatus.
+ */
+@RunWith(Parameterized.class)
+public class TestOzoneFileInterfacesV1 extends TestOzoneFileInterfaces {
+
+  public TestOzoneFileInterfacesV1(boolean setDefaultFs,
+      boolean useAbsolutePath, boolean enabledFileSystemPaths) {
+    super(setDefaultFs, useAbsolutePath, enabledFileSystemPaths);
+  }
+
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+            enableFileSystemPaths);
+    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    return conf;
+  }
+
+  @Override
+  @Test
+  @Ignore("TODO:HDDS-2939")
+  public void testDirectory() {
+
+  }
+
+  @Override
+  @Test
+  @Ignore("TODO:HDDS-2939")
+  public void testOzFsReadWrite() {
+
+  }
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
index be383e2..ab0865d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
@@ -121,15 +121,24 @@ public class TestOzoneFileSystem {
   private static final Logger LOG =
       LoggerFactory.getLogger(TestOzoneFileSystem.class);
 
-  private static boolean enabledFileSystemPaths;
-  private static boolean omRatisEnabled;
-
-  private static MiniOzoneCluster cluster;
-  private static FileSystem fs;
-  private static OzoneFileSystem o3fs;
-  private static String volumeName;
-  private static String bucketName;
-  private static Trash trash;
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  protected static boolean enabledFileSystemPaths;
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  protected static boolean omRatisEnabled;
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  protected static MiniOzoneCluster cluster;
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  protected static FileSystem fs;
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  protected static OzoneFileSystem o3fs;
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  protected static String volumeName;
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  protected static String bucketName;
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  protected static int rootItemCount;
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  protected static Trash trash;
 
   private void init() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
@@ -228,6 +237,28 @@ public class TestOzoneFileSystem {
     } catch (FileAlreadyExistsException fae) {
       // ignore as its expected
     }
+
+    // Directory
+    FileStatus fileStatus = fs.getFileStatus(parent);
+    assertEquals("FileStatus did not return the directory",
+            "/d1/d2/d3/d4", fileStatus.getPath().toUri().getPath());
+    assertTrue("FileStatus did not return the directory",
+            fileStatus.isDirectory());
+
+    // invalid sub directory
+    try {
+      fs.getFileStatus(new Path("/d1/d2/d3/d4/key3/invalid"));
+      fail("Should throw FileNotFoundException");
+    } catch (FileNotFoundException fnfe) {
+      // ignore as it's expected
+    }
+    // invalid file name
+    try {
+      fs.getFileStatus(new Path("/d1/d2/d3/d4/invalidkey"));
+      fail("Should throw FileNotFoundException");
+    } catch (FileNotFoundException fnfe) {
+      // ignore as it's expected
+    }
   }
 
   /**
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
new file mode 100644
index 0000000..6868040
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
@@ -0,0 +1,415 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.Trash;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.TestDataUtil;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.junit.Assert;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.Map;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
+
+/**
+ * Ozone file system tests that are not covered by contract tests,
+ * for layout version V1.
+ */
+@RunWith(Parameterized.class)
+public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
+
+  @Ignore("TODO:HDDS-2939")
+  @BeforeClass
+  public static void init() throws Exception {
+
+  }
+
+  public TestOzoneFileSystemV1(boolean setDefaultFs, boolean enableOMRatis) {
+    super(setDefaultFs, enableOMRatis);
+  }
+
+  @Before
+  public void setup() throws Exception {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setInt(FS_TRASH_INTERVAL_KEY, 1);
+    conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, omRatisEnabled);
+    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+            enabledFileSystemPaths);
+    if (enabledFileSystemPaths) {
+      conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    }
+    cluster = MiniOzoneCluster.newBuilder(conf)
+            .setNumDatanodes(3)
+            .build();
+    cluster.waitForClusterToBeReady();
+
+    // create a volume and a bucket to be used by OzoneFileSystem
+    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster);
+    volumeName = bucket.getVolumeName();
+    bucketName = bucket.getName();
+
+    String rootPath = String.format("%s://%s.%s/",
+            OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName);
+
+    // Set the fs.defaultFS and start the filesystem
+    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
+    // Set the number of keys to be processed during batch operate.
+    conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5);
+
+    fs = FileSystem.get(conf);
+    trash = new Trash(conf);
+    o3fs = (OzoneFileSystem) fs;
+  }
+
+  @After
+  @Override
+  public void cleanup() {
+    super.cleanup();
+    try {
+      tableCleanup();
+    } catch (IOException e) {
+      LOG.info("Failed to cleanup DB tables.", e);
+      fail("Failed to cleanup DB tables." + e.getMessage());
+    }
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+    IOUtils.closeQuietly(fs);
+  }
+
+  /**
+   * Set a timeout for each test.
+   */
+  @Rule
+  public Timeout timeout = new Timeout(300000);
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestOzoneFileSystemV1.class);
+
+  @Test
+  public void testListStatusWithoutRecursiveSearch() throws Exception {
+    /*
+     * Op 1. create file -> /key1
+     * Op 2. create dir -> /d1/d2
+     * Op 3. create dir -> /d1/d3
+     * Op 4. create dir -> /d1/d4
+     * Op 5. create file -> /d1/key1
+     * Op 6. create file -> /d2/key1
+     * Op 7. create file -> /d1/d2/key1
+     */
+    Path key1 = new Path("/key1");
+    try (FSDataOutputStream outputStream = fs.create(key1, false)) {
+      assertNotNull("Should be able to create file: key1",
+              outputStream);
+    }
+    Path d1 = new Path("/d1");
+    Path dir1Key1 = new Path(d1, "key1");
+    try (FSDataOutputStream outputStream = fs.create(dir1Key1, false)) {
+      assertNotNull("Should be able to create file: " + dir1Key1,
+              outputStream);
+    }
+    Path d2 = new Path("/d2");
+    Path dir2Key1 = new Path(d2, "key1");
+    try (FSDataOutputStream outputStream = fs.create(dir2Key1, false)) {
+      assertNotNull("Should be able to create file: " + dir2Key1,
+              outputStream);
+    }
+    Path dir1Dir2 = new Path("/d1/d2/");
+    Path dir1Dir2Key1 = new Path(dir1Dir2, "key1");
+    try (FSDataOutputStream outputStream = fs.create(dir1Dir2Key1, false)) {
+      assertNotNull("Should be able to create file: " + dir1Dir2Key1,
+              outputStream);
+    }
+    Path d1Key2 = new Path(d1, "key2");
+    try (FSDataOutputStream outputStream = fs.create(d1Key2, false)) {
+      assertNotNull("Should be able to create file: " + d1Key2,
+              outputStream);
+    }
+
+    Path dir1Dir3 = new Path("/d1/d3/");
+    Path dir1Dir4 = new Path("/d1/d4/");
+
+    fs.mkdirs(dir1Dir3);
+    fs.mkdirs(dir1Dir4);
+
+    // Root Directory
+    FileStatus[] fileStatusList = fs.listStatus(new Path("/"));
+    assertEquals("FileStatus should return files and directories",
+            3, fileStatusList.length);
+    ArrayList<String> expectedPaths = new ArrayList<>();
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1");
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d2");
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/key1");
+    for (FileStatus fileStatus : fileStatusList) {
+      expectedPaths.remove(fileStatus.getPath().toString());
+    }
+    assertEquals("Failed to return the filestatus[]" + expectedPaths,
+            0, expectedPaths.size());
+
+    // level-1 sub-dirs
+    fileStatusList = fs.listStatus(new Path("/d1"));
+    assertEquals("FileStatus should return files and directories",
+            5, fileStatusList.length);
+    expectedPaths = new ArrayList<>();
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1/d2");
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1/d3");
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1/d4");
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1/key1");
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1/key2");
+    for (FileStatus fileStatus : fileStatusList) {
+      expectedPaths.remove(fileStatus.getPath().toString());
+    }
+    assertEquals("Failed to return the filestatus[]" + expectedPaths,
+            0, expectedPaths.size());
+
+    // level-2 sub-dirs
+    fileStatusList = fs.listStatus(new Path("/d1/d2"));
+    assertEquals("FileStatus should return files and directories",
+            1, fileStatusList.length);
+    expectedPaths = new ArrayList<>();
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1/d2/" +
+            "key1");
+    for (FileStatus fileStatus : fileStatusList) {
+      expectedPaths.remove(fileStatus.getPath().toString());
+    }
+    assertEquals("Failed to return the filestatus[]" + expectedPaths,
+            0, expectedPaths.size());
+
+    // level-2 file key1
+    fileStatusList = fs.listStatus(new Path("/d1/d2/key1"));
+    assertEquals("FileStatus should return files and directories",
+            1, fileStatusList.length);
+    expectedPaths = new ArrayList<>();
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1/d2/" +
+            "key1");
+    for (FileStatus fileStatus : fileStatusList) {
+      expectedPaths.remove(fileStatus.getPath().toString());
+    }
+    assertEquals("Failed to return the filestatus[]" + expectedPaths,
+            0, expectedPaths.size());
+
+    // invalid root key
+    try {
+      fileStatusList = fs.listStatus(new Path("/key2"));
+      fail("Should throw FileNotFoundException");
+    } catch (FileNotFoundException fnfe) {
+      // ignore as it's expected
+    }
+    try {
+      fileStatusList = fs.listStatus(new Path("/d1/d2/key2"));
+      fail("Should throw FileNotFoundException");
+    } catch (FileNotFoundException fnfe) {
+      // ignore as it's expected
+    }
+  }
+
+  @Test
+  public void testListFilesRecursive() throws Exception {
+    /*
+     * Op 1. create file -> /d1/d1/d2/key1
+     * Op 2. create dir -> /key1
+     * Op 3. create dir -> /key2
+     * Op 4. create dir -> /d1/d2/d1/d2/key1
+     */
+    Path dir1Dir1Dir2Key1 = new Path("/d1/d1/d2/key1");
+    try (FSDataOutputStream outputStream = fs.create(dir1Dir1Dir2Key1,
+            false)) {
+      assertNotNull("Should be able to create file: " + dir1Dir1Dir2Key1,
+              outputStream);
+    }
+    Path key1 = new Path("/key1");
+    try (FSDataOutputStream outputStream = fs.create(key1, false)) {
+      assertNotNull("Should be able to create file: " + key1,
+              outputStream);
+    }
+    Path key2 = new Path("/key2");
+    try (FSDataOutputStream outputStream = fs.create(key2, false)) {
+      assertNotNull("Should be able to create file: key2",
+              outputStream);
+    }
+    Path dir1Dir2Dir1Dir2Key1 = new Path("/d1/d2/d1/d2/key1");
+    try (FSDataOutputStream outputStream = fs.create(dir1Dir2Dir1Dir2Key1,
+            false)) {
+      assertNotNull("Should be able to create file: "
+              + dir1Dir2Dir1Dir2Key1, outputStream);
+    }
+    RemoteIterator<LocatedFileStatus> fileStatusItr = fs.listFiles(new Path(
+            "/"), true);
+    String uriPrefix = "o3fs://" + bucketName + "." + volumeName;
+    ArrayList<String> expectedPaths = new ArrayList<>();
+    expectedPaths.add(uriPrefix + dir1Dir1Dir2Key1.toString());
+    expectedPaths.add(uriPrefix + key1.toString());
+    expectedPaths.add(uriPrefix + key2.toString());
+    expectedPaths.add(uriPrefix + dir1Dir2Dir1Dir2Key1.toString());
+    int expectedFilesCount = expectedPaths.size();
+    int actualCount = 0;
+    while (fileStatusItr.hasNext()) {
+      LocatedFileStatus status = fileStatusItr.next();
+      expectedPaths.remove(status.getPath().toString());
+      actualCount++;
+    }
+    assertEquals("Failed to get all the files: " + expectedPaths,
+            expectedFilesCount, actualCount);
+    assertEquals("Failed to get all the files: " + expectedPaths, 0,
+            expectedPaths.size());
+
+    // Recursive=false
+    fileStatusItr = fs.listFiles(new Path("/"), false);
+    expectedPaths.clear();
+    expectedPaths.add(uriPrefix + "/key1");
+    expectedPaths.add(uriPrefix + "/key2");
+    expectedFilesCount = expectedPaths.size();
+    actualCount = 0;
+    while (fileStatusItr.hasNext()) {
+      LocatedFileStatus status = fileStatusItr.next();
+      expectedPaths.remove(status.getPath().toString());
+      actualCount++;
+    }
+    assertEquals("Failed to get all the files: " + expectedPaths, 0,
+            expectedPaths.size());
+    assertEquals("Failed to get all the files: " + expectedPaths,
+            expectedFilesCount, actualCount);
+  }
+
+  /**
+   * Clean up the keyTable and directoryTable explicitly, as the FS delete
+   * operation is not yet supported.
+   *
+   * @throws IOException DB failure
+   */
+  protected void tableCleanup() throws IOException {
+    OMMetadataManager metadataMgr = cluster.getOzoneManager()
+            .getMetadataManager();
+    TableIterator<String, ? extends
+            Table.KeyValue<String, OmDirectoryInfo>> dirTableIterator =
+            metadataMgr.getDirectoryTable().iterator();
+    dirTableIterator.seekToFirst();
+    ArrayList<String> dirList = new ArrayList<>();
+    while (dirTableIterator.hasNext()) {
+      String key = dirTableIterator.key();
+      if (StringUtils.isNotBlank(key)) {
+        dirList.add(key);
+      }
+      dirTableIterator.next();
+    }
+
+    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmDirectoryInfo>>>
+            cacheIterator = metadataMgr.getDirectoryTable().cacheIterator();
+    while (cacheIterator.hasNext()) {
+      cacheIterator.next();
+      cacheIterator.remove();
+    }
+
+    for (String dirKey : dirList) {
+      metadataMgr.getDirectoryTable().delete(dirKey);
+      Assert.assertNull("Unexpected entry!",
+              metadataMgr.getDirectoryTable().get(dirKey));
+    }
+
+    Assert.assertTrue("DirTable is not empty",
+            metadataMgr.getDirectoryTable().isEmpty());
+
+    Assert.assertFalse(metadataMgr.getDirectoryTable().cacheIterator()
+            .hasNext());
+
+    TableIterator<String, ? extends
+            Table.KeyValue<String, OmKeyInfo>> keyTableIterator =
+            metadataMgr.getKeyTable().iterator();
+    keyTableIterator.seekToFirst();
+    ArrayList<String> fileList = new ArrayList<>();
+    while (keyTableIterator.hasNext()) {
+      String key = keyTableIterator.key();
+      if (StringUtils.isNotBlank(key)) {
+        fileList.add(key);
+      }
+      keyTableIterator.next();
+    }
+
+    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>>
+            keyCacheIterator = metadataMgr.getKeyTable().cacheIterator();
+    while (keyCacheIterator.hasNext()) {
+      keyCacheIterator.next();
+      keyCacheIterator.remove();
+    }
+
+    for (String fileKey : fileList) {
+      metadataMgr.getKeyTable().delete(fileKey);
+      Assert.assertNull("Unexpected entry!",
+              metadataMgr.getKeyTable().get(fileKey));
+    }
+
+    Assert.assertTrue("KeyTable is not empty",
+            metadataMgr.getKeyTable().isEmpty());
+
+    rootItemCount = 0;
+  }
+
+  @Override
+  @Test
+  @Ignore("TODO:HDDS-2939")
+  public void testTrash() throws Exception {
+  }
+
+  @Override
+  @Test
+  @Ignore("TODO:HDDS-2939")
+  public void testRenameToTrashEnabled() throws Exception {
+  }
+}
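
tableCleanup() above drains the directory table and the key table with the
same three-step pattern: collect the persisted keys, evict the cache entries,
then delete. A generic helper capturing that pattern, sketched against the
Table/TableIterator API used in this patch (drainTable itself is not an
existing utility):

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.Iterator;
    import java.util.List;
    import java.util.Map;

    import org.apache.commons.lang3.StringUtils;
    import org.apache.hadoop.hdds.utils.db.Table;
    import org.apache.hadoop.hdds.utils.db.TableIterator;
    import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
    import org.apache.hadoop.hdds.utils.db.cache.CacheValue;

    final class TableCleanupUtil {
      private TableCleanupUtil() {
      }

      // Drains all entries from a table and its cache. Illustrative only.
      static <V> void drainTable(Table<String, V> table) throws IOException {
        // 1. Collect the persisted keys first; deleting while walking the
        //    underlying RocksDB iterator is not safe.
        List<String> keys = new ArrayList<>();
        TableIterator<String, ? extends Table.KeyValue<String, V>> iter =
            table.iterator();
        iter.seekToFirst();
        while (iter.hasNext()) {
          String key = iter.key();
          if (StringUtils.isNotBlank(key)) {
            keys.add(key);
          }
          iter.next();
        }

        // 2. Evict cache entries so cached reads cannot resurrect them.
        Iterator<Map.Entry<CacheKey<String>, CacheValue<V>>> cacheIter =
            table.cacheIterator();
        while (cacheIter.hasNext()) {
          cacheIter.next();
          cacheIter.remove();
        }

        // 3. Delete from the backing store.
        for (String key : keys) {
          table.delete(key);
        }
      }
    }

With such a helper, tableCleanup() reduces to
drainTable(metadataMgr.getDirectoryTable()) followed by
drainTable(metadataMgr.getKeyTable()), plus the emptiness assertions.
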
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java
index 6ad5b8d..776551b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java
@@ -31,12 +31,15 @@ import org.apache.ratis.server.raftlog.RaftLog;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
 
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.net.URI;
+import java.util.ArrayList;
 
 /**
  * Test for HadoopDirTreeGenerator.
@@ -47,6 +50,8 @@ public class TestHadoopDirTreeGenerator {
   private OzoneConfiguration conf = null;
   private MiniOzoneCluster cluster = null;
   private ObjectStore store = null;
+  private static final Logger LOG =
+          LoggerFactory.getLogger(TestHadoopDirTreeGenerator.class);
 
   @Before
   public void setup() {
@@ -74,7 +79,7 @@ public class TestHadoopDirTreeGenerator {
    * @throws IOException
    */
   private void startCluster() throws Exception {
-    conf = new OzoneConfiguration();
+    conf = getOzoneConfiguration();
 
     cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build();
     cluster.waitForClusterToBeReady();
@@ -83,6 +88,10 @@ public class TestHadoopDirTreeGenerator {
     store = OzoneClientFactory.getRpcClient(conf).getObjectStore();
   }
 
+  protected OzoneConfiguration getOzoneConfiguration() {
+    return new OzoneConfiguration();
+  }
+
   @Test
   public void testNestedDirTreeGeneration() throws Exception {
     try {
@@ -103,6 +112,9 @@ public class TestHadoopDirTreeGenerator {
               2, 4, 2);
       verifyDirTree("vol5", "bucket1", 5,
               4, 1, 0);
+      // default page size is Constants.LISTING_PAGE_SIZE = 1024; a span of
+      // 1100 verifies listing across more than one page
+      verifyDirTree("vol6", "bucket1", 2,
+              1, 1100, 0);
     } finally {
       shutdown();
     }
@@ -122,6 +134,7 @@ public class TestHadoopDirTreeGenerator {
             fileCount + "", "-s", span + "", "-n", "1", "-r", rootPath,
                      "-g", perFileSizeInBytes + ""});
     // verify the directory structure
+    LOG.info("Started verifying the directory structure...");
     FileSystem fileSystem = FileSystem.get(URI.create(rootPath),
             conf);
     Path rootDir = new Path(rootPath.concat("/"));
@@ -149,6 +162,7 @@ public class TestHadoopDirTreeGenerator {
       verifyActualSpan(expectedSpanCnt, fileStatuses);
     }
     int actualNumFiles = 0;
+    ArrayList<String> files = new ArrayList<>();
     for (FileStatus fileStatus : fileStatuses) {
       if (fileStatus.isDirectory()) {
         ++depth;
@@ -157,6 +171,12 @@ public class TestHadoopDirTreeGenerator {
       } else {
         Assert.assertEquals("Mismatches file len",
                 perFileSizeInBytes, fileStatus.getLen());
+        String fName = fileStatus.getPath().getName();
+        Assert.assertFalse("actualNumFiles:" + actualNumFiles +
+                        ", fName:" + fName + ", expectedFileCnt:" +
+                        expectedFileCnt + ", depth:" + depth,
+                files.contains(fName));
+        files.add(fName);
         actualNumFiles++;
       }
     }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGeneratorV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGeneratorV1.java
new file mode 100644
index 0000000..99d4f26
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGeneratorV1.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.freon;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+
+/**
+ * Test for HadoopDirTreeGenerator with layout version V1.
+ */
+public class TestHadoopDirTreeGeneratorV1 extends TestHadoopDirTreeGenerator {
+
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    return conf;
+  }
+
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 4a968fd..4a4327d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -31,6 +31,7 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
+import java.util.LinkedHashSet;
 import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
@@ -76,6 +77,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
 import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
@@ -95,7 +97,9 @@ import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo;
 import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
@@ -129,6 +133,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_KEY_PREALLOCATION_BL
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT;
 import static org.apache.hadoop.ozone.ClientVersions.CURRENT_VERSION;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_FOUND;
@@ -1774,6 +1779,10 @@ public class KeyManagerImpl implements KeyManager {
     String bucketName = args.getBucketName();
     String keyName = args.getKeyName();
 
+    if (OzoneManagerRatisUtils.isOmLayoutVersionV1()) {
+      return getOzoneFileStatusV1(volumeName, bucketName, keyName,
+              args.getSortDatanodes(), clientAddress, false);
+    }
     return getOzoneFileStatus(volumeName, bucketName, keyName,
             args.getRefreshPipeline(), args.getSortDatanodes(), clientAddress);
   }
@@ -1838,6 +1847,65 @@ public class KeyManagerImpl implements KeyManager {
             FILE_NOT_FOUND);
   }
 
+
+  private OzoneFileStatus getOzoneFileStatusV1(String volumeName,
+      String bucketName, String keyName, boolean sortDatanodes,
+      String clientAddress, boolean skipFileNotFoundError) throws IOException {
+    OzoneFileStatus fileStatus = null;
+    metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName,
+            bucketName);
+    try {
+      // Check if this is the root of the filesystem.
+      if (keyName.length() == 0) {
+        validateBucket(volumeName, bucketName);
+        return new OzoneFileStatus();
+      }
+
+      fileStatus = OMFileRequest.getOMKeyInfoIfExists(metadataManager,
+              volumeName, bucketName, keyName, scmBlockSize);
+
+    } finally {
+      metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName,
+              bucketName);
+    }
+
+    if (fileStatus != null) {
+      // if the key is a file, refresh its pipeline info in OM by asking SCM
+      if (fileStatus.isFile()) {
+
+        OmKeyInfo fileKeyInfo = fileStatus.getKeyInfo();
+
+        // refreshPipeline flag check has been removed as part of
+        // https://issues.apache.org/jira/browse/HDDS-3658.
+        // Please refer this jira for more details.
+        refresh(fileKeyInfo);
+
+        if (sortDatanodes) {
+          sortDatanodes(clientAddress, fileKeyInfo);
+        }
+        return new OzoneFileStatus(fileKeyInfo, scmBlockSize, false);
+      } else {
+        return fileStatus;
+      }
+    }
+
+    // Key not found.
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Unable to get file status for the key: volume: {}, bucket:" +
+                      " {}, key: {}, with error: No such file exists.",
+              volumeName, bucketName, keyName);
+    }
+
+    // don't throw exception if this flag is true.
+    if (skipFileNotFoundError) {
+      return fileStatus;
+    }
+
+    throw new OMException("Unable to get file status: volume: " +
+            volumeName + " bucket: " + bucketName + " key: " + keyName,
+            FILE_NOT_FOUND);
+  }
+
   /**
    * Ozone FS api to create a directory. Parent directories if do not exist
    * are created for the input directory.
@@ -2083,6 +2151,10 @@ public class KeyManagerImpl implements KeyManager {
       return fileStatusList;
     }
 
+    if (OzoneManagerRatisUtils.isOmLayoutVersionV1()) {
+      return listStatusV1(args, recursive, startKey, numEntries, clientAddress);
+    }
+
     String volumeName = args.getVolumeName();
     String bucketName = args.getBucketName();
     String keyName = args.getKeyName();
@@ -2218,6 +2290,332 @@ public class KeyManagerImpl implements KeyManager {
     return fileStatusList;
   }
 
+  public List<OzoneFileStatus> listStatusV1(OmKeyArgs args, boolean recursive,
+      String startKey, long numEntries, String clientAddress)
+          throws IOException {
+    Preconditions.checkNotNull(args, "Key args can not be null");
+
+    // unsorted OMKeyInfo list contains combined results from TableCache and DB.
+    List<OzoneFileStatus> fileStatusFinalList = new ArrayList<>();
+    LinkedHashSet<OzoneFileStatus> fileStatusList = new LinkedHashSet<>();
+    if (numEntries <= 0) {
+      return fileStatusFinalList;
+    }
+    String volumeName = args.getVolumeName();
+    String bucketName = args.getBucketName();
+    String keyName = args.getKeyName();
+    String seekFileInDB;
+    String seekDirInDB;
+    long prefixKeyInDB;
+    String prefixPath = keyName;
+    int countEntries = 0;
+
+    // TODO: recursive flag=true will be handled in HDDS-4360 jira.
+    metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName,
+            bucketName);
+    try {
+      if (Strings.isNullOrEmpty(startKey)) {
+        OzoneFileStatus fileStatus = getFileStatus(args, clientAddress);
+        if (fileStatus.isFile()) {
+          return Collections.singletonList(fileStatus);
+        }
+
+        // No need to search the DeletedTable: deletes are applied directly
+        // in the dirTable or keyTable by breaking the pointer to their
+        // sub-dirs and sub-files, so there is no inconsistency to handle.
+
+        /*
+         * keyName is a directory.
+         * Say, "/a" is the dir name and its objectID is 1024, then seek
+         * will be doing with "1024/" to get all immediate descendants.
+         */
+        if (fileStatus.getKeyInfo() != null) {
+          prefixKeyInDB = fileStatus.getKeyInfo().getObjectID();
+        } else {
+          // list root directory.
+          String bucketKey = metadataManager.getBucketKey(volumeName,
+                  bucketName);
+          OmBucketInfo omBucketInfo =
+                  metadataManager.getBucketTable().get(bucketKey);
+          prefixKeyInDB = omBucketInfo.getObjectID();
+        }
+        seekFileInDB = metadataManager.getOzonePathKey(prefixKeyInDB, "");
+        seekDirInDB = metadataManager.getOzonePathKey(prefixKeyInDB, "");
+
+        // Order of seek -> (1)Seek files in fileTable (2)Seek dirs in dirTable
+        // 1. Seek the given key in key table.
+        countEntries = getFilesFromDirectory(fileStatusList, seekFileInDB,
+                prefixPath, prefixKeyInDB, startKey, countEntries, numEntries);
+        // 2. Seek the given key in dir table.
+        getDirectories(fileStatusList, seekDirInDB, prefixPath, prefixKeyInDB,
+                startKey, countEntries, numEntries, volumeName, bucketName,
+                recursive);
+      } else {
+        /*
+         * startKey will be used in iterator seek and sets the beginning point
+         * for key traversal.
+         * keyName will be used as parentID where the user has requested to
+         * list the keys from.
+         *
+         * When recursive flag=false, parentID won't change between two pages.
+         * For example: OM has a namespace like,
+         *    /a/1...1M files and /a/b/1...1M files.
+         *    /a/1...1M directories and /a/b/1...1M directories.
+         * Listing "/a", will always have the parentID as "a" irrespective of
+         * the startKey value.
+         */
+
+        // Check startKey is an immediate child of keyName. For example,
+        // keyName=/a/ and expected startKey=/a/b. startKey can't be /xyz/b.
+        if (!OzoneFSUtils.isImmediateChild(keyName, startKey)) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("StartKey {} is not an immediate child of keyName {}. " +
+                    "Returns empty list", startKey, keyName);
+          }
+          return Collections.emptyList();
+        }
+
+        OzoneFileStatus fileStatusInfo = getOzoneFileStatusV1(volumeName,
+                bucketName, startKey, false, null, true);
+
+        if (fileStatusInfo != null) {
+          prefixKeyInDB = fileStatusInfo.getKeyInfo().getParentObjectID();
+          if (fileStatusInfo.isDirectory()) {
+            seekDirInDB = metadataManager.getOzonePathKey(prefixKeyInDB,
+                    fileStatusInfo.getKeyInfo().getFileName());
+
+            // Order of seek -> (1) Seek dirs only in dirTable. In OM, the
+            // search order is always fileTable first, then dirTable, so it
+            // is not required to search the fileTable again here.
+
+            // Seek the given key in dirTable.
+            getDirectories(fileStatusList, seekDirInDB, prefixPath,
+                    prefixKeyInDB, startKey, countEntries, numEntries,
+                    volumeName, bucketName, recursive);
+          } else {
+            seekFileInDB = metadataManager.getOzonePathKey(prefixKeyInDB,
+                    fileStatusInfo.getKeyInfo().getFileName());
+            // begins from the first sub-dir under the parent dir
+            seekDirInDB = metadataManager.getOzonePathKey(prefixKeyInDB, "");
+
+            // 1. Seek the given key in key table.
+            countEntries = getFilesFromDirectory(fileStatusList, seekFileInDB,
+                    prefixPath, prefixKeyInDB, startKey, countEntries,
+                    numEntries);
+            // 2. Seek the given key in dir table.
+            getDirectories(fileStatusList, seekDirInDB, prefixPath,
+                    prefixKeyInDB, startKey, countEntries, numEntries,
+                    volumeName, bucketName, recursive);
+          }
+        } else {
+          // TODO: HDDS-4364: startKey can be a non-existent key
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("StartKey {} is a non-existent key, returning an " +
+                    "empty list", startKey);
+          }
+          }
+          return Collections.emptyList();
+        }
+      }
+    } finally {
+      metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName,
+              bucketName);
+    }
+    for (OzoneFileStatus fileStatus : fileStatusList) {
+      if (fileStatus.isFile()) {
+        // refreshPipeline flag check has been removed as part of
+        // https://issues.apache.org/jira/browse/HDDS-3658.
+        // Please refer this jira for more details.
+        refresh(fileStatus.getKeyInfo());
+
+        // No need to check if a key is deleted or not here, this is handled
+        // when adding entries to cacheKeyMap from DB.
+        if (args.getSortDatanodes()) {
+          sortDatanodes(clientAddress, fileStatus.getKeyInfo());
+        }
+      }
+    }
+    fileStatusFinalList.addAll(fileStatusList);
+    return fileStatusFinalList;
+  }
+
+  @SuppressWarnings("parameternumber")
+  protected int getDirectories(Set<OzoneFileStatus> fileStatusList,
+      String seekDirInDB, String prefixPath, long prefixKeyInDB,
+      String startKey, int countEntries, long numEntries, String volumeName,
+      String bucketName, boolean recursive) throws IOException {
+
+    Table<String, OmDirectoryInfo> dirTable =
+            metadataManager.getDirectoryTable();
+    countEntries = listStatusFindDirsInTableCache(fileStatusList, dirTable,
+            prefixKeyInDB, seekDirInDB, prefixPath, startKey, volumeName,
+            bucketName, countEntries, numEntries);
+    TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
+            iterator = dirTable.iterator();
+
+    iterator.seek(seekDirInDB);
+
+    while (iterator.hasNext() && numEntries - countEntries > 0) {
+      OmDirectoryInfo dirInfo = iterator.value().getValue();
+      if (!isImmediateChild(dirInfo.getParentObjectID(), prefixKeyInDB)) {
+        break;
+      }
+
+      // TODO: recursive list will be handled in HDDS-4360 jira.
+      if (!recursive) {
+        String dirName = OMFileRequest.getAbsolutePath(prefixPath,
+                dirInfo.getName());
+        OmKeyInfo omKeyInfo = OMFileRequest.getOmKeyInfo(volumeName,
+                bucketName, dirInfo, dirName);
+        fileStatusList.add(new OzoneFileStatus(omKeyInfo, scmBlockSize,
+                true));
+        countEntries++;
+      }
+      // move to next entry in the DirTable
+      iterator.next();
+    }
+
+    return countEntries;
+  }
+
+  private int getFilesFromDirectory(Set<OzoneFileStatus> fileStatusList,
+      String seekKeyInDB, String prefixKeyPath, long prefixKeyInDB,
+      String startKey, int countEntries, long numEntries) throws IOException {
+
+    Table<String, OmKeyInfo> keyTable = metadataManager.getKeyTable();
+    countEntries = listStatusFindFilesInTableCache(fileStatusList, keyTable,
+            prefixKeyInDB, seekKeyInDB, prefixKeyPath, startKey,
+            countEntries, numEntries);
+    TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+            iterator = keyTable.iterator();
+    iterator.seek(seekKeyInDB);
+    while (iterator.hasNext() && numEntries - countEntries > 0) {
+      OmKeyInfo keyInfo = iterator.value().getValue();
+
+      if (!isImmediateChild(keyInfo.getParentObjectID(), prefixKeyInDB)) {
+        break;
+      }
+
+      keyInfo.setFileName(keyInfo.getKeyName());
+      String fullKeyPath = OMFileRequest.getAbsolutePath(prefixKeyPath,
+              keyInfo.getKeyName());
+      keyInfo.setKeyName(fullKeyPath);
+      fileStatusList.add(new OzoneFileStatus(keyInfo, scmBlockSize, false));
+      countEntries++;
+      iterator.next(); // move to next entry in the table
+    }
+    return countEntries;
+  }
+
+  private boolean isImmediateChild(long parentId, long ancestorId) {
+    return parentId == ancestorId;
+  }
+
+  /**
+   * Helper function for listStatus to find key in FileTableCache.
+   */
+  @SuppressWarnings("parameternumber")
+  private int listStatusFindFilesInTableCache(
+          Set<OzoneFileStatus> fileStatusList, Table<String,
+          OmKeyInfo> keyTable, long prefixKeyInDB, String seekKeyInDB,
+          String prefixKeyPath, String startKey, int countEntries,
+          long numEntries) {
+
+    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>>
+            cacheIter = keyTable.cacheIterator();
+
+    // TODO: recursive list will be handled in HDDS-4360 jira.
+    while (cacheIter.hasNext() && numEntries - countEntries > 0) {
+      Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>> entry =
+              cacheIter.next();
+      String cacheKey = entry.getKey().getCacheKey();
+      OmKeyInfo cacheOmKeyInfo = entry.getValue().getCacheValue();
+      // cacheOmKeyInfo is null if an entry is deleted in cache
+      if (cacheOmKeyInfo == null) {
+        continue;
+      }
+
+      cacheOmKeyInfo.setFileName(cacheOmKeyInfo.getKeyName());
+      String fullKeyPath = OMFileRequest.getAbsolutePath(prefixKeyPath,
+              cacheOmKeyInfo.getKeyName());
+      cacheOmKeyInfo.setKeyName(fullKeyPath);
+
+      countEntries = addKeyInfoToFileStatusList(fileStatusList, prefixKeyInDB,
+              seekKeyInDB, startKey, countEntries, cacheKey, cacheOmKeyInfo,
+              false);
+    }
+    return countEntries;
+  }
+
+  /**
+   * Helper function for listStatus to find key in DirTableCache.
+   */
+  @SuppressWarnings("parameternumber")
+  private int listStatusFindDirsInTableCache(
+          Set<OzoneFileStatus> fileStatusList, Table<String,
+          OmDirectoryInfo> dirTable, long prefixKeyInDB, String seekKeyInDB,
+          String prefixKeyPath, String startKey, String volumeName,
+          String bucketName, int countEntries, long numEntries) {
+
+    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmDirectoryInfo>>>
+            cacheIter = dirTable.cacheIterator();
+    // seekKeyInDB will have two type of values.
+    // 1. "1024/"   -> startKey is null or empty
+    // 2. "1024/b"  -> startKey exists
+    // TODO: recursive list will be handled in HDDS-4360 jira.
+    while (cacheIter.hasNext() && numEntries - countEntries > 0) {
+      Map.Entry<CacheKey<String>, CacheValue<OmDirectoryInfo>> entry =
+              cacheIter.next();
+      String cacheKey = entry.getKey().getCacheKey();
+      OmDirectoryInfo cacheOmDirInfo = entry.getValue().getCacheValue();
+      if (cacheOmDirInfo == null) {
+        continue;
+      }
+      String fullDirPath = OMFileRequest.getAbsolutePath(prefixKeyPath,
+              cacheOmDirInfo.getName());
+      OmKeyInfo cacheDirKeyInfo = OMFileRequest.getOmKeyInfo(volumeName,
+              bucketName, cacheOmDirInfo, fullDirPath);
+
+      countEntries = addKeyInfoToFileStatusList(fileStatusList, prefixKeyInDB,
+              seekKeyInDB, startKey, countEntries, cacheKey, cacheDirKeyInfo,
+              true);
+    }
+    return countEntries;
+  }
+
+  @SuppressWarnings("parameternumber")
+  private int addKeyInfoToFileStatusList(Set<OzoneFileStatus> fileStatusList,
+      long prefixKeyInDB, String seekKeyInDB, String startKey,
+      int countEntries, String cacheKey, OmKeyInfo cacheOmKeyInfo,
+      boolean isDirectory) {
+    // seekKeyInDB will have two type of values.
+    // 1. "1024/"   -> startKey is null or empty
+    // 2. "1024/b"  -> startKey exists
+    if (StringUtils.isBlank(startKey)) {
+      // startKey is null or empty, then the seekKeyInDB="1024/"
+      if (cacheKey.startsWith(seekKeyInDB)) {
+        OzoneFileStatus fileStatus = new OzoneFileStatus(cacheOmKeyInfo,
+                scmBlockSize, isDirectory);
+        fileStatusList.add(fileStatus);
+        countEntries++;
+      }
+    } else {
+      // startKey is not empty, so seekKeyInDB="1024/b" and
+      // seekKeyInDBWithOnlyParentID="1024/". The prefix check avoids
+      // accidentally matching cache entries of another parentID, such as
+      // "102444". All keys at or after "1024/b" must be listed, hence the
+      // >= 0 string comparison.
+      String seekKeyInDBWithOnlyParentID = prefixKeyInDB + OM_KEY_PREFIX;
+      if (cacheKey.startsWith(seekKeyInDBWithOnlyParentID) &&
+              cacheKey.compareTo(seekKeyInDB) >= 0) {
+        OzoneFileStatus fileStatus = new OzoneFileStatus(cacheOmKeyInfo,
+                scmBlockSize, isDirectory);
+        fileStatusList.add(fileStatus);
+        countEntries++;
+      }
+    }
+    return countEntries;
+  }
+
   private String getNextGreaterString(String volumeName, String bucketName,
       String keyPrefix) throws IOException {
     // Increment the last character of the string and return the new ozone key.
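
The paging logic in listStatusV1/addKeyInfoToFileStatusList above hinges on
the FSO key format parentObjectID + "/" + childName. A minimal sketch of how
a seek key is built and why the startsWith guard plus the >= 0 comparison
together select "entries under this parent, at or after startKey" (the class
and helper names here are illustrative, not the OM API):

    final class SeekKeyDemo {
      private static final String OM_KEY_PREFIX = "/";

      // Mirrors the "parentObjectID/childName" format produced by
      // getOzonePathKey: "1024/" or "1024/b".
      static String ozonePathKey(long parentObjectID, String childName) {
        return parentObjectID + OM_KEY_PREFIX + childName;
      }

      // An entry belongs to the page iff it sits under the same parent AND
      // sorts at or after the resume point. The startsWith guard is what
      // keeps parent 10244 ("10244/...") from matching parent 1024.
      static boolean inPage(String cacheKey, long parentId, String seekKey) {
        String parentPrefix = parentId + OM_KEY_PREFIX;     // "1024/"
        return cacheKey.startsWith(parentPrefix)
            && cacheKey.compareTo(seekKey) >= 0;            // resume at "1024/b"
      }

      public static void main(String[] args) {
        String seek = ozonePathKey(1024, "b");              // "1024/b"
        System.out.println(inPage("1024/b", 1024, seek));   // true
        System.out.println(inPage("1024/c", 1024, seek));   // true
        System.out.println(inPage("1024/a", 1024, seek));   // false, before start
        System.out.println(inPage("10244/b", 1024, seek));  // false, other parent
      }
    }
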
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java
index c48ff78..fa66766 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java
@@ -232,7 +232,6 @@ public class OMDirectoryCreateRequestV1 extends OMDirectoryCreateRequest {
   /**
    * Construct OmDirectoryInfo for every parent directory in missing list.
    *
-   * @param ozoneManager Ozone Manager
    * @param keyArgs      key arguments
    * @param pathInfo     list of parent directories to be created and its ACLs
    * @param trxnLogIndex transaction log index id
@@ -243,7 +242,6 @@ public class OMDirectoryCreateRequestV1 extends OMDirectoryCreateRequest {
           OzoneManager ozoneManager, KeyArgs keyArgs,
           OMFileRequest.OMPathInfoV1 pathInfo, long trxnLogIndex)
           throws IOException {
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
     List<OmDirectoryInfo> missingParentInfos = new ArrayList<>();
 
     // The base id is left shifted by 8 bits for creating space to
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index ce8d49b..5225d82 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -20,11 +20,15 @@ package org.apache.hadoop.ozone.om.request.file;
 
 import java.io.IOException;
 import java.nio.file.Path;
+import java.nio.file.Paths;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
 
 import com.google.common.base.Optional;
+import com.google.common.base.Strings;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
@@ -34,7 +38,10 @@ import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -432,7 +439,6 @@ public final class OMFileRequest {
    * @param omFileInfo        key info
    * @param fileName          file name
    * @param trxnLogIndex      transaction log index
-   * @return dbOmFileInfo, which keeps leaf node name in keyName field
    */
   public static void addOpenFileTableCacheEntry(
           OMMetadataManager omMetadataManager, String dbOpenFileName,
@@ -460,7 +466,6 @@ public final class OMFileRequest {
    * @param omFileInfo        key info
    * @param fileName          file name
    * @param trxnLogIndex      transaction log index
-   * @return dbOmFileInfo, which keeps leaf node name in keyName field
    */
   public static void addFileTableCacheEntry(
           OMMetadataManager omMetadataManager, String dbFileKey,
@@ -552,4 +557,112 @@ public final class OMFileRequest {
     return dbOmKeyInfo;
   }
 
+  /**
+   * Gets OmKeyInfo if exists for the given key name in the DB.
+   *
+   * @param omMetadataMgr metadata manager
+   * @param volumeName    volume name
+   * @param bucketName    bucket name
+   * @param keyName       key name
+   * @param scmBlockSize  scm block size
+   * @return OzoneFileStatus
+   * @throws IOException DB failure
+   */
+  @Nullable
+  public static OzoneFileStatus getOMKeyInfoIfExists(
+      OMMetadataManager omMetadataMgr, String volumeName, String bucketName,
+      String keyName, long scmBlockSize) throws IOException {
+
+    Path keyPath = Paths.get(keyName);
+    Iterator<Path> elements = keyPath.iterator();
+    String bucketKey = omMetadataMgr.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataMgr.getBucketTable().get(bucketKey);
+
+    long lastKnownParentId = omBucketInfo.getObjectID();
+    OmDirectoryInfo omDirInfo = null;
+    while (elements.hasNext()) {
+      String fileName = elements.next().toString();
+
+      // For example, /vol1/buck1/a/b/c/d/e/file1.txt
+      // 1. Look up each path component in the directoryTable, starting from
+      // the bucket 'buck1' down to the leaf component 'file1.txt'.
+      // 2. If no directory exists for the leaf component 'file1.txt', then
+      // look it up in the fileTable.
+      String dbNodeName = omMetadataMgr.getOzonePathKey(
+              lastKnownParentId, fileName);
+      omDirInfo = omMetadataMgr.getDirectoryTable().get(dbNodeName);
+
+      if (omDirInfo != null) {
+        lastKnownParentId = omDirInfo.getObjectID();
+      } else if (!elements.hasNext()) {
+        // Reached the last path component; check if a file exists there.
+        OmKeyInfo omKeyInfo = OMFileRequest.getOmKeyInfoFromFileTable(false,
+                omMetadataMgr, dbNodeName, keyName);
+        if (omKeyInfo != null) {
+          return new OzoneFileStatus(omKeyInfo, scmBlockSize, false);
+        }
+      } else {
+        // Missing an intermediate directory, so the key is not in the DB;
+        // return null.
+        return null;
+      }
+    }
+
+    if (omDirInfo != null) {
+      OmKeyInfo omKeyInfo = getOmKeyInfo(volumeName, bucketName, omDirInfo,
+              keyName);
+      return new OzoneFileStatus(omKeyInfo, scmBlockSize, true);
+    }
+
+    // key not found in DB
+    return null;
+  }
+
+  /**
+   * Prepare OmKeyInfo from OmDirectoryInfo.
+   *
+   * @param volumeName volume name
+   * @param bucketName bucket name
+   * @param dirInfo    directory info
+   * @param keyName    user given key name
+   * @return OmKeyInfo object
+   */
+  @NotNull
+  public static OmKeyInfo getOmKeyInfo(String volumeName, String bucketName,
+      OmDirectoryInfo dirInfo, String keyName) {
+
+    OmKeyInfo.Builder builder = new OmKeyInfo.Builder();
+    builder.setParentObjectID(dirInfo.getParentObjectID());
+    builder.setKeyName(keyName);
+    builder.setAcls(dirInfo.getAcls());
+    builder.addAllMetadata(dirInfo.getMetadata());
+    builder.setVolumeName(volumeName);
+    builder.setBucketName(bucketName);
+    builder.setCreationTime(dirInfo.getCreationTime());
+    builder.setModificationTime(dirInfo.getModificationTime());
+    builder.setObjectID(dirInfo.getObjectID());
+    builder.setUpdateID(dirInfo.getUpdateID());
+    builder.setFileName(dirInfo.getName());
+    builder.setReplicationType(HddsProtos.ReplicationType.RATIS);
+    builder.setReplicationFactor(HddsProtos.ReplicationFactor.ONE);
+    builder.setOmKeyLocationInfos(Collections.singletonList(
+            new OmKeyLocationInfoGroup(0, new ArrayList<>())));
+    return builder.build();
+  }
+
+  /**
+   * Returns absolute path.
+   *
+   * @param prefixName prefix path
+   * @param fileName   file name
+   * @return absolute path
+   */
+  @NotNull
+  public static String getAbsolutePath(String prefixName, String fileName) {
+    if (Strings.isNullOrEmpty(prefixName)) {
+      return fileName;
+    }
+    return prefixName.concat(OzoneConsts.OZONE_URI_DELIMITER).concat(fileName);
+  }
 }
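
getOMKeyInfoIfExists() above walks the key path one component at a time,
rebasing the parent object ID at each directory hit. A toy model of that walk
(plain HashMaps stand in for the dirTable/fileTable; the resolve helper and
all IDs here are illustrative only):

    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.HashMap;
    import java.util.Map;

    // dirTable and fileTable are keyed by "parentObjectID/name", as in the
    // patch; plain maps stand in for the RocksDB tables.
    final class PathWalkDemo {
      static final Map<String, Long> DIR_TABLE = new HashMap<>();
      static final Map<String, String> FILE_TABLE = new HashMap<>();

      static String resolve(long bucketObjectID, String keyName) {
        long parentId = bucketObjectID;
        Path path = Paths.get(keyName);
        int remaining = path.getNameCount();
        for (Path component : path) {
          remaining--;
          String dbKey = parentId + "/" + component;
          Long dirId = DIR_TABLE.get(dbKey);
          if (dirId != null) {
            parentId = dirId;              // descend into the directory
          } else if (remaining == 0) {
            return FILE_TABLE.get(dbKey);  // leaf: fall back to the fileTable
          } else {
            return null;                   // missing intermediate directory
          }
        }
        return "DIR:" + parentId;          // the whole path is a directory
      }

      public static void main(String[] args) {
        DIR_TABLE.put("512/a", 1024L);                     // bucket objectID = 512
        DIR_TABLE.put("1024/b", 1025L);
        FILE_TABLE.put("1025/file1.txt", "FILE:file1.txt");
        System.out.println(resolve(512, "a/b/file1.txt")); // FILE:file1.txt
        System.out.println(resolve(512, "a/b"));           // DIR:1025
        System.out.println(resolve(512, "a/x/y"));         // null (no /a/x dir)
      }
    }
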




[ozone] 14/19: HDDS-4771. [FSO]S3MultiPart: Implement InitiateMultiPartUpload (#1877)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 11af770b14dfd614de4b1d7a98d9f0dd4492c1c0
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Fri Feb 5 11:16:15 2021 +0530

    HDDS-4771. [FSO]S3MultiPart: Implement InitiateMultiPartUpload (#1877)
---
 .../apache/hadoop/ozone/om/helpers/OmKeyInfo.java  |  32 ++-
 .../ozone/om/helpers/OmMultipartKeyInfo.java       |  63 ++++-
 .../rpc/TestOzoneClientMultipartUploadV1.java      | 182 ++++++++++++++
 .../src/main/proto/OmClientProtocol.proto          |   1 +
 .../apache/hadoop/ozone/om/OMMetadataManager.java  |  10 +
 .../hadoop/ozone/om/OmMetadataManagerImpl.java     |  26 +-
 .../hadoop/ozone/om/codec/OMDBDefinition.java      |  11 +-
 .../om/ratis/utils/OzoneManagerRatisUtils.java     |   4 +
 .../ozone/om/request/file/OMFileRequest.java       |  26 ++
 .../S3InitiateMultipartUploadRequestV1.java        | 271 +++++++++++++++++++++
 .../S3MultipartUploadCompleteRequest.java          |   5 +-
 .../S3InitiateMultipartUploadResponseV1.java       |  80 ++++++
 .../TestS3InitiateMultipartUploadRequestV1.java    | 186 ++++++++++++++
 .../s3/multipart/TestS3MultipartRequest.java       |  29 +++
 .../TestS3InitiateMultipartUploadResponse.java     |   2 +-
 .../TestS3InitiateMultipartUploadResponseV1.java   |  86 +++++++
 .../s3/multipart/TestS3MultipartResponse.java      |  46 ++++
 17 files changed, 1048 insertions(+), 12 deletions(-)

diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
index d097714..be68d9b 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
@@ -54,8 +54,36 @@ public final class OmKeyInfo extends WithObjectID {
   private HddsProtos.ReplicationType type;
   private HddsProtos.ReplicationFactor factor;
   private FileEncryptionInfo encInfo;
-  private String fileName; // leaf node name
-  private long parentObjectID; // pointer to parent directory
+
+  /**
+   * A pointer to the parent directory, used for path traversal. ParentID is
+   * used only when the key is created in a FileSystemOptimized(FSO) bucket.
+   * <p>
+   * For example, if a key "a/b/key1" is created in an FSO bucket, then each
+   * path component will be assigned an ObjectId and linked to its parent
+   * path component using the parent's objectID.
+   * <p>
+   * Say, Bucket's ObjectID = 512, which is the parent for its immediate child
+   * element.
+   * <p>
+   * ------------------------------------------|
+   * PathComponent |   ObjectID   |   ParentID |
+   * ------------------------------------------|
+   *      a        |     1024     |     512    |
+   * ------------------------------------------|
+   *      b        |     1025     |     1024   |
+   * ------------------------------------------|
+   *     key1      |     1026     |     1025   |
+   * ------------------------------------------|
+   */
+  private long parentObjectID;
+
+  /**
+   * Represents the leaf node name. This is also used when the key is
+   * created in a FileSystemOptimized (FSO) bucket. For example, if the
+   * user-given keyName is "a/b/key1", then fileName stores "key1".
+   */
+  private String fileName;
 
   /**
    * ACL Information.
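
To make the ObjectID/ParentID scheme in the javadoc above concrete, here is
a minimal sketch (illustrative only, not part of the patch) of how the DB
keys for "a/b/key1" chain each path component to its parent:

    // IDs are the example values from the javadoc; the key format
    // "<parentId>/<name>" matches the directory/file table layout.
    long bucketId = 512L;
    String dirAKey = bucketId + "/a";   // "512/a"    -> dir "a",   objectID 1024
    String dirBKey = 1024L + "/b";      // "1024/b"   -> dir "b",   objectID 1025
    String fileKey = 1025L + "/key1";   // "1025/key1" -> file "key1", objectID 1026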
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java
index df8751c..be37f93 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java
@@ -37,6 +37,30 @@ public class OmMultipartKeyInfo extends WithObjectID {
   private TreeMap<Integer, PartKeyInfo> partKeyInfoList;
 
   /**
+   * A pointer to the parent directory, used for path traversal. ParentID is
+   * used only when the multipart key is created in a FileSystemOptimized
+   * (FSO) bucket.
+   * <p>
+   * For example, if a key "a/b/multiKey1" is created in an FSO bucket, then
+   * each path component is assigned an ObjectID and linked to its parent
+   * path component using the parent's objectID.
+   * <p>
+   * Say the bucket's ObjectID is 512; it is the parent of its immediate
+   * child element.
+   * <p>
+   * ------------------------------------------|
+   * PathComponent |   ObjectID   |   ParentID |
+   * ------------------------------------------|
+   *      a        |     1024     |     512    |
+   * ------------------------------------------|
+   *      b        |     1025     |     1024   |
+   * ------------------------------------------|
+   *   multiKey1   |     1026     |     1025   |
+   * ------------------------------------------|
+   */
+  private long parentID;
+
+  /**
    * Construct OmMultipartKeyInfo object which holds multipart upload
    * information for a key.
    */
@@ -53,6 +77,29 @@ public class OmMultipartKeyInfo extends WithObjectID {
   }
 
   /**
+   * Construct OmMultipartKeyInfo object which holds multipart upload
+   * information for a key.
+   */
+  @SuppressWarnings("parameternumber")
+  public OmMultipartKeyInfo(String id, long creationTime,
+      ReplicationType replicationType, ReplicationFactor replicationFactor,
+      Map<Integer, PartKeyInfo> list, long objectID, long updateID,
+      long parentObjId) {
+    this(id, creationTime, replicationType, replicationFactor, list, objectID,
+            updateID);
+    this.parentID = parentObjId;
+  }
+
+  /**
+   * Returns parentID.
+   *
+   * @return the parent object ID
+   */
+  public long getParentID() {
+    return parentID;
+  }
+
+  /**
    * Returns the uploadID for this multi part upload of a key.
    * @return uploadID
    */
@@ -95,6 +142,7 @@ public class OmMultipartKeyInfo extends WithObjectID {
     private TreeMap<Integer, PartKeyInfo> partKeyInfoList;
     private long objectID;
     private long updateID;
+    private long parentID;
 
     public Builder() {
       this.partKeyInfoList = new TreeMap<>();
@@ -144,9 +192,14 @@ public class OmMultipartKeyInfo extends WithObjectID {
       return this;
     }
 
+    public Builder setParentID(long parentObjId) {
+      this.parentID = parentObjId;
+      return this;
+    }
+
     public OmMultipartKeyInfo build() {
       return new OmMultipartKeyInfo(uploadID, creationTime, replicationType,
-          replicationFactor, partKeyInfoList, objectID, updateID);
+          replicationFactor, partKeyInfoList, objectID, updateID, parentID);
     }
   }
 
@@ -163,7 +216,7 @@ public class OmMultipartKeyInfo extends WithObjectID {
     return new OmMultipartKeyInfo(multipartKeyInfo.getUploadID(),
         multipartKeyInfo.getCreationTime(), multipartKeyInfo.getType(),
         multipartKeyInfo.getFactor(), list, multipartKeyInfo.getObjectID(),
-        multipartKeyInfo.getUpdateID());
+        multipartKeyInfo.getUpdateID(), multipartKeyInfo.getParentID());
   }
 
   /**
@@ -177,7 +230,8 @@ public class OmMultipartKeyInfo extends WithObjectID {
         .setType(replicationType)
         .setFactor(replicationFactor)
         .setObjectID(objectID)
-        .setUpdateID(updateID);
+        .setUpdateID(updateID)
+        .setParentID(parentID);
     partKeyInfoList.forEach((key, value) -> builder.addPartKeyInfoList(value));
     return builder.build();
   }
@@ -205,7 +259,8 @@ public class OmMultipartKeyInfo extends WithObjectID {
     // For partKeyInfoList we can do shallow copy here, as the PartKeyInfo is
     // immutable here.
     return new OmMultipartKeyInfo(uploadID, creationTime, replicationType,
-        replicationFactor, new TreeMap<>(partKeyInfoList), objectID, updateID);
+        replicationFactor, new TreeMap<>(partKeyInfoList), objectID, updateID,
+        parentID);
   }
 
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java
new file mode 100644
index 0000000..93e5826
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java
@@ -0,0 +1,182 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.client.rpc;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
+
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+import java.io.IOException;
+import java.util.UUID;
+
+import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
+import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
+
+/**
+ * This test verifies the S3 multipart upload client APIs - layout version V1.
+ */
+public class TestOzoneClientMultipartUploadV1 {
+
+  private static ObjectStore store = null;
+  private static MiniOzoneCluster cluster = null;
+  private static OzoneClient ozClient = null;
+
+  private static String scmId = UUID.randomUUID().toString();
+
+  /**
+   * Set a timeout for each test.
+   */
+  @Rule
+  public Timeout timeout = new Timeout(300000);
+
+  /**
+   * Create a MiniOzoneCluster for testing.
+   * <p>
+   * FS-optimized paths (layout version V1) are enabled in the configuration.
+   *
+   * @throws Exception
+   */
+  @BeforeClass
+  public static void init() throws Exception {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    TestOMRequestUtils.configureFSOptimizedPaths(conf,
+            true, OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1);
+    startCluster(conf);
+  }
+
+  /**
+   * Close OzoneClient and shutdown MiniOzoneCluster.
+   */
+  @AfterClass
+  public static void shutdown() throws IOException {
+    shutdownCluster();
+  }
+
+
+  /**
+   * Create a MiniOzoneCluster for testing.
+   * @param conf Configurations to start the cluster.
+   * @throws Exception
+   */
+  static void startCluster(OzoneConfiguration conf) throws Exception {
+    cluster = MiniOzoneCluster.newBuilder(conf)
+            .setNumDatanodes(3)
+            .setTotalPipelineNumLimit(10)
+            .setScmId(scmId)
+            .build();
+    cluster.waitForClusterToBeReady();
+    ozClient = OzoneClientFactory.getRpcClient(conf);
+    store = ozClient.getObjectStore();
+  }
+
+  /**
+   * Close OzoneClient and shutdown MiniOzoneCluster.
+   */
+  static void shutdownCluster() throws IOException {
+    if (ozClient != null) {
+      ozClient.close();
+    }
+
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testInitiateMultipartUploadWithReplicationInformationSet() throws
+          IOException {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+    OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
+            STAND_ALONE, ONE);
+
+    assertNotNull(multipartInfo);
+    String uploadID = multipartInfo.getUploadID();
+    Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
+    Assert.assertEquals(bucketName, multipartInfo.getBucketName());
+    Assert.assertEquals(keyName, multipartInfo.getKeyName());
+    assertNotNull(multipartInfo.getUploadID());
+
+    // Call initiate multipart upload for the same key again, this should
+    // generate a new uploadID.
+    multipartInfo = bucket.initiateMultipartUpload(keyName,
+            STAND_ALONE, ONE);
+
+    assertNotNull(multipartInfo);
+    Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
+    Assert.assertEquals(bucketName, multipartInfo.getBucketName());
+    Assert.assertEquals(keyName, multipartInfo.getKeyName());
+    assertNotEquals(multipartInfo.getUploadID(), uploadID);
+    assertNotNull(multipartInfo.getUploadID());
+  }
+
+  @Test
+  public void testInitiateMultipartUploadWithDefaultReplication() throws
+          IOException {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+    OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName);
+
+    assertNotNull(multipartInfo);
+    String uploadID = multipartInfo.getUploadID();
+    Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
+    Assert.assertEquals(bucketName, multipartInfo.getBucketName());
+    Assert.assertEquals(keyName, multipartInfo.getKeyName());
+    assertNotNull(multipartInfo.getUploadID());
+
+    // Call initiate multipart upload for the same key again, this should
+    // generate a new uploadID.
+    multipartInfo = bucket.initiateMultipartUpload(keyName);
+
+    assertNotNull(multipartInfo);
+    Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
+    Assert.assertEquals(bucketName, multipartInfo.getBucketName());
+    Assert.assertEquals(keyName, multipartInfo.getKeyName());
+    assertNotEquals(multipartInfo.getUploadID(), uploadID);
+    assertNotNull(multipartInfo.getUploadID());
+  }
+
+}
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index f429276..80999be 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -1099,6 +1099,7 @@ message MultipartKeyInfo {
     repeated PartKeyInfo partKeyInfoList = 5;
     optional uint64 objectID = 6;
     optional uint64 updateID = 7;
+    optional uint64 parentID = 8;
 }
 
 message PartKeyInfo {
diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
index caa8ed7..b354643 100644
--- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
+++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
@@ -425,4 +425,14 @@ public interface OMMetadataManager {
    * @return DB directory key as String.
    */
   String getOpenFileName(long parentObjectId, String fileName, long id);
+
+  /**
+   * Returns the DB key name of a multipart upload key in OM metadata store.
+   *
+   * @param parentObjectId - parent object Id
+   * @param fileName       - file name
+   * @param uploadId       - the upload id for this key
+   * @return multipart DB key as String.
+   */
+  String getMultipartKey(long parentObjectId, String fileName, String uploadId);
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index 3ac234a..fd6853a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -131,9 +131,11 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
    * |----------------------------------------------------------------------|
    * |  directoryTable    | parentId/directoryName -> DirectoryInfo         |
    * |----------------------------------------------------------------------|
-   * |  fileTable         | parentId/fileName -> KeyInfo                |
+   * |  fileTable         | parentId/fileName -> KeyInfo                    |
    * |----------------------------------------------------------------------|
-   * |  openFileTable     | parentId/fileName/id -> KeyInfo                   |
+   * |  openFileTable     | parentId/fileName/id -> KeyInfo                 |
+   * |----------------------------------------------------------------------|
+   * |  multipartFileInfoTable | parentId/fileName/uploadId -> MPartKeyInfo |
    * |----------------------------------------------------------------------|
    * |  transactionInfoTable | #TRANSACTIONINFO -> OMTransactionInfo        |
    * |----------------------------------------------------------------------|
@@ -152,6 +154,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
   public static final String DIRECTORY_TABLE = "directoryTable";
   public static final String FILE_TABLE = "fileTable";
   public static final String OPEN_FILE_TABLE = "openFileTable";
+  public static final String MULTIPARTFILEINFO_TABLE = "multipartFileInfoTable";
   public static final String TRANSACTION_INFO_TABLE =
       "transactionInfoTable";
 
@@ -176,6 +179,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
   private Table transactionInfoTable;
   private boolean isRatisEnabled;
   private boolean ignorePipelineinKey;
+  private Table<String, OmMultipartKeyInfo> multipartFileInfoTable;
 
   // Epoch is used to generate the objectIDs. The most significant 2 bits of
   // objectIDs is set to this epoch. For clusters before HDDS-4315 there is
@@ -271,6 +275,9 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
 
   @Override
   public Table<String, OmMultipartKeyInfo> getMultipartInfoTable() {
+    if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
+      return multipartFileInfoTable;
+    }
     return multipartInfoTable;
   }
 
@@ -364,6 +371,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
         .addTable(DIRECTORY_TABLE)
         .addTable(FILE_TABLE)
         .addTable(OPEN_FILE_TABLE)
+        .addTable(MULTIPARTFILEINFO_TABLE)
         .addTable(TRANSACTION_INFO_TABLE)
         .addCodec(OzoneTokenIdentifier.class, new TokenIdentifierCodec())
         .addCodec(OmKeyInfo.class, new OmKeyInfoCodec(true))
@@ -442,6 +450,10 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
             OmKeyInfo.class);
     checkTableStatus(openFileTable, OPEN_FILE_TABLE);
 
+    multipartFileInfoTable = this.store.getTable(MULTIPARTFILEINFO_TABLE,
+            String.class, OmMultipartKeyInfo.class);
+    checkTableStatus(multipartFileInfoTable, MULTIPARTFILEINFO_TABLE);
+
     transactionInfoTable = this.store.getTable(TRANSACTION_INFO_TABLE,
         String.class, OMTransactionInfo.class);
     checkTableStatus(transactionInfoTable, TRANSACTION_INFO_TABLE);
@@ -1224,4 +1236,14 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
     openKey.append(OM_KEY_PREFIX).append(id);
     return openKey.toString();
   }
+
+  @Override
+  public String getMultipartKey(long parentID, String fileName,
+                                String uploadId) {
+    StringBuilder multipartKey = new StringBuilder();
+    multipartKey.append(parentID);
+    multipartKey.append(OM_KEY_PREFIX).append(fileName);
+    multipartKey.append(OM_KEY_PREFIX).append(uploadId);
+    return multipartKey.toString();
+  }
 }
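
For reference, the multipart DB key built by getMultipartKey() above is just
the three parts joined by OM_KEY_PREFIX ("/"); a short sketch with made-up
values:

    // Illustrative values only; the upload id is hypothetical.
    long parentID = 1025L;
    String fileName = "key1";
    String uploadId = "b9f2c7e0-upload-id";
    // getMultipartKey(parentID, fileName, uploadId) returns:
    //   "1025/key1/b9f2c7e0-upload-id"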
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
index d025948..5362bac 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
@@ -170,6 +170,15 @@ public class OMDBDefinition implements DBDefinition {
                   OmKeyInfo.class,
                   new OmKeyInfoCodec(true));
 
+  public static final DBColumnFamilyDefinition<String, OmMultipartKeyInfo>
+          MULTIPART_FILEINFO_TABLE =
+          new DBColumnFamilyDefinition<>(
+                  OmMetadataManagerImpl.MULTIPARTFILEINFO_TABLE,
+                  String.class,
+                  new StringCodec(),
+                  OmMultipartKeyInfo.class,
+                  new OmMultipartKeyInfoCodec());
+
   @Override
   public String getName() {
     return OzoneConsts.OM_DB_NAME;
@@ -186,7 +195,7 @@ public class OMDBDefinition implements DBDefinition {
         VOLUME_TABLE, OPEN_KEY_TABLE, KEY_TABLE,
         BUCKET_TABLE, MULTIPART_INFO_TABLE, PREFIX_TABLE, DTOKEN_TABLE,
         S3_SECRET_TABLE, TRANSACTION_INFO_TABLE, DIRECTORY_TABLE,
-        FILE_TABLE, OPEN_FILE_TABLE};
+        FILE_TABLE, OPEN_FILE_TABLE, MULTIPART_FILEINFO_TABLE};
   }
 }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index 26fded2..8daa12b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.ozone.om.request.key.acl.prefix.OMPrefixAddAclRequest;
 import org.apache.hadoop.ozone.om.request.key.acl.prefix.OMPrefixRemoveAclRequest;
 import org.apache.hadoop.ozone.om.request.key.acl.prefix.OMPrefixSetAclRequest;
 import org.apache.hadoop.ozone.om.request.s3.multipart.S3InitiateMultipartUploadRequest;
+import org.apache.hadoop.ozone.om.request.s3.multipart.S3InitiateMultipartUploadRequestV1;
 import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadAbortRequest;
 import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCommitPartRequest;
 import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCompleteRequest;
@@ -182,6 +183,9 @@ public final class OzoneManagerRatisUtils {
     case PurgeKeys:
       return new OMKeyPurgeRequest(omRequest);
     case InitiateMultiPartUpload:
+      if (isBucketFSOptimized()) {
+        return new S3InitiateMultipartUploadRequestV1(omRequest);
+      }
       return new S3InitiateMultipartUploadRequest(omRequest);
     case CommitMultiPartUpload:
       return new S3MultipartUploadCommitPartRequest(omRequest);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index aadc126..7f2d2c5 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -456,6 +456,7 @@ public final class OMFileRequest {
       // For example, the user given key path is '/a/b/c/d/e/file1', then in DB
       // keyName field stores only the leaf node name, which is 'file1'.
       omFileInfo.setKeyName(fileName);
+      omFileInfo.setFileName(fileName);
       keyInfoOptional = Optional.of(omFileInfo);
     }
 
@@ -481,6 +482,7 @@ public final class OMFileRequest {
     // For example, the user given key path is '/a/b/c/d/e/file1', then in DB
     // keyName field stores only the leaf node name, which is 'file1'.
     omFileInfo.setKeyName(fileName);
+    omFileInfo.setFileName(fileName);
 
     omMetadataManager.getKeyTable().addCacheEntry(
             new CacheKey<>(dbFileKey),
@@ -511,6 +513,30 @@ public final class OMFileRequest {
   }
 
   /**
+   * Adds the multipart file's omKeyInfo to the open file table.
+   *
+   * @param omMetadataMgr OM Metadata Manager
+   * @param batchOp       batch of db operations
+   * @param omFileInfo    omKeyInfo
+   * @param uploadID      uploadID
+   * @return multipartFileKey
+   * @throws IOException DB failure
+   */
+  public static String addToOpenFileTable(OMMetadataManager omMetadataMgr,
+      BatchOperation batchOp, OmKeyInfo omFileInfo, String uploadID)
+          throws IOException {
+
+    String multipartFileKey = omMetadataMgr.getMultipartKey(
+            omFileInfo.getParentObjectID(), omFileInfo.getFileName(),
+            uploadID);
+
+    omMetadataMgr.getOpenKeyTable().putWithBatch(batchOp, multipartFileKey,
+            omFileInfo);
+
+    return multipartFileKey;
+  }
+
+  /**
    * Adding omKeyInfo to file table.
    *
    * @param omMetadataMgr
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestV1.java
new file mode 100644
index 0000000..3507090
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestV1.java
@@ -0,0 +1,271 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.s3.multipart;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestV1;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.s3.multipart.S3InitiateMultipartUploadResponse;
+import org.apache.hadoop.ozone.om.response.s3.multipart.S3InitiateMultipartUploadResponseV1;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_SUPPORTED_OPERATION;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS;
+
+/**
+ * Handles the initiate multipart upload request for FSO buckets (layout V1).
+ */
+public class S3InitiateMultipartUploadRequestV1
+        extends S3InitiateMultipartUploadRequest {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(S3InitiateMultipartUploadRequestV1.class);
+
+  public S3InitiateMultipartUploadRequestV1(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override
+  @SuppressWarnings("methodlength")
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long transactionLogIndex,
+      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
+    MultipartInfoInitiateRequest multipartInfoInitiateRequest =
+        getOmRequest().getInitiateMultiPartUploadRequest();
+
+    KeyArgs keyArgs =
+        multipartInfoInitiateRequest.getKeyArgs();
+
+    Preconditions.checkNotNull(keyArgs.getMultipartUploadID());
+
+    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
+
+    String volumeName = keyArgs.getVolumeName();
+    String bucketName = keyArgs.getBucketName();
+    final String requestedVolume = volumeName;
+    final String requestedBucket = bucketName;
+    String keyName = keyArgs.getKeyName();
+
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+
+    ozoneManager.getMetrics().incNumInitiateMultipartUploads();
+    boolean acquiredBucketLock = false;
+    IOException exception = null;
+    OmMultipartKeyInfo multipartKeyInfo = null;
+    OmKeyInfo omKeyInfo = null;
+    List<OmDirectoryInfo> missingParentInfos;
+    Result result = null;
+
+    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
+        getOmRequest());
+    OMClientResponse omClientResponse = null;
+    try {
+      keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
+      volumeName = keyArgs.getVolumeName();
+      bucketName = keyArgs.getBucketName();
+
+      // TODO to support S3 ACL later.
+      acquiredBucketLock =
+          omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
+              volumeName, bucketName);
+
+      validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
+
+      // If KMS is configured and TDE is enabled on the bucket, reject the
+      // request: MPU is not yet supported on encrypted buckets.
+      if (ozoneManager.getKmsProvider() != null) {
+        if (omMetadataManager.getBucketTable().get(
+            omMetadataManager.getBucketKey(volumeName, bucketName))
+            .getEncryptionKeyInfo() != null) {
+          throw new OMException("MultipartUpload is not yet supported on " +
+              "encrypted buckets", NOT_SUPPORTED_OPERATION);
+        }
+      }
+
+      OMFileRequest.OMPathInfoV1 pathInfoV1 =
+              OMFileRequest.verifyDirectoryKeysInPath(omMetadataManager,
+                      volumeName, bucketName, keyName, Paths.get(keyName));
+
+      // check if the directory already existed in OM
+      checkDirectoryResult(keyName, pathInfoV1.getDirectoryResult());
+
+      // add all missing parents to dir table
+      missingParentInfos =
+              OMDirectoryCreateRequestV1.getAllMissingParentDirInfo(
+                      ozoneManager, keyArgs, pathInfoV1, transactionLogIndex);
+
+      // We add the uploadId to the key because multiple users may run
+      // multipart uploads on the same key concurrently; whoever finally
+      // commits the key is the one whose version is visible in Ozone. If
+      // we did not add the id and used the bare key /volume/bucket/key,
+      // then parts uploaded by different users would be applied to the
+      // same key, and the final output could be a mix of parts from
+      // multiple users.
+
+      // So, when multipart upload is initiated multiple times on the same
+      // key, we store multiple entries in the openKey table.
+      // AWS S3 behaves the same way: each initiate multipart upload call
+      // returns a new uploadId, and an uploadId is returned even if the
+      // key already exists in the bucket when the initiate multipart
+      // upload request is received.
+
+      String multipartKey = omMetadataManager.getMultipartKey(
+              pathInfoV1.getLastKnownParentId(), pathInfoV1.getLeafNodeName(),
+              keyArgs.getMultipartUploadID());
+
+      // Even if this key already exists in the KeyTable, it is taken care
+      // of in the final complete multipart upload. AWS S3 behaves the same
+      // way: even when a key already exists in a bucket, the user can
+      // still initiate an MPU on it.
+
+      multipartKeyInfo = new OmMultipartKeyInfo.Builder()
+          .setUploadID(keyArgs.getMultipartUploadID())
+          .setCreationTime(keyArgs.getModificationTime())
+          .setReplicationType(keyArgs.getType())
+          .setReplicationFactor(keyArgs.getFactor())
+          .setObjectID(pathInfoV1.getLeafNodeObjectId())
+          .setUpdateID(transactionLogIndex)
+          .setParentID(pathInfoV1.getLastKnownParentId())
+          .build();
+
+      omKeyInfo = new OmKeyInfo.Builder()
+          .setVolumeName(volumeName)
+          .setBucketName(bucketName)
+          .setKeyName(keyArgs.getKeyName())
+          .setCreationTime(keyArgs.getModificationTime())
+          .setModificationTime(keyArgs.getModificationTime())
+          .setReplicationType(keyArgs.getType())
+          .setReplicationFactor(keyArgs.getFactor())
+          .setOmKeyLocationInfos(Collections.singletonList(
+              new OmKeyLocationInfoGroup(0, new ArrayList<>())))
+          .setAcls(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList()))
+          .setObjectID(pathInfoV1.getLeafNodeObjectId())
+          .setUpdateID(transactionLogIndex)
+          .setParentObjectID(pathInfoV1.getLastKnownParentId())
+          .build();
+
+      // Add cache entries for the prefix directories.
+      // Skip adding for the file key itself, until Key Commit.
+      OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager,
+              Optional.absent(), Optional.of(missingParentInfos),
+              transactionLogIndex);
+
+      OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager,
+              multipartKey, omKeyInfo, pathInfoV1.getLeafNodeName(),
+              transactionLogIndex);
+
+      // Add the multipart key info to the multipart info table cache.
+      omMetadataManager.getMultipartInfoTable().addCacheEntry(
+          new CacheKey<>(multipartKey),
+          new CacheValue<>(Optional.of(multipartKeyInfo), transactionLogIndex));
+
+      omClientResponse =
+          new S3InitiateMultipartUploadResponseV1(
+              omResponse.setInitiateMultiPartUploadResponse(
+                  MultipartInfoInitiateResponse.newBuilder()
+                      .setVolumeName(requestedVolume)
+                      .setBucketName(requestedBucket)
+                      .setKeyName(keyName)
+                      .setMultipartUploadID(keyArgs.getMultipartUploadID()))
+                  .build(), multipartKeyInfo, omKeyInfo, missingParentInfos);
+
+      result = Result.SUCCESS;
+    } catch (IOException ex) {
+      result = Result.FAILURE;
+      exception = ex;
+      omClientResponse = new S3InitiateMultipartUploadResponse(
+          createErrorOMResponse(omResponse, exception));
+    } finally {
+      addResponseToDoubleBuffer(transactionLogIndex, omClientResponse,
+          ozoneManagerDoubleBufferHelper);
+      if (acquiredBucketLock) {
+        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK,
+            volumeName, bucketName);
+      }
+    }
+
+    // audit log
+    auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(
+        OMAction.INITIATE_MULTIPART_UPLOAD, auditMap,
+        exception, getOmRequest().getUserInfo()));
+
+    switch (result) {
+    case SUCCESS:
+      LOG.debug("S3 InitiateMultipart Upload request for Key {} in " +
+              "Volume/Bucket {}/{} is successfully completed", keyName,
+          volumeName, bucketName);
+      break;
+    case FAILURE:
+      ozoneManager.getMetrics().incNumInitiateMultipartUploadFails();
+      LOG.error("S3 InitiateMultipart Upload request for Key {} in " +
+              "Volume/Bucket {}/{} is failed", keyName, volumeName, bucketName,
+          exception);
+      break;
+    default:
+      LOG.error("Unrecognized Result for S3InitiateMultipartUploadRequest: {}",
+          multipartInfoInitiateRequest);
+    }
+
+    return omClientResponse;
+  }
+
+  /**
+   * Verify the OM directory result.
+   *
+   * @param keyName           key name
+   * @param omDirectoryResult directory result
+   * @throws OMException if a directory already exists in the given path
+   */
+  private void checkDirectoryResult(String keyName,
+      OMFileRequest.OMDirectoryResult omDirectoryResult) throws OMException {
+    if (omDirectoryResult == DIRECTORY_EXISTS) {
+      throw new OMException("Can not write to directory: " + keyName,
+              OMException.ResultCodes.NOT_A_FILE);
+    }
+  }
+}
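
Putting the pieces together: for a key "a/b/key1" in an FSO bucket, the
request above stages roughly the following cache entries, which the
S3InitiateMultipartUploadResponseV1 class further below batches to the DB.
The table names come from the patch; the IDs and the upload id are
illustrative:

    // directoryTable:          "512/a"  -> OmDirectoryInfo("a", objectID=1024)
    //                          "1024/b" -> OmDirectoryInfo("b", objectID=1025)
    // openFileTable:           "1025/key1/<uploadId>" -> OmKeyInfo("key1")
    // multipartFileInfoTable:  "1025/key1/<uploadId>" -> OmMultipartKeyInfo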
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
index c4de2fb..4590889 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
@@ -143,9 +143,10 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
       // Check for directory exists with same name, if it exists throw error. 
       if (ozoneManager.getEnableFileSystemPaths()) {
         if (checkDirectoryAlreadyExists(volumeName, bucketName, keyName,
-            omMetadataManager)) {
+                omMetadataManager)) {
           throw new OMException("Can not Complete MPU for file: " + keyName +
-              " as there is already directory in the given path", NOT_A_FILE);
+                  " as there is already directory in the given path",
+                  NOT_A_FILE);
         }
       }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponseV1.java
new file mode 100644
index 0000000..ff3e63f
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponseV1.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.om.response.s3.multipart;
+
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+
+import javax.annotation.Nonnull;
+import java.io.IOException;
+import java.util.List;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.MULTIPARTFILEINFO_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
+
+/**
+ * Response for S3 Initiate Multipart Upload request for layout V1.
+ */
+@CleanupTableInfo(cleanupTables = {DIRECTORY_TABLE, OPEN_FILE_TABLE,
+        MULTIPARTFILEINFO_TABLE})
+public class S3InitiateMultipartUploadResponseV1 extends
+        S3InitiateMultipartUploadResponse {
+  private List<OmDirectoryInfo> parentDirInfos;
+
+  public S3InitiateMultipartUploadResponseV1(
+      @Nonnull OMResponse omResponse,
+      @Nonnull OmMultipartKeyInfo omMultipartKeyInfo,
+      @Nonnull OmKeyInfo omKeyInfo,
+      @Nonnull List<OmDirectoryInfo> parentDirInfos) {
+    super(omResponse, omMultipartKeyInfo, omKeyInfo);
+    this.parentDirInfos = parentDirInfos;
+  }
+
+  @Override
+  public void addToDBBatch(OMMetadataManager omMetadataManager,
+      BatchOperation batchOperation) throws IOException {
+
+    /*
+     * Create parent directory entries during MultiPartFileKey create;
+     * do not wait for the File Commit request.
+     */
+    if (parentDirInfos != null) {
+      for (OmDirectoryInfo parentDirInfo : parentDirInfos) {
+        String parentKey = parentDirInfo.getPath();
+        omMetadataManager.getDirectoryTable().putWithBatch(batchOperation,
+                parentKey, parentDirInfo);
+      }
+    }
+
+    String multipartFileKey =
+            OMFileRequest.addToOpenFileTable(omMetadataManager, batchOperation,
+                    getOmKeyInfo(), getOmMultipartKeyInfo().getUploadID());
+
+    omMetadataManager.getMultipartInfoTable().putWithBatch(batchOperation,
+            multipartFileKey, getOmMultipartKeyInfo());
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestV1.java
new file mode 100644
index 0000000..dac2efe
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestV1.java
@@ -0,0 +1,186 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.om.request.s3.multipart;
+
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.UUID;
+
+/**
+ * Tests S3 Initiate Multipart Upload request - layout version V1.
+ */
+public class TestS3InitiateMultipartUploadRequestV1
+    extends TestS3InitiateMultipartUploadRequest {
+
+  @Test
+  public void testValidateAndUpdateCache() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String prefix = "a/b/c/";
+    List<String> dirs = new ArrayList<String>();
+    dirs.add("a");
+    dirs.add("b");
+    dirs.add("c");
+    String fileName = UUID.randomUUID().toString();
+    String keyName = prefix + fileName;
+
+    // Add volume and bucket to DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+        omMetadataManager);
+
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketID = omBucketInfo.getObjectID();
+
+    OMRequest modifiedRequest = doPreExecuteInitiateMPUV1(volumeName,
+        bucketName, keyName);
+
+    S3InitiateMultipartUploadRequestV1 s3InitiateMultipartUploadRequestV1 =
+        new S3InitiateMultipartUploadRequestV1(modifiedRequest);
+
+    OMClientResponse omClientResponse =
+            s3InitiateMultipartUploadRequestV1.validateAndUpdateCache(
+                    ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
+            omClientResponse.getOMResponse().getStatus());
+
+    long parentID = verifyDirectoriesInDB(dirs, bucketID);
+
+    String multipartFileKey = omMetadataManager.getMultipartKey(parentID,
+            fileName, modifiedRequest.getInitiateMultiPartUploadRequest()
+                    .getKeyArgs().getMultipartUploadID());
+
+    OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable()
+            .get(multipartFileKey);
+    Assert.assertNotNull("Failed to find the fileInfo", omKeyInfo);
+    Assert.assertEquals("FileName mismatches!", fileName,
+            omKeyInfo.getKeyName());
+    Assert.assertEquals("ParentId mismatches!", parentID,
+            omKeyInfo.getParentObjectID());
+
+    OmMultipartKeyInfo omMultipartKeyInfo = omMetadataManager
+            .getMultipartInfoTable().get(multipartFileKey);
+    Assert.assertNotNull("Failed to find the multipartFileInfo",
+            omMultipartKeyInfo);
+    Assert.assertEquals("ParentId mismatches!", parentID,
+            omMultipartKeyInfo.getParentID());
+
+    Assert.assertEquals(modifiedRequest.getInitiateMultiPartUploadRequest()
+            .getKeyArgs().getMultipartUploadID(),
+        omMultipartKeyInfo
+            .getUploadID());
+
+    Assert.assertEquals(modifiedRequest.getInitiateMultiPartUploadRequest()
+        .getKeyArgs().getModificationTime(),
+        omKeyInfo
+        .getModificationTime());
+    Assert.assertEquals(modifiedRequest.getInitiateMultiPartUploadRequest()
+            .getKeyArgs().getModificationTime(),
+        omKeyInfo
+            .getCreationTime());
+  }
+
+  @Test
+  public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager);
+
+    OMRequest modifiedRequest = doPreExecuteInitiateMPU(
+        volumeName, bucketName, keyName);
+
+    S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
+        new S3InitiateMultipartUploadRequest(modifiedRequest);
+
+    OMClientResponse omClientResponse =
+        s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager,
+            100L, ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND,
+        omClientResponse.getOMResponse().getStatus());
+
+    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
+        bucketName, keyName, modifiedRequest.getInitiateMultiPartUploadRequest()
+            .getKeyArgs().getMultipartUploadID());
+
+    Assert.assertTrue(omMetadataManager.getOpenKeyTable().isEmpty());
+    Assert.assertTrue(omMetadataManager.getMultipartInfoTable().isEmpty());
+  }
+
+  @Test
+  public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    OMRequest modifiedRequest = doPreExecuteInitiateMPU(volumeName, bucketName,
+        keyName);
+
+    S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
+        new S3InitiateMultipartUploadRequest(modifiedRequest);
+
+    OMClientResponse omClientResponse =
+        s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager,
+            100L, ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND,
+        omClientResponse.getOMResponse().getStatus());
+
+    Assert.assertTrue(omMetadataManager.getOpenKeyTable().isEmpty());
+    Assert.assertTrue(omMetadataManager.getMultipartInfoTable().isEmpty());
+  }
+
+  private long verifyDirectoriesInDB(List<String> dirs, long bucketID)
+      throws IOException {
+    // bucketID is the parent
+    long parentID = bucketID;
+    for (int indx = 0; indx < dirs.size(); indx++) {
+      String dirName = dirs.get(indx);
+      String dbKey = "";
+      // for index=0, parentID is bucketID
+      dbKey = omMetadataManager.getOzonePathKey(parentID, dirName);
+      OmDirectoryInfo omDirInfo =
+              omMetadataManager.getDirectoryTable().get(dbKey);
+      Assert.assertNotNull("Invalid directory!", omDirInfo);
+      Assert.assertEquals("Invalid directory!", dirName, omDirInfo.getName());
+      Assert.assertEquals("Invalid dir path!",
+              parentID + "/" + dirName, omDirInfo.getPath());
+      parentID = omDirInfo.getObjectID();
+    }
+    return parentID;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
index f2c5b66..641ee8d 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
@@ -218,4 +218,33 @@ public class TestS3MultipartRequest {
   }
 
 
+  /**
+   * Perform preExecute of the initiate multipart upload request (layout V1)
+   * for the given volume, bucket and key name.
+   * @param volumeName
+   * @param bucketName
+   * @param keyName
+   * @return OMRequest - returned from preExecute.
+   */
+  protected OMRequest doPreExecuteInitiateMPUV1(
+      String volumeName, String bucketName, String keyName) throws Exception {
+    OMRequest omRequest =
+            TestOMRequestUtils.createInitiateMPURequest(volumeName, bucketName,
+                    keyName);
+
+    S3InitiateMultipartUploadRequestV1 s3InitiateMultipartUploadRequestV1 =
+            new S3InitiateMultipartUploadRequestV1(omRequest);
+
+    OMRequest modifiedRequest =
+            s3InitiateMultipartUploadRequestV1.preExecute(ozoneManager);
+
+    Assert.assertNotEquals(omRequest, modifiedRequest);
+    Assert.assertTrue(modifiedRequest.hasInitiateMultiPartUploadRequest());
+    Assert.assertNotNull(modifiedRequest.getInitiateMultiPartUploadRequest()
+            .getKeyArgs().getMultipartUploadID());
+    Assert.assertTrue(modifiedRequest.getInitiateMultiPartUploadRequest()
+            .getKeyArgs().getModificationTime() > 0);
+
+    return modifiedRequest;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponse.java
index 4996bd0..03065ab 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponse.java
@@ -31,7 +31,7 @@ public class TestS3InitiateMultipartUploadResponse
     extends TestS3MultipartResponse {
 
   @Test
-  public void addDBToBatch() throws Exception {
+  public void testAddDBToBatch() throws Exception {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
     String keyName = UUID.randomUUID().toString();
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponseV1.java
new file mode 100644
index 0000000..31f9e5a
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponseV1.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.om.response.s3.multipart;
+
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.UUID;
+
+/**
+ * Tests the S3 Initiate MPU response - layout version V1.
+ */
+public class TestS3InitiateMultipartUploadResponseV1
+    extends TestS3InitiateMultipartUploadResponse {
+
+  @Test
+  public void testAddDBToBatch() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String prefix = "a/b/c/d/";
+    List<String> dirs = new ArrayList<String>();
+    dirs.add("a");
+    dirs.add("b");
+    dirs.add("c");
+    dirs.add("d");
+    String fileName = UUID.randomUUID().toString();
+    String keyName = prefix + fileName;
+
+    String multipartUploadID = UUID.randomUUID().toString();
+
+    long parentID = 1027; // assume objectID of dir path "a/b/c/d" is 1027
+    List<OmDirectoryInfo> parentDirInfos = new ArrayList<>();
+
+    S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponseV1 =
+            createS3InitiateMPUResponseV1(volumeName, bucketName, parentID,
+                    keyName, multipartUploadID, parentDirInfos);
+
+    s3InitiateMultipartUploadResponseV1.addToDBBatch(omMetadataManager,
+        batchOperation);
+
+    // Do manual commit and see whether addToBatch is successful or not.
+    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+
+    String multipartKey = omMetadataManager.getMultipartKey(parentID, fileName,
+            multipartUploadID);
+
+    OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(multipartKey);
+    Assert.assertNotNull("Failed to find the fileInfo", omKeyInfo);
+    Assert.assertEquals("FileName mismatches!", fileName,
+            omKeyInfo.getKeyName());
+    Assert.assertEquals("ParentId mismatches!", parentID,
+            omKeyInfo.getParentObjectID());
+
+    OmMultipartKeyInfo omMultipartKeyInfo = omMetadataManager
+            .getMultipartInfoTable().get(multipartKey);
+    Assert.assertNotNull("Failed to find the multipartFileInfo",
+            omMultipartKeyInfo);
+    Assert.assertEquals("ParentId mismatches!", parentID,
+            omMultipartKeyInfo.getParentID());
+
+    Assert.assertEquals("Upload Id mismatches!", multipartUploadID,
+            omMultipartKeyInfo.getUploadID());
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
index 4f50d9e..76ceb0e 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.om.response.s3.multipart;
 
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.List;
 import java.util.UUID;
 
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
@@ -30,9 +31,11 @@ import org.junit.Rule;
 import org.junit.rules.TemporaryFolder;
 
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .KeyInfo;
@@ -152,4 +155,47 @@ public class TestS3MultipartResponse {
             .setType(HddsProtos.ReplicationType.RATIS)
             .setFactor(HddsProtos.ReplicationFactor.ONE).build()).build();
   }
+
+
+  public S3InitiateMultipartUploadResponse createS3InitiateMPUResponseV1(
+      String volumeName, String bucketName, long parentID, String keyName,
+      String multipartUploadID, List<OmDirectoryInfo> parentDirInfos) {
+    OmMultipartKeyInfo multipartKeyInfo = new OmMultipartKeyInfo.Builder()
+            .setUploadID(multipartUploadID)
+            .setCreationTime(Time.now())
+            .setReplicationType(HddsProtos.ReplicationType.RATIS)
+            .setReplicationFactor(HddsProtos.ReplicationFactor.ONE)
+            .setParentID(parentID)
+            .build();
+
+    String fileName = OzoneFSUtils.getFileName(keyName);
+
+    OmKeyInfo omKeyInfo = new OmKeyInfo.Builder()
+            .setVolumeName(volumeName)
+            .setBucketName(bucketName)
+            .setKeyName(fileName)
+            .setFileName(fileName)
+            .setCreationTime(Time.now())
+            .setModificationTime(Time.now())
+            .setReplicationType(HddsProtos.ReplicationType.RATIS)
+            .setReplicationFactor(HddsProtos.ReplicationFactor.ONE)
+            .setOmKeyLocationInfos(Collections.singletonList(
+                    new OmKeyLocationInfoGroup(0, new ArrayList<>())))
+            .setParentObjectID(parentID)
+            .build();
+
+    OMResponse omResponse = OMResponse.newBuilder()
+            .setCmdType(OzoneManagerProtocolProtos.Type.InitiateMultiPartUpload)
+            .setStatus(OzoneManagerProtocolProtos.Status.OK)
+            .setSuccess(true).setInitiateMultiPartUploadResponse(
+                    OzoneManagerProtocolProtos.MultipartInfoInitiateResponse
+                            .newBuilder().setVolumeName(volumeName)
+                            .setBucketName(bucketName)
+                            .setKeyName(keyName)
+                            .setMultipartUploadID(multipartUploadID)).build();
+
+    return new S3InitiateMultipartUploadResponseV1(omResponse, multipartKeyInfo,
+            omKeyInfo, parentDirInfos);
+  }
+
 }
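
A note on the key shape exercised by the two tests above: the V1 response writes
the open multipart entry under a dbKey addressed by object IDs rather than the
full key path. Below is a minimal illustrative sketch of that shape, assuming
the <parentID>/<fileName>/<uploadID> format with "/" as the separator; the
class and method names here are hypothetical, not the real OMMetadataManager
API.

    // Illustrative only: mirrors the dbKey shape assumed for
    // omMetadataManager.getMultipartKey(parentID, fileName, uploadID).
    public final class MultipartKeySketch {
        static String multipartOpenKeyV1(long parentID, String fileName,
                String uploadID) {
            // e.g. parentID=1027, fileName="file1" -> "1027/file1/<uploadID>"
            return parentID + "/" + fileName + "/" + uploadID;
        }

        public static void main(String[] args) {
            System.out.println(multipartOpenKeyV1(1027L, "file1", "upload-1"));
            // prints: 1027/file1/upload-1
        }
    }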


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 10/19: HDDS-4658. LookupKey: do lookup in dir and file tables (#1775)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 6c9b41f1183968c4ea1dc829ac7202efd959a710
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Wed Jan 13 08:36:37 2021 +0530

    HDDS-4658. LookupKey: do lookup in dir and file tables (#1775)
---
 .../hadoop/fs/ozone/TestOzoneFileSystemV1.java     | 15 ++++
 .../hadoop/ozone/client/rpc/TestReadRetries.java   | 55 +++++++++++---
 .../apache/hadoop/ozone/om/TestObjectStoreV1.java  | 83 ++++++++++++++++++++++
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 32 +++++++--
 .../ozone/om/request/key/OMKeyCreateRequestV1.java |  3 +-
 5 files changed, 175 insertions(+), 13 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
index 2938714..e574e94 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
@@ -325,6 +325,11 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
    */
   @Test
   public void testRenameWithNonExistentSource() throws Exception {
+    // Skip: this test applies only to the new (V1) layout.
+    if (!enabledFileSystemPaths) {
+      return;
+    }
+
     final String root = "/root";
     final String dir1 = root + "/dir1";
     final String dir2 = root + "/dir2";
@@ -350,6 +355,11 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
    */
   @Test
   public void testRenameDirToItsOwnSubDir() throws Exception {
+    // Skip: this test applies only to the new (V1) layout.
+    if (!enabledFileSystemPaths) {
+      return;
+    }
+
     final String root = "/root";
     final String dir1 = root + "/dir1";
     final Path dir1Path = new Path(fs.getUri().toString() + dir1);
@@ -377,6 +387,11 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
    */
   @Test
   public void testRenameDestinationParentDoesntExist() throws Exception {
+    // Skip: this test applies only to the new (V1) layout.
+    if (!enabledFileSystemPaths) {
+      return;
+    }
+
     final String root = "/root_dir";
     final String dir1 = root + "/dir1";
     final String dir2 = dir1 + "/dir2";
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
index af95190..ce2f10b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.ozone.client.rpc;
 
 import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.UUID;
@@ -47,24 +49,32 @@ import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.KeyOutputStream;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 
-import org.junit.AfterClass;
+import org.junit.After;
 import org.junit.Assert;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.junit.Assert.fail;
-import org.junit.BeforeClass;
+import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
-import org.junit.rules.ExpectedException;
 import org.junit.rules.Timeout;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import static org.junit.Assert.assertEquals;
+
+import org.junit.rules.ExpectedException;
 
 /**
  * Test read retries from multiple nodes in the pipeline.
  */
+@RunWith(Parameterized.class)
 public class TestReadRetries {
 
   /**
@@ -84,16 +94,27 @@ public class TestReadRetries {
       storageContainerLocationClient;
 
   private static final String SCM_ID = UUID.randomUUID().toString();
+  private String layoutVersion;
 
+  public TestReadRetries(String layoutVersion) {
+    this.layoutVersion = layoutVersion;
+  }
+
+  @Parameterized.Parameters
+  public static Collection<Object[]> data() {
+    return Arrays.asList(new Object[]{"V0"}, new Object[]{"V1"});
+  }
 
   /**
    * Create a MiniOzoneCluster for testing.
    * @throws Exception
    */
-  @BeforeClass
-  public static void init() throws Exception {
+  @Before
+  public void init() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1);
+    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true);
+    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, layoutVersion);
     cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(3)
         .setScmId(SCM_ID)
@@ -112,8 +133,8 @@ public class TestReadRetries {
   /**
    * Close OzoneClient and shutdown MiniOzoneCluster.
    */
-  @AfterClass
-  public static void shutdown() throws IOException {
+  @After
+  public void shutdown() throws IOException {
     if(ozClient != null) {
       ozClient.close();
     }
@@ -140,7 +161,7 @@ public class TestReadRetries {
     volume.createBucket(bucketName);
     OzoneBucket bucket = volume.getBucket(bucketName);
 
-    String keyName = UUID.randomUUID().toString();
+    String keyName = "a/b/c/" + UUID.randomUUID().toString();
 
     OzoneOutputStream out = bucket
         .createKey(keyName, value.getBytes(UTF_8).length, ReplicationType.RATIS,
@@ -188,6 +209,13 @@ public class TestReadRetries {
     cluster.shutdownHddsDatanode(datanodeDetails);
     // try to read, this should be successful
     readKey(bucket, keyName, value);
+
+    // read intermediate directory
+    verifyIntermediateDir(bucket, "a/b/c/");
+    verifyIntermediateDir(bucket, "a/b/c");
+    verifyIntermediateDir(bucket, "/a/b/c/");
+    verifyIntermediateDir(bucket, "/a/b/c");
+
     // shutdown the second datanode
     datanodeDetails = datanodes.get(1);
     cluster.shutdownHddsDatanode(datanodeDetails);
@@ -210,6 +238,17 @@ public class TestReadRetries {
     factory.releaseClient(clientSpi, false);
   }
 
+  private void verifyIntermediateDir(OzoneBucket bucket,
+      String dir) throws IOException {
+    try {
+      bucket.getKey(dir);
+      fail("Should throw exception for directory listing");
+    } catch (OMException ome) {
+      // expected
+      assertEquals(OMException.ResultCodes.KEY_NOT_FOUND, ome.getResult());
+    }
+  }
+
   private void readKey(OzoneBucket bucket, String keyName, String data)
       throws IOException {
     OzoneKey key = bucket.getKey(keyName);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
index c6ae4ca..ee127cf 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
@@ -26,8 +26,10 @@ import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneKeyDetails;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
@@ -43,6 +45,9 @@ import java.io.IOException;
 import java.util.HashMap;
 import java.util.UUID;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
 public class TestObjectStoreV1 {
 
   private static MiniOzoneCluster cluster = null;
@@ -133,6 +138,84 @@ public class TestObjectStoreV1 {
             true);
   }
 
+  @Test
+  public void testLookupKey() throws Exception {
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    String parent = "a/b/c/";
+    String file = "key" + RandomStringUtils.randomNumeric(5);
+    String key = parent + file;
+
+    OzoneClient client = cluster.getClient();
+
+    ObjectStore objectStore = client.getObjectStore();
+    objectStore.createVolume(volumeName);
+
+    OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
+    Assert.assertTrue(ozoneVolume.getName().equals(volumeName));
+    ozoneVolume.createBucket(bucketName);
+
+    OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
+    Assert.assertTrue(ozoneBucket.getName().equals(bucketName));
+
+    Table<String, OmKeyInfo> openKeyTable =
+            cluster.getOzoneManager().getMetadataManager().getOpenKeyTable();
+
+    String data = "random data";
+    OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(key,
+            data.length(), ReplicationType.RATIS, ReplicationFactor.ONE,
+            new HashMap<>());
+
+    OmDirectoryInfo dirPathC = getDirInfo(volumeName, bucketName, parent);
+    Assert.assertNotNull("Failed to find dir path: a/b/c", dirPathC);
+
+    // after file creation
+    verifyKeyInOpenFileTable(openKeyTable, file, dirPathC.getObjectID(),
+            false);
+
+    ozoneOutputStream.write(data.getBytes(), 0, data.length());
+
+    // open key
+    try {
+      ozoneBucket.getKey(key);
+      fail("Should throw exception as file is not visible and its still " +
+              "open for writing!");
+    } catch (OMException ome) {
+      // expected
+      assertEquals(ome.getResult(), OMException.ResultCodes.KEY_NOT_FOUND);
+    }
+
+    ozoneOutputStream.close();
+
+    OzoneKeyDetails keyDetails = ozoneBucket.getKey(key);
+    Assert.assertEquals(key, keyDetails.getName());
+
+    Table<String, OmKeyInfo> keyTable =
+            cluster.getOzoneManager().getMetadataManager().getKeyTable();
+
+    // When closing the key, entry should be removed from openFileTable
+    // and it should be added to fileTable.
+    verifyKeyInFileTable(keyTable, file, dirPathC.getObjectID(), false);
+    verifyKeyInOpenFileTable(openKeyTable, file, dirPathC.getObjectID(),
+            true);
+
+    ozoneBucket.deleteKey(key);
+
+    // get deleted key
+    try {
+      ozoneBucket.getKey(key);
+      fail("Should throw exception as file not exists!");
+    } catch (OMException ome) {
+      // expected
+      assertEquals(ome.getResult(), OMException.ResultCodes.KEY_NOT_FOUND);
+    }
+
+    // after key delete
+    verifyKeyInFileTable(keyTable, file, dirPathC.getObjectID(), true);
+    verifyKeyInOpenFileTable(openKeyTable, file, dirPathC.getObjectID(),
+            true);
+  }
+
   private OmDirectoryInfo getDirInfo(String volumeName, String bucketName,
       String parentKey) throws Exception {
     OMMetadataManager omMetadataManager =
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index dc70369..db28ff7 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -658,9 +658,11 @@ public class KeyManagerImpl implements KeyManager {
         bucketName);
     OmKeyInfo value = null;
     try {
-      String keyBytes = metadataManager.getOzoneKey(
-          volumeName, bucketName, keyName);
-      value = metadataManager.getKeyTable().get(keyBytes);
+      if (OzoneManagerRatisUtils.isOmLayoutVersionV1()) {
+        value = getOmKeyInfoV1(volumeName, bucketName, keyName);
+      } else {
+        value = getOmKeyInfo(volumeName, bucketName, keyName);
+      }
     } catch (IOException ex) {
       if (ex instanceof OMException) {
         throw ex;
@@ -680,7 +682,7 @@ public class KeyManagerImpl implements KeyManager {
         LOG.debug("volume:{} bucket:{} Key:{} not found", volumeName,
                 bucketName, keyName);
       }
-      throw new OMException("Key not found", KEY_NOT_FOUND);
+      throw new OMException("Key:" + keyName + " not found", KEY_NOT_FOUND);
     }
 
     // add block token for read.
@@ -697,6 +699,28 @@ public class KeyManagerImpl implements KeyManager {
     return value;
   }
 
+  private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName,
+                                 String keyName) throws IOException {
+    String keyBytes = metadataManager.getOzoneKey(
+            volumeName, bucketName, keyName);
+    return metadataManager.getKeyTable().get(keyBytes);
+  }
+
+  /**
+   * Lookup returns the fileInfo only for closed files. It returns null if
+   * the keyName is a directory or if the keyName is still open for writing.
+   */
+  private OmKeyInfo getOmKeyInfoV1(String volumeName, String bucketName,
+                                   String keyName) throws IOException {
+    OzoneFileStatus fileStatus =
+            OMFileRequest.getOMKeyInfoIfExists(metadataManager,
+                    volumeName, bucketName, keyName, scmBlockSize);
+    if (fileStatus == null) {
+      return null;
+    }
+    return fileStatus.isFile() ? fileStatus.getKeyInfo() : null;
+  }
+
   private void addBlockToken4Read(OmKeyInfo value) throws IOException {
     Preconditions.checkNotNull(value, "OMKeyInfo cannot be null");
     if (grpcBlockTokenEnabled) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestV1.java
index 416e462..a49c01e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestV1.java
@@ -196,8 +196,9 @@ public class OMKeyCreateRequestV1 extends OMKeyCreateRequest {
 
       // Prepare response. Sets user given full key name in the 'keyName'
       // attribute in response object.
+      int clientVersion = getOmRequest().getVersion();
       omResponse.setCreateKeyResponse(CreateKeyResponse.newBuilder()
-              .setKeyInfo(omFileInfo.getProtobuf(keyName))
+              .setKeyInfo(omFileInfo.getProtobuf(keyName, clientVersion))
               .setID(clientID)
               .setOpenVersion(openVersion).build())
               .setCmdType(Type.CreateKey);


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 11/19: HDDS-4717. Fix TestOzoneFileSystemV1 and TestObjectStoreV1 cases (#1815)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit ea6dc1c1d04adc78ce80406fc5b7b531dce30949
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Fri Jan 22 21:15:11 2021 +0530

    HDDS-4717. Fix TestOzoneFileSystemV1 and TestObjectStoreV1 cases (#1815)
---
 .../hadoop/ozone/client/io/KeyOutputStream.java    |   8 +
 .../apache/hadoop/fs/ozone/TestOzoneDirectory.java |  26 ---
 .../hadoop/fs/ozone/TestOzoneFileSystem.java       |   9 +-
 .../apache/hadoop/ozone/om/TestObjectStoreV1.java  | 192 ++++++++++++---------
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 112 ++++++++----
 .../ozone/om/request/key/OMKeyDeleteRequestV1.java |   6 +
 6 files changed, 215 insertions(+), 138 deletions(-)

diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
index b2a4e92..106ed45 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
@@ -92,6 +92,8 @@ public class KeyOutputStream extends OutputStream {
   private boolean isException;
   private final BlockOutputStreamEntryPool blockOutputStreamEntryPool;
 
+  private long clientID;
+
   /**
    * A constructor for testing purpose only.
    */
@@ -127,6 +129,11 @@ public class KeyOutputStream extends OutputStream {
     return retryCount;
   }
 
+  @VisibleForTesting
+  public long getClientID() {
+    return clientID;
+  }
+
   @SuppressWarnings({"parameternumber", "squid:S00107"})
   public KeyOutputStream(
       OzoneClientConfig config,
@@ -158,6 +165,7 @@ public class KeyOutputStream extends OutputStream {
     this.retryCount = 0;
     this.isException = false;
     this.writeOffset = 0;
+    this.clientID = handler.getId();
   }
 
   /**
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneDirectory.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneDirectory.java
index 87e9f09..56c6177 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneDirectory.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneDirectory.java
@@ -23,8 +23,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.TestDataUtil;
@@ -33,7 +31,6 @@ import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.After;
 import org.junit.Assert;
@@ -49,7 +46,6 @@ import java.util.concurrent.TimeoutException;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
-import static org.junit.Assert.fail;
 
 /**
  * Test verifies the entries and operations in directory table.
@@ -95,28 +91,6 @@ public class TestOzoneDirectory {
     Assert.assertEquals("Wrong OM numKeys metrics",
             4, cluster.getOzoneManager().getMetrics().getNumKeys());
 
-    // verify entries in directory table
-    TableIterator<String, ? extends
-            Table.KeyValue<String, OmDirectoryInfo>> iterator =
-            omMgr.getDirectoryTable().iterator();
-    iterator.seekToFirst();
-    int count = dirKeys.size();
-    Assert.assertEquals("Unexpected directory table entries!", 4, count);
-    while (iterator.hasNext()) {
-      count--;
-      Table.KeyValue<String, OmDirectoryInfo> value = iterator.next();
-      verifyKeyFormat(value.getKey(), dirKeys);
-    }
-    Assert.assertEquals("Unexpected directory table entries!", 0, count);
-
-    // verify entries in key table
-    TableIterator<String, ? extends
-            Table.KeyValue<String, OmKeyInfo>> keyTableItr =
-            omMgr.getKeyTable().iterator();
-    while (keyTableItr.hasNext()) {
-      fail("Shouldn't add any entries in KeyTable!");
-    }
-
     // create sub-dirs under same parent
     Path subDir5 = new Path("/d1/d2/d3/d4/d5");
     fs.mkdirs(subDir5);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
index 44ea7b9..bf9e09f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
@@ -598,10 +598,17 @@ public class TestOzoneFileSystem {
     ArrayList<String> actualPathList = new ArrayList<>();
     if (rootItemCount != fileStatuses.length) {
       for (int i = 0; i < fileStatuses.length; i++) {
-        actualPaths.add(fileStatuses[i].getPath().getName());
+        boolean added =
+                actualPaths.add(fileStatuses[i].getPath().getName());
+        if (!added) {
+          LOG.info("Duplicate path:{} in FileStatusList",
+                  fileStatuses[i].getPath().getName());
+        }
         actualPathList.add(fileStatuses[i].getPath().getName());
       }
       if (rootItemCount != actualPathList.size()) {
+        LOG.info("actualPathsSize: {}", actualPaths.size());
+        LOG.info("actualPathListSize: {}", actualPathList.size());
         actualPaths.removeAll(paths);
         actualPathList.removeAll(paths);
         LOG.info("actualPaths: {}", actualPaths);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
index ee127cf..b88bbc3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
@@ -17,17 +17,23 @@
 package org.apache.hadoop.ozone.om;
 
 import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.TestDataUtil;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneKeyDetails;
 import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.KeyOutputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
@@ -36,6 +42,7 @@ import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Assert;
 import org.junit.AfterClass;
+import org.junit.After;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
@@ -45,6 +52,8 @@ import java.io.IOException;
 import java.util.HashMap;
 import java.util.UUID;
 
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
@@ -55,6 +64,9 @@ public class TestObjectStoreV1 {
   private static String clusterId;
   private static String scmId;
   private static String omId;
+  private static String volumeName;
+  private static String bucketName;
+  private static FileSystem fs;
 
   @Rule
   public Timeout timeout = new Timeout(240000);
@@ -78,12 +90,51 @@ public class TestObjectStoreV1 {
             .setOmId(omId)
             .build();
     cluster.waitForClusterToBeReady();
+    // create a volume and a bucket to be used by OzoneFileSystem
+    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster);
+    volumeName = bucket.getVolumeName();
+    bucketName = bucket.getName();
+
+    String rootPath = String.format("%s://%s.%s/",
+            OzoneConsts.OZONE_URI_SCHEME, bucket.getName(),
+            bucket.getVolumeName());
+    // Set the fs.defaultFS and start the filesystem
+    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
+    // Set the number of keys to be processed during batch operate.
+    conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5);
+    fs = FileSystem.get(conf);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    deleteRootDir();
+  }
+
+  /**
+   * Cleanup files and directories.
+   *
+   * @throws IOException DB failure
+   */
+  private void deleteRootDir() throws IOException {
+    Path root = new Path("/");
+    FileStatus[] fileStatuses = fs.listStatus(root);
+
+    if (fileStatuses == null) {
+      return;
+    }
+
+    for (FileStatus fStatus : fileStatuses) {
+      fs.delete(fStatus.getPath(), true);
+    }
+
+    fileStatuses = fs.listStatus(root);
+    if (fileStatuses != null) {
+      Assert.assertEquals("Delete root failed!", 0, fileStatuses.length);
+    }
   }
 
   @Test
   public void testCreateKey() throws Exception {
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
     String parent = "a/b/c/";
     String file = "key" + RandomStringUtils.randomNumeric(5);
     String key = parent + file;
@@ -91,74 +142,67 @@ public class TestObjectStoreV1 {
     OzoneClient client = cluster.getClient();
 
     ObjectStore objectStore = client.getObjectStore();
-    objectStore.createVolume(volumeName);
-
     OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
     Assert.assertTrue(ozoneVolume.getName().equals(volumeName));
-    ozoneVolume.createBucket(bucketName);
-
     OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
     Assert.assertTrue(ozoneBucket.getName().equals(bucketName));
 
-    Table<String, OmKeyInfo> openKeyTable =
+    Table<String, OmKeyInfo> openFileTable =
             cluster.getOzoneManager().getMetadataManager().getOpenKeyTable();
 
     // before file creation
-    verifyKeyInFileTable(openKeyTable, file, 0, true);
+    verifyKeyInFileTable(openFileTable, file, 0, true);
 
     String data = "random data";
     OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(key,
             data.length(), ReplicationType.RATIS, ReplicationFactor.ONE,
             new HashMap<>());
 
-    OmDirectoryInfo dirPathC = getDirInfo(volumeName, bucketName, parent);
+    KeyOutputStream keyOutputStream =
+            (KeyOutputStream) ozoneOutputStream.getOutputStream();
+    long clientID = keyOutputStream.getClientID();
+
+    OmDirectoryInfo dirPathC = getDirInfo(parent);
     Assert.assertNotNull("Failed to find dir path: a/b/c", dirPathC);
 
     // after file creation
-    verifyKeyInOpenFileTable(openKeyTable, file, dirPathC.getObjectID(),
-            false);
+    verifyKeyInOpenFileTable(openFileTable, clientID, file,
+            dirPathC.getObjectID(), false);
 
     ozoneOutputStream.write(data.getBytes(), 0, data.length());
     ozoneOutputStream.close();
 
-    Table<String, OmKeyInfo> keyTable =
+    Table<String, OmKeyInfo> fileTable =
             cluster.getOzoneManager().getMetadataManager().getKeyTable();
-
     // After closing the file. File entry should be removed from openFileTable
     // and it should be added to fileTable.
-    verifyKeyInFileTable(keyTable, file, dirPathC.getObjectID(), false);
-    verifyKeyInOpenFileTable(openKeyTable, file, dirPathC.getObjectID(),
-            true);
+    verifyKeyInFileTable(fileTable, file, dirPathC.getObjectID(), false);
+    verifyKeyInOpenFileTable(openFileTable, clientID, file,
+            dirPathC.getObjectID(), true);
 
     ozoneBucket.deleteKey(key);
 
     // after key delete
-    verifyKeyInFileTable(keyTable, file, dirPathC.getObjectID(), true);
-    verifyKeyInOpenFileTable(openKeyTable, file, dirPathC.getObjectID(),
-            true);
+    verifyKeyInFileTable(fileTable, file, dirPathC.getObjectID(), true);
+    verifyKeyInOpenFileTable(openFileTable, clientID, file,
+            dirPathC.getObjectID(), true);
   }
 
   @Test
   public void testLookupKey() throws Exception {
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
     String parent = "a/b/c/";
-    String file = "key" + RandomStringUtils.randomNumeric(5);
-    String key = parent + file;
+    String fileName = "key" + RandomStringUtils.randomNumeric(5);
+    String key = parent + fileName;
 
     OzoneClient client = cluster.getClient();
 
     ObjectStore objectStore = client.getObjectStore();
-    objectStore.createVolume(volumeName);
-
     OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
     Assert.assertTrue(ozoneVolume.getName().equals(volumeName));
-    ozoneVolume.createBucket(bucketName);
-
     OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
     Assert.assertTrue(ozoneBucket.getName().equals(bucketName));
 
-    Table<String, OmKeyInfo> openKeyTable =
+    Table<String, OmKeyInfo> openFileTable =
             cluster.getOzoneManager().getMetadataManager().getOpenKeyTable();
 
     String data = "random data";
@@ -166,19 +210,23 @@ public class TestObjectStoreV1 {
             data.length(), ReplicationType.RATIS, ReplicationFactor.ONE,
             new HashMap<>());
 
-    OmDirectoryInfo dirPathC = getDirInfo(volumeName, bucketName, parent);
+    KeyOutputStream keyOutputStream =
+            (KeyOutputStream) ozoneOutputStream.getOutputStream();
+    long clientID = keyOutputStream.getClientID();
+
+    OmDirectoryInfo dirPathC = getDirInfo(parent);
     Assert.assertNotNull("Failed to find dir path: a/b/c", dirPathC);
 
     // after file creation
-    verifyKeyInOpenFileTable(openKeyTable, file, dirPathC.getObjectID(),
-            false);
+    verifyKeyInOpenFileTable(openFileTable, clientID, fileName,
+            dirPathC.getObjectID(), false);
 
     ozoneOutputStream.write(data.getBytes(), 0, data.length());
 
     // open key
     try {
       ozoneBucket.getKey(key);
-      fail("Should throw exception as file is not visible and its still " +
+      fail("Should throw exception as fileName is not visible and its still " +
               "open for writing!");
     } catch (OMException ome) {
       // expected
@@ -190,34 +238,33 @@ public class TestObjectStoreV1 {
     OzoneKeyDetails keyDetails = ozoneBucket.getKey(key);
     Assert.assertEquals(key, keyDetails.getName());
 
-    Table<String, OmKeyInfo> keyTable =
+    Table<String, OmKeyInfo> fileTable =
             cluster.getOzoneManager().getMetadataManager().getKeyTable();
 
     // When closing the key, entry should be removed from openFileTable
     // and it should be added to fileTable.
-    verifyKeyInFileTable(keyTable, file, dirPathC.getObjectID(), false);
-    verifyKeyInOpenFileTable(openKeyTable, file, dirPathC.getObjectID(),
-            true);
+    verifyKeyInFileTable(fileTable, fileName, dirPathC.getObjectID(), false);
+    verifyKeyInOpenFileTable(openFileTable, clientID, fileName,
+            dirPathC.getObjectID(), true);
 
     ozoneBucket.deleteKey(key);
 
     // get deleted key
     try {
       ozoneBucket.getKey(key);
-      fail("Should throw exception as file not exists!");
+      fail("Should throw exception as fileName not exists!");
     } catch (OMException ome) {
       // expected
       assertEquals(ome.getResult(), OMException.ResultCodes.KEY_NOT_FOUND);
     }
 
     // after key delete
-    verifyKeyInFileTable(keyTable, file, dirPathC.getObjectID(), true);
-    verifyKeyInOpenFileTable(openKeyTable, file, dirPathC.getObjectID(),
-            true);
+    verifyKeyInFileTable(fileTable, fileName, dirPathC.getObjectID(), true);
+    verifyKeyInOpenFileTable(openFileTable, clientID, fileName,
+            dirPathC.getObjectID(), true);
   }
 
-  private OmDirectoryInfo getDirInfo(String volumeName, String bucketName,
-      String parentKey) throws Exception {
+  private OmDirectoryInfo getDirInfo(String parentKey) throws Exception {
     OMMetadataManager omMetadataManager =
             cluster.getOzoneManager().getMetadataManager();
     long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName,
@@ -238,51 +285,38 @@ public class TestObjectStoreV1 {
 
   private void verifyKeyInFileTable(Table<String, OmKeyInfo> fileTable,
       String fileName, long parentID, boolean isEmpty) throws IOException {
-    TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> iterator
-            = fileTable.iterator();
 
+    String dbFileKey = parentID + OM_KEY_PREFIX + fileName;
+    OmKeyInfo omKeyInfo = fileTable.get(dbFileKey);
     if (isEmpty) {
-      Assert.assertTrue("Table is not empty!", fileTable.isEmpty());
+      Assert.assertNull("Table is not empty!", omKeyInfo);
     } else {
-      Assert.assertFalse("Table is empty!", fileTable.isEmpty());
-      while (iterator.hasNext()) {
-        Table.KeyValue<String, OmKeyInfo> next = iterator.next();
-        Assert.assertEquals("Invalid Key: " + next.getKey(),
-                parentID + "/" + fileName, next.getKey());
-        OmKeyInfo omKeyInfo = next.getValue();
-        Assert.assertEquals("Invalid Key", fileName,
-                omKeyInfo.getFileName());
-        Assert.assertEquals("Invalid Key", fileName,
-                omKeyInfo.getKeyName());
-        Assert.assertEquals("Invalid Key", parentID,
-                omKeyInfo.getParentObjectID());
-      }
+      Assert.assertNotNull("Table is empty!", omKeyInfo);
+      // the entry was fetched directly by its dbKey, whose format in the
+      // fileTable is <parentID>/fileName.
+      Assert.assertEquals("Invalid Key: " + omKeyInfo.getObjectInfo(),
+              omKeyInfo.getKeyName(), fileName);
+      Assert.assertEquals("Invalid Key", parentID,
+              omKeyInfo.getParentObjectID());
     }
   }
 
   private void verifyKeyInOpenFileTable(Table<String, OmKeyInfo> openFileTable,
-      String fileName, long parentID, boolean isEmpty) throws IOException {
-    TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> iterator
-            = openFileTable.iterator();
-
+      long clientID, String fileName, long parentID, boolean isEmpty)
+          throws IOException {
+    String dbOpenFileKey =
+            parentID + OM_KEY_PREFIX + fileName + OM_KEY_PREFIX + clientID;
+    OmKeyInfo omKeyInfo = openFileTable.get(dbOpenFileKey);
     if (isEmpty) {
-      Assert.assertTrue("Table is not empty!", openFileTable.isEmpty());
+      Assert.assertNull("Table is not empty!", omKeyInfo);
     } else {
-      Assert.assertFalse("Table is empty!", openFileTable.isEmpty());
-      while (iterator.hasNext()) {
-        Table.KeyValue<String, OmKeyInfo> next = iterator.next();
-        // used startsWith because the key format is,
-        // <parentID>/fileName/<clientID> and clientID is not visible.
-        Assert.assertTrue("Invalid Key: " + next.getKey(),
-                next.getKey().startsWith(parentID + "/" + fileName));
-        OmKeyInfo omKeyInfo = next.getValue();
-        Assert.assertEquals("Invalid Key", fileName,
-                omKeyInfo.getFileName());
-        Assert.assertEquals("Invalid Key", fileName,
-                omKeyInfo.getKeyName());
-        Assert.assertEquals("Invalid Key", parentID,
-                omKeyInfo.getParentObjectID());
-      }
+      Assert.assertNotNull("Table is empty!", omKeyInfo);
+      // the entry was fetched directly by its dbKey, whose format in the
+      // openFileTable is <parentID>/fileName/<clientID>.
+      Assert.assertEquals("Invalid Key: " + omKeyInfo.getObjectInfo(),
+              omKeyInfo.getKeyName(), fileName);
+      Assert.assertEquals("Invalid Key", parentID,
+              omKeyInfo.getParentObjectID());
     }
   }
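
The rewritten verifiers above stop iterating the whole table and instead fetch
rows directly by their FSO dbKeys. A small illustrative sketch of the two key
shapes they build, assuming OM_KEY_PREFIX is "/" (the constant imported at the
top of the test); all values below are hypothetical.

    // Illustrative only: dbKey shapes used by the verifiers above.
    public final class DbKeySketch {
        public static void main(String[] args) {
            long parentID = 1027L;          // hypothetical objectID of dir "a/b/c"
            String fileName = "key12345";   // hypothetical leaf name
            long clientID = 99L;            // hypothetical open-file client id
            // fileTable key: <parentID>/<fileName>
            String dbFileKey = parentID + "/" + fileName;
            // openFileTable key: <parentID>/<fileName>/<clientID>
            String dbOpenFileKey = dbFileKey + "/" + clientID;
            System.out.println(dbFileKey + " | " + dbOpenFileKey);
            // prints: 1027/key12345 | 1027/key12345/99
        }
    }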
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index db28ff7..055ab13 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -31,7 +31,6 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
-import java.util.LinkedHashSet;
 import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
@@ -2320,6 +2319,7 @@ public class KeyManagerImpl implements KeyManager {
     return fileStatusList;
   }
 
+  @SuppressWarnings("methodlength")
   public List<OzoneFileStatus> listStatusV1(OmKeyArgs args, boolean recursive,
       String startKey, long numEntries, String clientAddress)
           throws IOException {
@@ -2327,10 +2327,32 @@ public class KeyManagerImpl implements KeyManager {
 
     // unsorted OMKeyInfo list contains combine results from TableCache and DB.
     List<OzoneFileStatus> fileStatusFinalList = new ArrayList<>();
-    LinkedHashSet<OzoneFileStatus> fileStatusList = new LinkedHashSet<>();
+
     if (numEntries <= 0) {
       return fileStatusFinalList;
     }
+
+    /**
+     * Maps sorted by OmKey, combining results from the TableCache and the
+     * DB for each entity type - Dir & File.
+     *
+     * Two separate maps are required because of the seek order: (1) seek
+     * files in the fileTable, (2) seek dirs in the dirTable.
+     *
+     * The startKey should appear first in the final listStatuses, so if
+     * files and dirs were combined into a single map, a directory that
+     * sorts lower would appear at the top of the list even when the
+     * startKey is a fileName.
+     *
+     * For example, take startKey="a/file1". Per the seek order, all the
+     * files are fetched first, then all the directories. Assume a
+     * directory "a/b" exists. With one map, the sorted list would be
+     * ["a/b", "a/file1"], but the expected list is ["a/file1", "a/b"]:
+     * the startKey element should always be at the top of listStatuses.
+     * (A standalone sketch of this ordering follows this file's diff.)
+     */
+    TreeMap<String, OzoneFileStatus> cacheFileMap = new TreeMap<>();
+    TreeMap<String, OzoneFileStatus> cacheDirMap = new TreeMap<>();
+
     String volumeName = args.getVolumeName();
     String bucketName = args.getBucketName();
     String keyName = args.getKeyName();
@@ -2373,12 +2395,12 @@ public class KeyManagerImpl implements KeyManager {
         seekFileInDB = metadataManager.getOzonePathKey(prefixKeyInDB, "");
         seekDirInDB = metadataManager.getOzonePathKey(prefixKeyInDB, "");
 
-        // Order of seek -> (1)Seek dirs in dirTable (2)Seek files in fileTable
+        // Order of seek -> (1)Seek files in fileTable (2)Seek dirs in dirTable
         // 1. Seek the given key in key table.
-        countEntries = getFilesFromDirectory(fileStatusList, seekFileInDB,
+        countEntries = getFilesFromDirectory(cacheFileMap, seekFileInDB,
                 prefixPath, prefixKeyInDB, startKey, countEntries, numEntries);
         // 2. Seek the given key in dir table.
-        getDirectories(fileStatusList, seekDirInDB, prefixPath, prefixKeyInDB,
+        getDirectories(cacheDirMap, seekDirInDB, prefixPath, prefixKeyInDB,
                 startKey, countEntries, numEntries, volumeName, bucketName,
                 recursive);
       } else {
@@ -2420,7 +2442,7 @@ public class KeyManagerImpl implements KeyManager {
             // dirTable. So, its not required to search again in the fileTable.
 
             // Seek the given key in dirTable.
-            getDirectories(fileStatusList, seekDirInDB, prefixPath,
+            getDirectories(cacheDirMap, seekDirInDB, prefixPath,
                     prefixKeyInDB, startKey, countEntries, numEntries,
                     volumeName, bucketName, recursive);
           } else {
@@ -2430,11 +2452,11 @@ public class KeyManagerImpl implements KeyManager {
             seekDirInDB = metadataManager.getOzonePathKey(prefixKeyInDB, "");
 
             // 1. Seek the given key in key table.
-            countEntries = getFilesFromDirectory(fileStatusList, seekFileInDB,
+            countEntries = getFilesFromDirectory(cacheFileMap, seekFileInDB,
                     prefixPath, prefixKeyInDB, startKey, countEntries,
                     numEntries);
             // 2. Seek the given key in dir table.
-            getDirectories(fileStatusList, seekDirInDB, prefixPath,
+            getDirectories(cacheDirMap, seekDirInDB, prefixPath,
                     prefixKeyInDB, startKey, countEntries, numEntries,
                     volumeName, bucketName, recursive);
           }
@@ -2451,12 +2473,16 @@ public class KeyManagerImpl implements KeyManager {
       metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName,
               bucketName);
     }
-    List<OmKeyInfo> keyInfoList = new ArrayList<>(fileStatusList.size());
-    for (OzoneFileStatus fileStatus : fileStatusList) {
-      if (fileStatus.isFile()) {
-        keyInfoList.add(fileStatus.getKeyInfo());
-      }
+
+    List<OmKeyInfo> keyInfoList = new ArrayList<>();
+    for (OzoneFileStatus fileStatus : cacheFileMap.values()) {
+      fileStatusFinalList.add(fileStatus);
+      keyInfoList.add(fileStatus.getKeyInfo());
+    }
+    for (OzoneFileStatus fileStatus : cacheDirMap.values()) {
+      fileStatusFinalList.add(fileStatus);
     }
+
     // refreshPipeline flag check has been removed as part of
     // https://issues.apache.org/jira/browse/HDDS-3658.
     // Please refer this jira for more details.
@@ -2464,20 +2490,23 @@ public class KeyManagerImpl implements KeyManager {
     if (args.getSortDatanodes()) {
       sortDatanodes(clientAddress, keyInfoList.toArray(new OmKeyInfo[0]));
     }
-    fileStatusFinalList.addAll(fileStatusList);
     return fileStatusFinalList;
   }
 
   @SuppressWarnings("parameternumber")
-  protected int getDirectories(Set<OzoneFileStatus> fileStatusList,
+  protected int getDirectories(
+      TreeMap<String, OzoneFileStatus> cacheKeyMap,
       String seekDirInDB, String prefixPath, long prefixKeyInDB,
       String startKey, int countEntries, long numEntries, String volumeName,
       String bucketName, boolean recursive) throws IOException {
 
+    // A set to keep track of keys deleted in cache but not flushed to DB.
+    Set<String> deletedKeySet = new TreeSet<>();
+
     Table dirTable = metadataManager.getDirectoryTable();
-    countEntries = listStatusFindDirsInTableCache(fileStatusList, dirTable,
+    countEntries = listStatusFindDirsInTableCache(cacheKeyMap, dirTable,
             prefixKeyInDB, seekDirInDB, prefixPath, startKey, volumeName,
-            bucketName, countEntries, numEntries);
+            bucketName, countEntries, numEntries, deletedKeySet);
     TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
             iterator = dirTable.iterator();
 
@@ -2485,6 +2514,11 @@ public class KeyManagerImpl implements KeyManager {
 
     while (iterator.hasNext() && numEntries - countEntries > 0) {
       OmDirectoryInfo dirInfo = iterator.value().getValue();
+      if (deletedKeySet.contains(dirInfo.getPath())) {
+        iterator.next(); // move to next entry in the table
+        // entry is actually deleted in cache and can exists in DB
+        continue;
+      }
       if (!OMFileRequest.isImmediateChild(dirInfo.getParentObjectID(),
               prefixKeyInDB)) {
         break;
@@ -2496,7 +2530,7 @@ public class KeyManagerImpl implements KeyManager {
                 dirInfo.getName());
         OmKeyInfo omKeyInfo = OMFileRequest.getOmKeyInfo(volumeName,
                 bucketName, dirInfo, dirName);
-        fileStatusList.add(new OzoneFileStatus(omKeyInfo, scmBlockSize,
+        cacheKeyMap.put(dirName, new OzoneFileStatus(omKeyInfo, scmBlockSize,
                 true));
         countEntries++;
       }
@@ -2507,20 +2541,28 @@ public class KeyManagerImpl implements KeyManager {
     return countEntries;
   }
 
-  private int getFilesFromDirectory(Set<OzoneFileStatus> fileStatusList,
+  private int getFilesFromDirectory(
+      TreeMap<String, OzoneFileStatus> cacheKeyMap,
       String seekKeyInDB, String prefixKeyPath, long prefixKeyInDB,
       String startKey, int countEntries, long numEntries) throws IOException {
 
+    // A set to keep track of keys deleted in cache but not flushed to DB.
+    Set<String> deletedKeySet = new TreeSet<>();
+
     Table<String, OmKeyInfo> keyTable = metadataManager.getKeyTable();
-    countEntries = listStatusFindFilesInTableCache(fileStatusList, keyTable,
+    countEntries = listStatusFindFilesInTableCache(cacheKeyMap, keyTable,
             prefixKeyInDB, seekKeyInDB, prefixKeyPath, startKey,
-            countEntries, numEntries);
+            countEntries, numEntries, deletedKeySet);
     TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
             iterator = keyTable.iterator();
     iterator.seek(seekKeyInDB);
     while (iterator.hasNext() && numEntries - countEntries > 0) {
       OmKeyInfo keyInfo = iterator.value().getValue();
-
+      if (deletedKeySet.contains(keyInfo.getPath())) {
+        iterator.next(); // move to next entry in the table
+        // entry is deleted in the cache but may still exist in the DB
+        continue;
+      }
       if (!OMFileRequest.isImmediateChild(keyInfo.getParentObjectID(),
               prefixKeyInDB)) {
         break;
@@ -2530,7 +2572,8 @@ public class KeyManagerImpl implements KeyManager {
       String fullKeyPath = OMFileRequest.getAbsolutePath(prefixKeyPath,
               keyInfo.getKeyName());
       keyInfo.setKeyName(fullKeyPath);
-      fileStatusList.add(new OzoneFileStatus(keyInfo, scmBlockSize, false));
+      cacheKeyMap.put(fullKeyPath,
+              new OzoneFileStatus(keyInfo, scmBlockSize, false));
       countEntries++;
       iterator.next(); // move to next entry in the table
     }
@@ -2542,10 +2585,10 @@ public class KeyManagerImpl implements KeyManager {
    */
   @SuppressWarnings("parameternumber")
   private int listStatusFindFilesInTableCache(
-          Set<OzoneFileStatus> fileStatusList, Table<String,
+          TreeMap<String, OzoneFileStatus> cacheKeyMap, Table<String,
           OmKeyInfo> keyTable, long prefixKeyInDB, String seekKeyInDB,
           String prefixKeyPath, String startKey, int countEntries,
-          long numEntries) {
+          long numEntries, Set<String> deletedKeySet) {
 
     Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>>
             cacheIter = keyTable.cacheIterator();
@@ -2558,6 +2601,7 @@ public class KeyManagerImpl implements KeyManager {
       OmKeyInfo cacheOmKeyInfo = entry.getValue().getCacheValue();
       // cacheOmKeyInfo is null if an entry is deleted in cache
       if(cacheOmKeyInfo == null){
+        deletedKeySet.add(cacheKey);
         continue;
       }
 
@@ -2571,7 +2615,7 @@ public class KeyManagerImpl implements KeyManager {
               omKeyInfo.getKeyName());
       omKeyInfo.setKeyName(fullKeyPath);
 
-      countEntries = addKeyInfoToFileStatusList(fileStatusList, prefixKeyInDB,
+      countEntries = addKeyInfoToFileStatusList(cacheKeyMap, prefixKeyInDB,
               seekKeyInDB, startKey, countEntries, cacheKey, omKeyInfo,
               false);
     }
@@ -2583,10 +2627,11 @@ public class KeyManagerImpl implements KeyManager {
    */
   @SuppressWarnings("parameternumber")
   private int listStatusFindDirsInTableCache(
-          Set<OzoneFileStatus> fileStatusList, Table<String,
+          TreeMap<String, OzoneFileStatus> cacheKeyMap, Table<String,
           OmDirectoryInfo> dirTable, long prefixKeyInDB, String seekKeyInDB,
           String prefixKeyPath, String startKey, String volumeName,
-          String bucketName, int countEntries, long numEntries) {
+          String bucketName, int countEntries, long numEntries,
+          Set<String> deletedKeySet) {
 
     Iterator<Map.Entry<CacheKey<String>, CacheValue<OmDirectoryInfo>>>
             cacheIter = dirTable.cacheIterator();
@@ -2599,7 +2644,9 @@ public class KeyManagerImpl implements KeyManager {
               cacheIter.next();
       String cacheKey = entry.getKey().getCacheKey();
       OmDirectoryInfo cacheOmDirInfo = entry.getValue().getCacheValue();
+      // cacheOmDirInfo is null if an entry is deleted in the cache
       if(cacheOmDirInfo == null){
+        deletedKeySet.add(cacheKey);
         continue;
       }
       String fullDirPath = OMFileRequest.getAbsolutePath(prefixKeyPath,
@@ -2607,7 +2654,7 @@ public class KeyManagerImpl implements KeyManager {
       OmKeyInfo cacheDirKeyInfo = OMFileRequest.getOmKeyInfo(volumeName,
               bucketName, cacheOmDirInfo, fullDirPath);
 
-      countEntries = addKeyInfoToFileStatusList(fileStatusList, prefixKeyInDB,
+      countEntries = addKeyInfoToFileStatusList(cacheKeyMap, prefixKeyInDB,
               seekKeyInDB, startKey, countEntries, cacheKey, cacheDirKeyInfo,
               true);
     }
@@ -2615,7 +2662,8 @@ public class KeyManagerImpl implements KeyManager {
   }
 
   @SuppressWarnings("parameternumber")
-  private int addKeyInfoToFileStatusList(Set<OzoneFileStatus> fileStatusList,
+  private int addKeyInfoToFileStatusList(
+      TreeMap<String, OzoneFileStatus> cacheKeyMap,
       long prefixKeyInDB, String seekKeyInDB, String startKey,
       int countEntries, String cacheKey, OmKeyInfo cacheOmKeyInfo,
       boolean isDirectory) {
@@ -2627,7 +2675,7 @@ public class KeyManagerImpl implements KeyManager {
       if (cacheKey.startsWith(seekKeyInDB)) {
         OzoneFileStatus fileStatus = new OzoneFileStatus(cacheOmKeyInfo,
                 scmBlockSize, isDirectory);
-        fileStatusList.add(fileStatus);
+        cacheKeyMap.put(cacheOmKeyInfo.getKeyName(), fileStatus);
         countEntries++;
       }
     } else {
@@ -2641,7 +2689,7 @@ public class KeyManagerImpl implements KeyManager {
               cacheKey.compareTo(seekKeyInDB) >= 0) {
         OzoneFileStatus fileStatus = new OzoneFileStatus(cacheOmKeyInfo,
                 scmBlockSize, isDirectory);
-        fileStatusList.add(fileStatus);
+        cacheKeyMap.put(cacheOmKeyInfo.getKeyName(), fileStatus);
         countEntries++;
       }
     }
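
As noted in the listStatusV1 javadoc above, here is a self-contained sketch of
the ordering problem that motivates keeping two sorted maps. It is not part of
the patch, and the entry names are hypothetical.

    import java.util.TreeMap;

    public final class ListStatusOrderSketch {
        public static void main(String[] args) {
            // One combined map sorts purely lexicographically, so the
            // directory "a/b" lands ahead of the startKey file "a/file1":
            TreeMap<String, String> combined = new TreeMap<>();
            combined.put("a/file1", "FILE");   // the startKey entry
            combined.put("a/b", "DIR");
            System.out.println(combined.keySet());   // [a/b, a/file1]

            // Two maps emitted files-first, as listStatusV1 does, keep the
            // startKey entry at the head of the final list:
            TreeMap<String, String> files = new TreeMap<>();
            TreeMap<String, String> dirs = new TreeMap<>();
            files.put("a/file1", "FILE");
            dirs.put("a/b", "DIR");
            System.out.println(files.keySet() + " then " + dirs.keySet());
            // prints: [a/file1] then [a/b]
        }
    }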
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java
index af3bc82..af5c4df 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
@@ -119,6 +120,11 @@ public class OMKeyDeleteRequestV1 extends OMKeyDeleteRequest {
       }
 
       OmKeyInfo omKeyInfo = keyStatus.getKeyInfo();
+      // New key format for the fileTable & dirTable.
+      // For example, if the user-given key path is '/a/b/c/d/e/file1', the
+      // DB keyName field stores only the leaf node name, which is 'file1'.
+      String fileName = OzoneFSUtils.getFileName(keyName);
+      omKeyInfo.setKeyName(fileName);
 
       // Set the UpdateID to current transactionLogIndex
       omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
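
The comment in this hunk says the V1 tables keep only the leaf node name in
the keyName field. A tiny illustrative sketch of that convention follows; the
helper imitates what OzoneFSUtils.getFileName is assumed to do here (take
everything after the last '/') and is not the real implementation.

    // Illustrative only: leaf-name extraction as assumed for
    // OzoneFSUtils.getFileName(keyName).
    public final class LeafNameSketch {
        static String leafName(String keyName) {
            int idx = keyName.lastIndexOf('/');
            return idx < 0 ? keyName : keyName.substring(idx + 1);
        }

        public static void main(String[] args) {
            System.out.println(leafName("a/b/c/d/e/file1"));   // file1
        }
    }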


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 18/19: HDDS-4813. [FSO]S3Multipart: Implement UploadCompleteRequest (#1923)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit fd393dacf9b453f4429b2cab9f15d5b9dcda4555
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Wed Feb 17 20:50:05 2021 +0530

    HDDS-4813. [FSO]S3Multipart: Implement UploadCompleteRequest (#1923)
---
 .../rpc/TestOzoneClientMultipartUploadV1.java      | 274 ++++++++++++++--
 .../om/ratis/utils/OzoneManagerRatisUtils.java     |   4 +
 .../ozone/om/request/file/OMFileRequest.java       |   4 +-
 .../ozone/om/request/key/OMKeyCommitRequestV1.java |   2 +-
 .../S3MultipartUploadCommitPartRequestV1.java      |  39 ++-
 .../S3MultipartUploadCompleteRequest.java          | 348 +++++++++++++--------
 .../S3MultipartUploadCompleteRequestV1.java        | 268 ++++++++++++++++
 .../S3MultipartUploadCompleteResponse.java         |  13 +-
 ...va => S3MultipartUploadCompleteResponseV1.java} |  60 ++--
 .../s3/multipart/TestS3MultipartRequest.java       |   9 +-
 .../TestS3MultipartUploadCompleteRequest.java      | 118 ++++++-
 .../TestS3MultipartUploadCompleteRequestV1.java    | 132 ++++++++
 .../s3/multipart/TestS3MultipartResponse.java      |  21 ++
 .../TestS3MultipartUploadCompleteResponseV1.java   | 257 +++++++++++++++
 14 files changed, 1330 insertions(+), 219 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java
index af241c5..1ab2cc3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java
@@ -17,24 +17,29 @@
 
 package org.apache.hadoop.ozone.client.rpc;
 
+import org.apache.commons.lang3.RandomUtils;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneTestUtils;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.hadoop.hdds.StringUtils.string2Bytes;
 import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNotNull;
 
+import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -44,10 +49,15 @@ import org.junit.Test;
 import org.junit.rules.Timeout;
 
 import java.io.IOException;
+import java.util.Arrays;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.TreeMap;
 import java.util.UUID;
 
 import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
 import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR;
 
 /**
  * This test verifies all the S3 multipart client apis - layout version V1.
@@ -133,24 +143,24 @@ public class TestOzoneClientMultipartUploadV1 {
     OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
             STAND_ALONE, ONE);
 
-    assertNotNull(multipartInfo);
+    Assert.assertNotNull(multipartInfo);
     String uploadID = multipartInfo.getUploadID();
     Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
     Assert.assertEquals(bucketName, multipartInfo.getBucketName());
     Assert.assertEquals(keyName, multipartInfo.getKeyName());
-    assertNotNull(multipartInfo.getUploadID());
+    Assert.assertNotNull(multipartInfo.getUploadID());
 
     // Call initiate multipart upload for the same key again, this should
     // generate a new uploadID.
     multipartInfo = bucket.initiateMultipartUpload(keyName,
             STAND_ALONE, ONE);
 
-    assertNotNull(multipartInfo);
+    Assert.assertNotNull(multipartInfo);
     Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
     Assert.assertEquals(bucketName, multipartInfo.getBucketName());
     Assert.assertEquals(keyName, multipartInfo.getKeyName());
-    assertNotEquals(multipartInfo.getUploadID(), uploadID);
-    assertNotNull(multipartInfo.getUploadID());
+    Assert.assertNotEquals(multipartInfo.getUploadID(), uploadID);
+    Assert.assertNotNull(multipartInfo.getUploadID());
   }
 
   @Test
@@ -166,23 +176,23 @@ public class TestOzoneClientMultipartUploadV1 {
     OzoneBucket bucket = volume.getBucket(bucketName);
     OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName);
 
-    assertNotNull(multipartInfo);
+    Assert.assertNotNull(multipartInfo);
     String uploadID = multipartInfo.getUploadID();
     Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
     Assert.assertEquals(bucketName, multipartInfo.getBucketName());
     Assert.assertEquals(keyName, multipartInfo.getKeyName());
-    assertNotNull(multipartInfo.getUploadID());
+    Assert.assertNotNull(multipartInfo.getUploadID());
 
     // Call initiate multipart upload for the same key again, this should
     // generate a new uploadID.
     multipartInfo = bucket.initiateMultipartUpload(keyName);
 
-    assertNotNull(multipartInfo);
+    Assert.assertNotNull(multipartInfo);
     Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
     Assert.assertEquals(bucketName, multipartInfo.getBucketName());
     Assert.assertEquals(keyName, multipartInfo.getKeyName());
-    assertNotEquals(multipartInfo.getUploadID(), uploadID);
-    assertNotNull(multipartInfo.getUploadID());
+    Assert.assertNotEquals(multipartInfo.getUploadID(), uploadID);
+    Assert.assertNotNull(multipartInfo.getUploadID());
   }
 
   @Test
@@ -199,12 +209,12 @@ public class TestOzoneClientMultipartUploadV1 {
     OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
             STAND_ALONE, ONE);
 
-    assertNotNull(multipartInfo);
+    Assert.assertNotNull(multipartInfo);
     String uploadID = multipartInfo.getUploadID();
     Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
     Assert.assertEquals(bucketName, multipartInfo.getBucketName());
     Assert.assertEquals(keyName, multipartInfo.getKeyName());
-    assertNotNull(multipartInfo.getUploadID());
+    Assert.assertNotNull(multipartInfo.getUploadID());
 
     OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
             sampleData.length(), 1, uploadID);
@@ -214,8 +224,8 @@ public class TestOzoneClientMultipartUploadV1 {
     OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream
             .getCommitUploadPartInfo();
 
-    assertNotNull(commitUploadPartInfo);
-    assertNotNull(commitUploadPartInfo.getPartName());
+    Assert.assertNotNull(commitUploadPartInfo);
+    Assert.assertNotNull(commitUploadPartInfo.getPartName());
   }
 
   @Test
@@ -233,12 +243,12 @@ public class TestOzoneClientMultipartUploadV1 {
     OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
             ReplicationType.RATIS, THREE);
 
-    assertNotNull(multipartInfo);
+    Assert.assertNotNull(multipartInfo);
     String uploadID = multipartInfo.getUploadID();
     Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
     Assert.assertEquals(bucketName, multipartInfo.getBucketName());
     Assert.assertEquals(keyName, multipartInfo.getKeyName());
-    assertNotNull(multipartInfo.getUploadID());
+    Assert.assertNotNull(multipartInfo.getUploadID());
 
     int partNumber = 1;
 
@@ -250,9 +260,9 @@ public class TestOzoneClientMultipartUploadV1 {
     OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream
             .getCommitUploadPartInfo();
 
-    assertNotNull(commitUploadPartInfo);
+    Assert.assertNotNull(commitUploadPartInfo);
     String partName = commitUploadPartInfo.getPartName();
-    assertNotNull(commitUploadPartInfo.getPartName());
+    Assert.assertNotNull(commitUploadPartInfo.getPartName());
 
     //Overwrite the part by creating part key with same part number.
     sampleData = "sample Data Changed";
@@ -264,12 +274,230 @@ public class TestOzoneClientMultipartUploadV1 {
     commitUploadPartInfo = ozoneOutputStream
             .getCommitUploadPartInfo();
 
-    assertNotNull(commitUploadPartInfo);
-    assertNotNull(commitUploadPartInfo.getPartName());
+    Assert.assertNotNull(commitUploadPartInfo);
+    Assert.assertNotNull(commitUploadPartInfo.getPartName());
 
     // PartName should be different from old part Name.
-    assertNotEquals("Part names should be different", partName,
+    Assert.assertNotEquals("Part names should be different", partName,
             commitUploadPartInfo.getPartName());
   }
 
+  @Test
+  public void testMultipartUploadWithPartsLessThanMinSize() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    // Initiate multipart upload
+    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+            ONE);
+
+    // Upload Parts
+    Map<Integer, String> partsMap = new TreeMap<>();
+    // Uploading part 1 with less than min size
+    String partName = uploadPart(bucket, keyName, uploadID, 1,
+            "data".getBytes(UTF_8));
+    partsMap.put(1, partName);
+
+    partName = uploadPart(bucket, keyName, uploadID, 2,
+            "data".getBytes(UTF_8));
+    partsMap.put(2, partName);
+
+    // Complete multipart upload
+    OzoneTestUtils.expectOmException(OMException.ResultCodes.ENTITY_TOO_SMALL,
+        () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap));
+  }
+
+  @Test
+  public void testMultipartUploadWithPartsMisMatchWithListSizeDifferent()
+          throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+            ONE);
+
+    // We have not uploaded any parts, but passing a non-empty parts list
+    // should throw an error.
+    TreeMap<Integer, String> partsMap = new TreeMap<>();
+    partsMap.put(1, UUID.randomUUID().toString());
+
+    OzoneTestUtils.expectOmException(OMException.ResultCodes.INVALID_PART,
+        () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap));
+  }
+
+  @Test
+  public void testMultipartUploadWithPartsMisMatchWithIncorrectPartName()
+          throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+            ONE);
+
+    uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
+
+    // Passing an incorrect part name should throw an INVALID_PART error.
+    TreeMap<Integer, String> partsMap = new TreeMap<>();
+    partsMap.put(1, UUID.randomUUID().toString());
+
+    OzoneTestUtils.expectOmException(OMException.ResultCodes.INVALID_PART,
+        () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap));
+  }
+
+  @Test
+  public void testMultipartUploadWithMissingParts() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+            ONE);
+
+    uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
+
+    // Passing an incorrect part number should throw an INVALID_PART error.
+    TreeMap<Integer, String> partsMap = new TreeMap<>();
+    partsMap.put(3, "random");
+
+    OzoneTestUtils.expectOmException(OMException.ResultCodes.INVALID_PART,
+        () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap));
+  }
+
+  @Test
+  public void testCommitPartAfterCompleteUpload() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String parentDir = "a/b/c/d/";
+    String keyName = parentDir + UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName,
+            STAND_ALONE, ONE);
+
+    Assert.assertNotNull(omMultipartInfo.getUploadID());
+
+    String uploadID = omMultipartInfo.getUploadID();
+
+    // upload part 1.
+    byte[] data = generateData(5 * 1024 * 1024,
+            (byte) RandomUtils.nextLong());
+    OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
+            data.length, 1, uploadID);
+    ozoneOutputStream.write(data, 0, data.length);
+    ozoneOutputStream.close();
+
+    OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo =
+            ozoneOutputStream.getCommitUploadPartInfo();
+
+    // Do not close output stream for part 2.
+    ozoneOutputStream = bucket.createMultipartKey(keyName,
+            data.length, 2, omMultipartInfo.getUploadID());
+    ozoneOutputStream.write(data, 0, data.length);
+
+    Map<Integer, String> partsMap = new LinkedHashMap<>();
+    partsMap.put(1, omMultipartCommitUploadPartInfo.getPartName());
+    OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo =
+            bucket.completeMultipartUpload(keyName,
+                    uploadID, partsMap);
+    Assert.assertNotNull(omMultipartUploadCompleteInfo);
+
+    Assert.assertNotNull(omMultipartCommitUploadPartInfo);
+
+    byte[] fileContent = new byte[data.length];
+    OzoneInputStream inputStream = bucket.readKey(keyName);
+    inputStream.read(fileContent);
+    StringBuilder sb = new StringBuilder(data.length);
+
+    // Combine all parts' data and verify it matches the data read back.
+    String part1 = new String(data, UTF_8);
+    sb.append(part1);
+    Assert.assertEquals(sb.toString(), new String(fileContent, UTF_8));
+
+    try {
+      ozoneOutputStream.close();
+      Assert.fail("testCommitPartAfterCompleteUpload failed");
+    } catch (IOException ex) {
+      Assert.assertTrue(ex instanceof OMException);
+      Assert.assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR,
+              ((OMException) ex).getResult());
+    }
+  }
+
+  private String initiateMultipartUpload(OzoneBucket bucket, String keyName,
+      ReplicationType replicationType, ReplicationFactor replicationFactor)
+          throws Exception {
+    OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
+            replicationType, replicationFactor);
+
+    String uploadID = multipartInfo.getUploadID();
+    Assert.assertNotNull(uploadID);
+
+    return uploadID;
+  }
+
+  private String uploadPart(OzoneBucket bucket, String keyName, String
+      uploadID, int partNumber, byte[] data) throws Exception {
+
+    OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
+            data.length, partNumber, uploadID);
+    ozoneOutputStream.write(data, 0,
+            data.length);
+    ozoneOutputStream.close();
+
+    OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo =
+            ozoneOutputStream.getCommitUploadPartInfo();
+
+    Assert.assertNotNull(omMultipartCommitUploadPartInfo);
+    Assert.assertNotNull(omMultipartCommitUploadPartInfo.getPartName());
+
+    return omMultipartCommitUploadPartInfo.getPartName();
+  }
+
+  private void completeMultipartUpload(OzoneBucket bucket, String keyName,
+      String uploadID, Map<Integer, String> partsMap) throws Exception {
+    OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = bucket
+            .completeMultipartUpload(keyName, uploadID, partsMap);
+
+    Assert.assertNotNull(omMultipartUploadCompleteInfo);
+    Assert.assertEquals(omMultipartUploadCompleteInfo.getBucket(), bucket
+            .getName());
+    Assert.assertEquals(omMultipartUploadCompleteInfo.getVolume(), bucket
+            .getVolumeName());
+    Assert.assertEquals(omMultipartUploadCompleteInfo.getKey(), keyName);
+    Assert.assertNotNull(omMultipartUploadCompleteInfo.getHash());
+  }
+
+  private byte[] generateData(int size, byte val) {
+    byte[] chars = new byte[size];
+    Arrays.fill(chars, val);
+    return chars;
+  }
+
 }
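
Taken together, the new tests exercise the whole client-side flow. A condensed happy-path sketch distilled from the helpers above ('bucket' is an existing OzoneBucket; exception handling omitted; every non-final part must still meet the minimum part size):

    String uploadID = bucket.initiateMultipartUpload(keyName, STAND_ALONE, ONE)
            .getUploadID();

    byte[] data = "part-content".getBytes(UTF_8);
    OzoneOutputStream out = bucket.createMultipartKey(keyName, data.length, 1,
            uploadID);
    out.write(data, 0, data.length);
    out.close();
    String partName = out.getCommitUploadPartInfo().getPartName();

    Map<Integer, String> partsMap = new TreeMap<>();
    partsMap.put(1, partName);
    bucket.completeMultipartUpload(keyName, uploadID, partsMap);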
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index 40a5396..2b39b8d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -62,6 +62,7 @@ import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadAbortReq
 import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCommitPartRequest;
 import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCommitPartRequestV1;
 import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCompleteRequest;
+import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCompleteRequestV1;
 import org.apache.hadoop.ozone.om.request.s3.security.S3GetSecretRequest;
 import org.apache.hadoop.ozone.om.request.security.OMCancelDelegationTokenRequest;
 import org.apache.hadoop.ozone.om.request.security.OMGetDelegationTokenRequest;
@@ -196,6 +197,9 @@ public final class OzoneManagerRatisUtils {
     case AbortMultiPartUpload:
       return new S3MultipartUploadAbortRequest(omRequest);
     case CompleteMultiPartUpload:
+      if (isBucketFSOptimized()) {
+        return new S3MultipartUploadCompleteRequestV1(omRequest);
+      }
       return new S3MultipartUploadCompleteRequest(omRequest);
     case AddAcl:
     case RemoveAcl:
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index 7f2d2c5..ebf86ce 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -542,9 +542,10 @@ public final class OMFileRequest {
    * @param omMetadataMgr
    * @param batchOp
    * @param omFileInfo
+   * @return db file key
    * @throws IOException
    */
-  public static void addToFileTable(OMMetadataManager omMetadataMgr,
+  public static String addToFileTable(OMMetadataManager omMetadataMgr,
                                     BatchOperation batchOp,
                                     OmKeyInfo omFileInfo)
           throws IOException {
@@ -554,6 +555,7 @@ public final class OMFileRequest {
 
     omMetadataMgr.getKeyTable().putWithBatch(batchOp,
             dbFileKey, omFileInfo);
+    return dbFileKey;
   }
 
   /**
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java
index 3c49610..64991de 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java
@@ -143,7 +143,7 @@ public class OMKeyCommitRequestV1 extends OMKeyCommitRequest {
       omKeyInfo.setModificationTime(commitKeyArgs.getModificationTime());
 
       // Update the block length for each block
-      omKeyInfo.updateLocationInfoList(locationInfoList);
+      omKeyInfo.updateLocationInfoList(locationInfoList, false);
 
       // Set the UpdateID to current transactionLogIndex
       omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequestV1.java
index 5546010..7aa21cf 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequestV1.java
@@ -86,7 +86,8 @@ public class S3MultipartUploadCommitPartRequestV1
     boolean acquiredLock = false;
 
     IOException exception = null;
-    String partName = null;
+    String dbPartName;
+    String fullKeyPartName = null;
     OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
         getOmRequest());
     OMClientResponse omClientResponse = null;
@@ -95,8 +96,8 @@ public class S3MultipartUploadCommitPartRequestV1
     OmKeyInfo omKeyInfo = null;
     String multipartKey = null;
     OmMultipartKeyInfo multipartKeyInfo = null;
-    Result result = null;
-    OmBucketInfo omBucketInfo = null;
+    Result result;
+    OmBucketInfo omBucketInfo;
     OmBucketInfo copyBucketInfo = null;
     try {
       keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
@@ -141,14 +142,24 @@ public class S3MultipartUploadCommitPartRequestV1
       omKeyInfo.setDataSize(keyArgs.getDataSize());
       omKeyInfo.updateLocationInfoList(keyArgs.getKeyLocationsList().stream()
           .map(OmKeyLocationInfo::getFromProtobuf)
-          .collect(Collectors.toList()));
+          .collect(Collectors.toList()), true);
       // Set Modification time
       omKeyInfo.setModificationTime(keyArgs.getModificationTime());
       // Set the UpdateID to current transactionLogIndex
       omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
 
-      String ozoneKey = omMetadataManager.getOzonePathKey(parentID, fileName);
-      partName = ozoneKey + clientID;
+      /**
+       * The format of the PartName stored in the MultipartInfoTable is
+       * "fileName + clientID".
+       *
+       * The contract is that all part names present in a multipart info
+       * will have the same key prefix path.
+       *
+       * For example:
+       *        /vol1/buck1/a/b/c/part-1, /vol1/buck1/a/b/c/part-2,
+       *        /vol1/buck1/a/b/c/part-n
+       */
+      dbPartName = fileName + clientID;
 
       if (multipartKeyInfo == null) {
         // This can occur when user started uploading part by the time commit
@@ -168,9 +179,9 @@ public class S3MultipartUploadCommitPartRequestV1
       // Build this multipart upload part info.
       OzoneManagerProtocolProtos.PartKeyInfo.Builder partKeyInfo =
           OzoneManagerProtocolProtos.PartKeyInfo.newBuilder();
-      partKeyInfo.setPartName(partName);
+      partKeyInfo.setPartName(dbPartName);
       partKeyInfo.setPartNumber(partNumber);
-      partKeyInfo.setPartKeyInfo(omKeyInfo.getProtobuf(
+      partKeyInfo.setPartKeyInfo(omKeyInfo.getProtobuf(fileName,
           getOmRequest().getVersion()));
 
       // Add this part information in to multipartKeyInfo.
@@ -207,9 +218,15 @@ public class S3MultipartUploadCommitPartRequestV1
           keyArgs.getKeyLocationsList().size() * scmBlockSize * factor;
       omBucketInfo.incrUsedBytes(correctedSpace);
 
+      // Prepare the response. Set the user-given full key part name in the
+      // 'partName' attribute of the response object.
+      String fullOzoneKeyName = omMetadataManager.getOzoneKey(
+              volumeName, bucketName, keyName);
+      fullKeyPartName = fullOzoneKeyName + clientID;
       omResponse.setCommitMultiPartUploadResponse(
           MultipartCommitUploadPartResponse.newBuilder()
-              .setPartName(partName));
+              .setPartName(fullKeyPartName));
+
       omClientResponse = new S3MultipartUploadCommitPartResponseV1(
           omResponse.build(), multipartKey, openKey,
           multipartKeyInfo, oldPartKeyInfo, omKeyInfo,
@@ -234,8 +251,8 @@ public class S3MultipartUploadCommitPartRequestV1
     }
 
     logResult(ozoneManager, multipartCommitUploadPartRequest, keyArgs,
-            auditMap, volumeName, bucketName, keyName, exception, partName,
-            result);
+            auditMap, volumeName, bucketName, keyName, exception,
+            fullKeyPartName, result);
 
     return omClientResponse;
   }
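
The commit-part change above splits the part name into two forms: a short form persisted in the MultipartInfoTable and the full key form returned to the client. A sketch with purely illustrative values:

    String fileName = "part-file";            // leaf name under the FSO layout
    long clientID = 10001L;                   // per-upload client identifier
    String dbPartName = fileName + clientID;  // stored form: "part-file10001"

    // Response form: the full ozone key plus the clientID, so S3 clients keep
    // seeing the same part-name shape as before.
    String fullOzoneKeyName = "/vol1/buck1/a/b/c/part-file";
    String fullKeyPartName = fullOzoneKeyName + clientID;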
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
index 4590889..62c892d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
@@ -18,14 +18,12 @@
 
 package org.apache.hadoop.ozone.om.request.s3.multipart;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-
+import com.google.common.base.Optional;
+import org.apache.commons.codec.digest.DigestUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.audit.OMAction;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
@@ -36,7 +34,9 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
@@ -49,15 +49,19 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMReque
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo;
 import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import com.google.common.base.Optional;
-import org.apache.commons.codec.digest.DigestUtils;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+import static org.apache.hadoop.ozone.OzoneConsts.OM_MULTIPART_MIN_SIZE;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * Handle Multipart upload complete request.
@@ -169,129 +173,25 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
         }
 
         // First Check for Invalid Part Order.
-        int prevPartNumber = partsList.get(0).getPartNumber();
         List< Integer > partNumbers = new ArrayList<>();
-        int partsListSize = partsList.size();
-        partNumbers.add(prevPartNumber);
-        for (int i = 1; i < partsListSize; i++) {
-          int currentPartNumber = partsList.get(i).getPartNumber();
-          if (prevPartNumber >= currentPartNumber) {
-            LOG.error("PartNumber at index {} is {}, and its previous " +
-                    "partNumber at index {} is {} for ozonekey is " +
-                    "{}", i, currentPartNumber, i - 1, prevPartNumber,
-                ozoneKey);
-            throw new OMException(
-                failureMessage(requestedVolume, requestedBucket, keyName) +
-                " because parts are in Invalid order.",
-                OMException.ResultCodes.INVALID_PART_ORDER);
-          }
-          prevPartNumber = currentPartNumber;
-          partNumbers.add(prevPartNumber);
-        }
-
+        int partsListSize = getPartsListSize(requestedVolume,
+                requestedBucket, keyName, ozoneKey, partNumbers, partsList);
 
         List<OmKeyLocationInfo> partLocationInfos = new ArrayList<>();
-        long dataSize = 0;
-        int currentPartCount = 0;
-        // Now do actual logic, and check for any Invalid part during this.
-        for (OzoneManagerProtocolProtos.Part part : partsList) {
-          currentPartCount++;
-          int partNumber = part.getPartNumber();
-          String partName = part.getPartName();
-
-          PartKeyInfo partKeyInfo = partKeyInfoMap.get(partNumber);
-
-          if (partKeyInfo == null ||
-              !partName.equals(partKeyInfo.getPartName())) {
-            String omPartName = partKeyInfo == null ? null :
-                partKeyInfo.getPartName();
-            throw new OMException(
-                failureMessage(requestedVolume, requestedBucket, keyName) +
-                ". Provided Part info is { " + partName + ", " + partNumber +
-                "}, whereas OM has partName " + omPartName,
-                OMException.ResultCodes.INVALID_PART);
-          }
-
-          OmKeyInfo currentPartKeyInfo = OmKeyInfo
-              .getFromProtobuf(partKeyInfo.getPartKeyInfo());
-
-          // Except for last part all parts should have minimum size.
-          if (currentPartCount != partsListSize) {
-            if (currentPartKeyInfo.getDataSize() <
-                ozoneManager.getMinMultipartUploadPartSize()) {
-              LOG.error("MultipartUpload: {} Part number: {} size {}  is less" +
-                      " than minimum part size {}", ozoneKey,
-                  partKeyInfo.getPartNumber(), currentPartKeyInfo.getDataSize(),
-                  ozoneManager.getMinMultipartUploadPartSize());
-              throw new OMException(
-                  failureMessage(requestedVolume, requestedBucket, keyName) +
-                  ". Entity too small.",
-                  OMException.ResultCodes.ENTITY_TOO_SMALL);
-            }
-          }
-
-          // As all part keys will have only one version.
-          OmKeyLocationInfoGroup currentKeyInfoGroup = currentPartKeyInfo
-              .getKeyLocationVersions().get(0);
-
-          // Set partNumber in each block.
-          currentKeyInfoGroup.getLocationList().forEach(
-              omKeyLocationInfo -> omKeyLocationInfo.setPartNumber(partNumber));
-
-          partLocationInfos.addAll(currentKeyInfoGroup.getLocationList());
-          dataSize += currentPartKeyInfo.getDataSize();
-        }
+        long dataSize = getMultipartDataSize(requestedVolume, requestedBucket,
+                keyName, ozoneKey, partKeyInfoMap, partsListSize,
+                partLocationInfos, partsList, ozoneManager);
 
         // All parts have same replication information. Here getting from last
         // part.
-        HddsProtos.ReplicationType type = partKeyInfoMap.lastEntry().getValue()
-            .getPartKeyInfo().getType();
-        HddsProtos.ReplicationFactor factor =
-            partKeyInfoMap.lastEntry().getValue().getPartKeyInfo().getFactor();
-
-        OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
-        if (omKeyInfo == null) {
-          // This is a newly added key, it does not have any versions.
-          OmKeyLocationInfoGroup keyLocationInfoGroup = new
-              OmKeyLocationInfoGroup(0, partLocationInfos, true);
-
-          // Get the objectID of the key from OpenKeyTable
-          OmKeyInfo dbOpenKeyInfo = omMetadataManager.getOpenKeyTable()
-              .get(multipartKey);
-
-          // A newly created key, this is the first version.
-          OmKeyInfo.Builder builder =
-              new OmKeyInfo.Builder().setVolumeName(volumeName)
-              .setBucketName(bucketName).setKeyName(keyName)
-              .setReplicationFactor(factor).setReplicationType(type)
-              .setCreationTime(keyArgs.getModificationTime())
-              .setModificationTime(keyArgs.getModificationTime())
-              .setDataSize(dataSize)
-              .setFileEncryptionInfo(dbOpenKeyInfo.getFileEncryptionInfo())
-              .setOmKeyLocationInfos(
-                  Collections.singletonList(keyLocationInfoGroup))
-              .setAcls(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList()));
-          // Check if db entry has ObjectID. This check is required because
-          // it is possible that between multipart key uploads and complete,
-          // we had an upgrade.
-          if (dbOpenKeyInfo.getObjectID() != 0) {
-            builder.setObjectID(dbOpenKeyInfo.getObjectID());
-          }
-          omKeyInfo = builder.build();
-        } else {
-          // Already a version exists, so we should add it as a new version.
-          // But now as versioning is not supported, just following the commit
-          // key approach. When versioning support comes, then we can uncomment
-          // below code keyInfo.addNewVersion(locations);
-          omKeyInfo.updateLocationInfoList(partLocationInfos, true);
-          omKeyInfo.setModificationTime(keyArgs.getModificationTime());
-          omKeyInfo.setDataSize(dataSize);
-        }
-        omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
+        OmKeyInfo omKeyInfo = getOmKeyInfo(ozoneManager, trxnLogIndex, keyArgs,
+                volumeName, bucketName, keyName, multipartKey,
+                omMetadataManager, ozoneKey, partKeyInfoMap, partLocationInfos,
+                dataSize);
 
         //Find all unused parts.
-        List< OmKeyInfo > unUsedParts = new ArrayList<>();
-        for (Map.Entry< Integer, PartKeyInfo > partKeyInfo :
+        List<OmKeyInfo> unUsedParts = new ArrayList<>();
+        for (Map.Entry< Integer, PartKeyInfo> partKeyInfo :
             partKeyInfoMap.entrySet()) {
           if (!partNumbers.contains(partKeyInfo.getKey())) {
             unUsedParts.add(OmKeyInfo
@@ -334,6 +234,19 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
       }
     }
 
+    logResult(ozoneManager, multipartUploadCompleteRequest, partsList,
+            auditMap, volumeName, bucketName, keyName, exception, result);
+
+    return omClientResponse;
+  }
+
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  protected void logResult(OzoneManager ozoneManager,
+      MultipartUploadCompleteRequest multipartUploadCompleteRequest,
+      List<OzoneManagerProtocolProtos.Part> partsList,
+      Map<String, String> auditMap, String volumeName,
+      String bucketName, String keyName, IOException exception,
+      Result result) {
     auditMap.put(OzoneConsts.MULTIPART_LIST, partsList.toString());
 
     // audit log
@@ -356,8 +269,183 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
       LOG.error("Unrecognized Result for S3MultipartUploadCommitRequest: {}",
           multipartUploadCompleteRequest);
     }
+  }
 
-    return omClientResponse;
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  protected OmKeyInfo getOmKeyInfo(OzoneManager ozoneManager, long trxnLogIndex,
+      KeyArgs keyArgs, String volumeName, String bucketName, String keyName,
+      String multipartKey, OMMetadataManager omMetadataManager,
+      String ozoneKey, TreeMap<Integer, PartKeyInfo> partKeyInfoMap,
+      List<OmKeyLocationInfo> partLocationInfos, long dataSize)
+          throws IOException {
+    HddsProtos.ReplicationType type = partKeyInfoMap.lastEntry().getValue()
+        .getPartKeyInfo().getType();
+    HddsProtos.ReplicationFactor factor =
+        partKeyInfoMap.lastEntry().getValue().getPartKeyInfo().getFactor();
+
+    OmKeyInfo omKeyInfo = getOmKeyInfoFromKeyTable(ozoneKey, keyName,
+            omMetadataManager);
+    if (omKeyInfo == null) {
+      // This is a newly added key, it does not have any versions.
+      OmKeyLocationInfoGroup keyLocationInfoGroup = new
+          OmKeyLocationInfoGroup(0, partLocationInfos, true);
+
+      // Get the objectID of the key from OpenKeyTable
+      OmKeyInfo dbOpenKeyInfo = getOmKeyInfoFromOpenKeyTable(multipartKey,
+              keyName, omMetadataManager);
+
+      // A newly created key, this is the first version.
+      OmKeyInfo.Builder builder =
+          new OmKeyInfo.Builder().setVolumeName(volumeName)
+          .setBucketName(bucketName).setKeyName(dbOpenKeyInfo.getKeyName())
+          .setReplicationFactor(factor).setReplicationType(type)
+          .setCreationTime(keyArgs.getModificationTime())
+          .setModificationTime(keyArgs.getModificationTime())
+          .setDataSize(dataSize)
+          .setFileEncryptionInfo(dbOpenKeyInfo.getFileEncryptionInfo())
+          .setOmKeyLocationInfos(
+              Collections.singletonList(keyLocationInfoGroup))
+          .setAcls(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList()));
+      // Check if db entry has ObjectID. This check is required because
+      // it is possible that between multipart key uploads and complete,
+      // we had an upgrade.
+      if (dbOpenKeyInfo.getObjectID() != 0) {
+        builder.setObjectID(dbOpenKeyInfo.getObjectID());
+      }
+      updatePrefixFSOInfo(dbOpenKeyInfo, builder);
+      omKeyInfo = builder.build();
+    } else {
+      // Already a version exists, so we should add it as a new version.
+      // But now as versioning is not supported, just following the commit
+      // key approach. When versioning support comes, then we can uncomment
+      // below code keyInfo.addNewVersion(locations);
+      omKeyInfo.updateLocationInfoList(partLocationInfos, true);
+      omKeyInfo.setModificationTime(keyArgs.getModificationTime());
+      omKeyInfo.setDataSize(dataSize);
+    }
+    omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
+    return omKeyInfo;
+  }
+
+  protected void updatePrefixFSOInfo(OmKeyInfo dbOpenKeyInfo,
+      OmKeyInfo.Builder builder) {
+    // FSOBucket is disabled. Do nothing.
+  }
+
+  protected OmKeyInfo getOmKeyInfoFromKeyTable(String dbOzoneKey,
+      String keyName, OMMetadataManager omMetadataManager) throws IOException {
+    return omMetadataManager.getKeyTable().get(dbOzoneKey);
+  }
+
+  protected OmKeyInfo getOmKeyInfoFromOpenKeyTable(String dbMultipartKey,
+      String keyName, OMMetadataManager omMetadataManager) throws IOException {
+    return omMetadataManager.getOpenKeyTable().get(dbMultipartKey);
+  }
+
+  protected int getPartsListSize(String requestedVolume,
+      String requestedBucket, String keyName, String ozoneKey,
+      List<Integer> partNumbers,
+      List<OzoneManagerProtocolProtos.Part> partsList) throws OMException {
+    int prevPartNumber = partsList.get(0).getPartNumber();
+    int partsListSize = partsList.size();
+    partNumbers.add(prevPartNumber);
+    for (int i = 1; i < partsListSize; i++) {
+      int currentPartNumber = partsList.get(i).getPartNumber();
+      if (prevPartNumber >= currentPartNumber) {
+        LOG.error("PartNumber at index {} is {}, and its previous " +
+                "partNumber at index {} is {} for ozonekey is " +
+                "{}", i, currentPartNumber, i - 1, prevPartNumber,
+            ozoneKey);
+        throw new OMException(
+            failureMessage(requestedVolume, requestedBucket, keyName) +
+            " because parts are in Invalid order.",
+            OMException.ResultCodes.INVALID_PART_ORDER);
+      }
+      prevPartNumber = currentPartNumber;
+      partNumbers.add(prevPartNumber);
+    }
+    return partsListSize;
+  }
+
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  protected long getMultipartDataSize(String requestedVolume,
+      String requestedBucket, String keyName, String ozoneKey,
+      TreeMap<Integer, PartKeyInfo> partKeyInfoMap,
+      int partsListSize, List<OmKeyLocationInfo> partLocationInfos,
+      List<OzoneManagerProtocolProtos.Part> partsList,
+      OzoneManager ozoneManager) throws OMException {
+    long dataSize = 0;
+    int currentPartCount = 0;
+    // Now do actual logic, and check for any Invalid part during this.
+    for (OzoneManagerProtocolProtos.Part part : partsList) {
+      currentPartCount++;
+      int partNumber = part.getPartNumber();
+      String partName = part.getPartName();
+
+      PartKeyInfo partKeyInfo = partKeyInfoMap.get(partNumber);
+
+      String dbPartName = null;
+      if (partKeyInfo != null) {
+        dbPartName = preparePartName(requestedVolume, requestedBucket, keyName,
+                partKeyInfo, ozoneManager.getMetadataManager());
+      }
+      if (!StringUtils.equals(partName, dbPartName)) {
+        String omPartName = partKeyInfo == null ? null : dbPartName;
+        throw new OMException(
+            failureMessage(requestedVolume, requestedBucket, keyName) +
+            ". Provided Part info is { " + partName + ", " + partNumber +
+            "}, whereas OM has partName " + omPartName,
+            OMException.ResultCodes.INVALID_PART);
+      }
+
+      OmKeyInfo currentPartKeyInfo = OmKeyInfo
+          .getFromProtobuf(partKeyInfo.getPartKeyInfo());
+
+      // Except for last part all parts should have minimum size.
+      if (currentPartCount != partsListSize) {
+        if (currentPartKeyInfo.getDataSize() <
+            ozoneManager.getMinMultipartUploadPartSize()) {
+          LOG.error("MultipartUpload: {} Part number: {} size {}  is less" +
+                  " than minimum part size {}", ozoneKey,
+              partKeyInfo.getPartNumber(), currentPartKeyInfo.getDataSize(),
+              ozoneManager.getMinMultipartUploadPartSize());
+          throw new OMException(
+              failureMessage(requestedVolume, requestedBucket, keyName) +
+                  ". Entity too small.",
+              OMException.ResultCodes.ENTITY_TOO_SMALL);
+        }
+      }
+
+      // As all part keys will have only one version.
+      OmKeyLocationInfoGroup currentKeyInfoGroup = currentPartKeyInfo
+          .getKeyLocationVersions().get(0);
+
+      // Set partNumber in each block.
+      currentKeyInfoGroup.getLocationList().forEach(
+          omKeyLocationInfo -> omKeyLocationInfo.setPartNumber(partNumber));
+
+      partLocationInfos.addAll(currentKeyInfoGroup.getLocationList());
+      dataSize += currentPartKeyInfo.getDataSize();
+    }
+    return dataSize;
+  }
+
+  private String preparePartName(String requestedVolume,
+      String requestedBucket, String keyName, PartKeyInfo partKeyInfo,
+      OMMetadataManager omMetadataManager) {
+
+    String partName;
+    if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
+      String parentPath = OzoneFSUtils.getParent(keyName);
+      StringBuffer keyPath = new StringBuffer(parentPath);
+      keyPath.append(partKeyInfo.getPartName());
+
+      partName = omMetadataManager.getOzoneKey(requestedVolume,
+              requestedBucket, keyPath.toString());
+    } else {
+      partName = partKeyInfo.getPartName();
+    }
+    return partName;
   }
 
   private static String failureMessage(String volume, String bucket,
@@ -366,7 +454,7 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
         volume + " bucket: " + bucket + " key: " + keyName;
   }
 
-  private void updateCache(OMMetadataManager omMetadataManager,
+  protected void updateCache(OMMetadataManager omMetadataManager,
       String ozoneKey, String multipartKey, OmKeyInfo omKeyInfo,
       long transactionLogIndex) {
     // Update cache.
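
preparePartName above rebuilds the client-comparable full name for FSO buckets from the requested key's parent path plus the short DB part name. A hedged walk-through, assuming OzoneFSUtils.getParent keeps the trailing slash:

    String keyName = "a/b/c/part-file";
    String dbPartName = "part-file10001";     // short form from PartKeyInfo
    String parentPath = keyName.substring(0, keyName.lastIndexOf('/') + 1);
    String keyPath = parentPath + dbPartName; // "a/b/c/part-file10001"
    // getOzoneKey(volume, bucket, keyPath) then adds the "/vol1/buck1/" prefix.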
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestV1.java
new file mode 100644
index 0000000..4ab9ee7
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestV1.java
@@ -0,0 +1,268 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.s3.multipart;
+
+import com.google.common.base.Optional;
+import org.apache.commons.codec.digest.DigestUtils;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.s3.multipart.S3MultipartUploadCompleteResponseV1;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadCompleteRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadCompleteResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS;
+
+/**
+ * Handles the multipart upload complete request for FSO (layout version V1)
+ * buckets.
+ */
+public class S3MultipartUploadCompleteRequestV1
+        extends S3MultipartUploadCompleteRequest {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(S3MultipartUploadCompleteRequestV1.class);
+
+  public S3MultipartUploadCompleteRequestV1(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override
+  @SuppressWarnings("methodlength")
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+    MultipartUploadCompleteRequest multipartUploadCompleteRequest =
+        getOmRequest().getCompleteMultiPartUploadRequest();
+
+    KeyArgs keyArgs = multipartUploadCompleteRequest.getKeyArgs();
+
+    List<OzoneManagerProtocolProtos.Part> partsList =
+        multipartUploadCompleteRequest.getPartsListList();
+    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
+
+    String volumeName = keyArgs.getVolumeName();
+    String bucketName = keyArgs.getBucketName();
+    final String requestedVolume = volumeName;
+    final String requestedBucket = bucketName;
+    String keyName = keyArgs.getKeyName();
+    String uploadID = keyArgs.getMultipartUploadID();
+    String dbMultipartKey;
+
+    ozoneManager.getMetrics().incNumCompleteMultipartUploads();
+
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+
+    boolean acquiredLock = false;
+    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
+        getOmRequest());
+    OMClientResponse omClientResponse = null;
+    IOException exception = null;
+    Result result;
+    try {
+      keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
+      volumeName = keyArgs.getVolumeName();
+      bucketName = keyArgs.getBucketName();
+
+      acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
+          volumeName, bucketName);
+
+      validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
+
+      String fileName = OzoneFSUtils.getFileName(keyName);
+      Path keyPath = Paths.get(keyName);
+      OMFileRequest.OMPathInfoV1 pathInfoV1 =
+              OMFileRequest.verifyDirectoryKeysInPath(omMetadataManager,
+                      volumeName, bucketName, keyName, keyPath);
+      long parentID = pathInfoV1.getLastKnownParentId();
+
+      dbMultipartKey = omMetadataManager.getMultipartKey(parentID,
+              fileName, uploadID);
+
+      String dbOzoneKey = omMetadataManager.getOzonePathKey(parentID, fileName);
+
+      String ozoneKey = omMetadataManager.getOzoneKey(
+              volumeName, bucketName, keyName);
+
+      OmMultipartKeyInfo multipartKeyInfo =
+              omMetadataManager.getMultipartInfoTable().get(dbMultipartKey);
+
+      // Check whether a directory exists with the same name; if so, throw.
+      if (pathInfoV1.getDirectoryResult() == DIRECTORY_EXISTS) {
+        throw new OMException("Cannot complete MPU for file: " + keyName +
+                " as a directory already exists in the given path",
+                NOT_A_FILE);
+      }
+
+      if (multipartKeyInfo == null) {
+        throw new OMException(
+            failureMessage(requestedVolume, requestedBucket, keyName),
+            OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
+      }
+      TreeMap<Integer, PartKeyInfo> partKeyInfoMap =
+          multipartKeyInfo.getPartKeyInfoMap();
+
+      if (partsList.size() > 0) {
+        if (partKeyInfoMap.size() == 0) {
+          LOG.error("Complete MultipartUpload failed for key {}, MPU Key has" +
+                  " no parts in OM, parts given to upload are {}", ozoneKey,
+              partsList);
+          throw new OMException(
+              failureMessage(requestedVolume, requestedBucket, keyName),
+              OMException.ResultCodes.INVALID_PART);
+        }
+
+        // First Check for Invalid Part Order.
+        List< Integer > partNumbers = new ArrayList<>();
+        int partsListSize = getPartsListSize(requestedVolume,
+                requestedBucket, keyName, ozoneKey, partNumbers, partsList);
+
+        List<OmKeyLocationInfo> partLocationInfos = new ArrayList<>();
+        long dataSize = getMultipartDataSize(requestedVolume, requestedBucket,
+                keyName, ozoneKey, partKeyInfoMap, partsListSize,
+                partLocationInfos, partsList, ozoneManager);
+
+        // All parts have same replication information. Here getting from last
+        // part.
+        OmKeyInfo omKeyInfo = getOmKeyInfo(ozoneManager, trxnLogIndex, keyArgs,
+                volumeName, bucketName, keyName, dbMultipartKey,
+                omMetadataManager, dbOzoneKey, partKeyInfoMap,
+                partLocationInfos, dataSize);
+
+        //Find all unused parts.
+        List< OmKeyInfo > unUsedParts = new ArrayList<>();
+        for (Map.Entry< Integer, PartKeyInfo > partKeyInfo :
+            partKeyInfoMap.entrySet()) {
+          if (!partNumbers.contains(partKeyInfo.getKey())) {
+            unUsedParts.add(OmKeyInfo
+                .getFromProtobuf(partKeyInfo.getValue().getPartKeyInfo()));
+          }
+        }
+
+        updateCache(omMetadataManager, dbOzoneKey, dbMultipartKey, omKeyInfo,
+            trxnLogIndex);
+
+        omResponse.setCompleteMultiPartUploadResponse(
+            MultipartUploadCompleteResponse.newBuilder()
+                .setVolume(requestedVolume)
+                .setBucket(requestedBucket)
+                .setKey(keyName)
+                .setHash(DigestUtils.sha256Hex(keyName)));
+
+        omClientResponse = new S3MultipartUploadCompleteResponseV1(
+            omResponse.build(), dbMultipartKey, omKeyInfo, unUsedParts);
+
+        result = Result.SUCCESS;
+      } else {
+        throw new OMException(
+            failureMessage(requestedVolume, requestedBucket, keyName) +
+            " because of empty part list",
+            OMException.ResultCodes.INVALID_REQUEST);
+      }
+
+    } catch (IOException ex) {
+      result = Result.FAILURE;
+      exception = ex;
+      omClientResponse = new S3MultipartUploadCompleteResponseV1(
+          createErrorOMResponse(omResponse, exception));
+    } finally {
+      addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
+          omDoubleBufferHelper);
+      if (acquiredLock) {
+        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK,
+            volumeName, bucketName);
+      }
+    }
+
+    logResult(ozoneManager, multipartUploadCompleteRequest, partsList,
+            auditMap, volumeName, bucketName, keyName, exception, result);
+
+    return omClientResponse;
+  }
+
+  protected OmKeyInfo getOmKeyInfoFromKeyTable(String dbOzoneFileKey,
+      String keyName, OMMetadataManager omMetadataManager) throws IOException {
+    return OMFileRequest.getOmKeyInfoFromFileTable(true,
+            omMetadataManager, dbOzoneFileKey, keyName);
+  }
+
+  @Override
+  protected OmKeyInfo getOmKeyInfoFromOpenKeyTable(String dbMultipartKey,
+      String keyName, OMMetadataManager omMetadataManager) throws IOException {
+    return OMFileRequest.getOmKeyInfoFromFileTable(true,
+            omMetadataManager, dbMultipartKey, keyName);
+  }
+
+  @Override
+  protected void updateCache(OMMetadataManager omMetadataManager,
+      String ozoneKey, String multipartKey, OmKeyInfo omKeyInfo,
+      long transactionLogIndex) {
+    // Update cache.
+    // 1. Add the key entry to the file table.
+    // 2. Delete multipartKey entry from openKeyTable and multipartInfo table.
+    OMFileRequest.addFileTableCacheEntry(omMetadataManager, ozoneKey,
+            omKeyInfo, omKeyInfo.getFileName(), transactionLogIndex);
+
+    omMetadataManager.getOpenKeyTable().addCacheEntry(
+            new CacheKey<>(multipartKey),
+            new CacheValue<>(Optional.absent(), transactionLogIndex));
+    omMetadataManager.getMultipartInfoTable().addCacheEntry(
+            new CacheKey<>(multipartKey),
+            new CacheValue<>(Optional.absent(), transactionLogIndex));
+  }
+
+  protected void updatePrefixFSOInfo(OmKeyInfo dbOpenKeyInfo,
+                                     OmKeyInfo.Builder builder) {
+    // Updates the parentID and fileName used by the FSO layout.
+    builder.setParentObjectID(dbOpenKeyInfo.getParentObjectID());
+    builder.setFileName(dbOpenKeyInfo.getFileName());
+  }
+
+  private static String failureMessage(String volume, String bucket,
+                                       String keyName) {
+    return "Complete Multipart Upload Failed: volume: " +
+        volume + " bucket: " + bucket + " key: " + keyName;
+  }
+}
+
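
For context on the updateCache() override above: the CacheValue entries
built with Optional.absent() act as delete markers, so readers consulting
the table cache see the multipart-info and open-file rows as already gone,
even though the RocksDB rows are only removed later when addToDBBatch()
flushes through the double buffer. A minimal, self-contained sketch of that
tombstone pattern, using toy types and java.util.Optional rather than the
OM's CacheKey/CacheValue classes:

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

public class TombstoneCacheSketch {
  // key -> Optional.empty() means "deleted, pending flush to the DB".
  private final Map<String, Optional<String>> cache = new HashMap<>();

  void put(String key, String value) {
    cache.put(key, Optional.of(value));
  }

  void markDeleted(String key) {
    // Analogous to new CacheValue<>(Optional.absent(), trxnLogIndex).
    cache.put(key, Optional.empty());
  }

  boolean isDeletedInCache(String key) {
    // A tombstoned key reads as deleted even before the DB delete lands;
    // in the real Table, a plain cache miss falls through to RocksDB.
    Optional<String> v = cache.get(key);
    return v != null && !v.isPresent();
  }
}
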
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java
index 20e398e..f593885 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java
@@ -96,4 +96,15 @@ public class S3MultipartUploadCompleteResponse extends OMClientResponse {
     }
   }
 
-}
\ No newline at end of file
+  protected String getMultipartKey() {
+    return multipartKey;
+  }
+
+  protected OmKeyInfo getOmKeyInfo() {
+    return omKeyInfo;
+  }
+
+  protected List<OmKeyInfo> getPartsUnusedList() {
+    return partsUnusedList;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseV1.java
similarity index 66%
copy from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java
copy to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseV1.java
index 20e398e..bb31dce 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseV1.java
@@ -18,82 +18,74 @@
 
 package org.apache.hadoop.ozone.om.response.s3.multipart;
 
-import java.io.IOException;
-import java.util.List;
-
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 
 import javax.annotation.Nonnull;
+import java.io.IOException;
+import java.util.List;
 
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE;
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE;
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.MULTIPARTINFO_TABLE;
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.MULTIPARTFILEINFO_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
 
 /**
  * Response for Multipart Upload Complete request.
  */
-@CleanupTableInfo(cleanupTables = {OPEN_KEY_TABLE, KEY_TABLE, DELETED_TABLE,
-    MULTIPARTINFO_TABLE})
-public class S3MultipartUploadCompleteResponse extends OMClientResponse {
-  private String multipartKey;
-  private OmKeyInfo omKeyInfo;
-  private List<OmKeyInfo> partsUnusedList;
+@CleanupTableInfo(cleanupTables = {OPEN_FILE_TABLE, FILE_TABLE, DELETED_TABLE,
+    MULTIPARTFILEINFO_TABLE})
+public class S3MultipartUploadCompleteResponseV1
+        extends S3MultipartUploadCompleteResponse {
 
-  public S3MultipartUploadCompleteResponse(
+  public S3MultipartUploadCompleteResponseV1(
       @Nonnull OMResponse omResponse,
       @Nonnull String multipartKey,
       @Nonnull OmKeyInfo omKeyInfo,
       @Nonnull List<OmKeyInfo> unUsedParts) {
-    super(omResponse);
-    this.partsUnusedList = unUsedParts;
-    this.multipartKey = multipartKey;
-    this.omKeyInfo = omKeyInfo;
+    super(omResponse, multipartKey, omKeyInfo, unUsedParts);
   }
 
   /**
    * For when the request is not successful.
    * For a successful request, the other constructor should be used.
    */
-  public S3MultipartUploadCompleteResponse(@Nonnull OMResponse omResponse) {
+  public S3MultipartUploadCompleteResponseV1(@Nonnull OMResponse omResponse) {
     super(omResponse);
     checkStatusNotOK();
   }
 
+
   @Override
   public void addToDBBatch(OMMetadataManager omMetadataManager,
       BatchOperation batchOperation) throws IOException {
 
     omMetadataManager.getOpenKeyTable().deleteWithBatch(batchOperation,
-        multipartKey);
+            getMultipartKey());
     omMetadataManager.getMultipartInfoTable().deleteWithBatch(batchOperation,
-        multipartKey);
+            getMultipartKey());
 
-    String ozoneKey = omMetadataManager.getOzoneKey(omKeyInfo.getVolumeName(),
-        omKeyInfo.getBucketName(), omKeyInfo.getKeyName());
-    omMetadataManager.getKeyTable().putWithBatch(batchOperation, ozoneKey,
-        omKeyInfo);
+    String dbFileKey = OMFileRequest.addToFileTable(omMetadataManager,
+            batchOperation, getOmKeyInfo());
 
-    if (!partsUnusedList.isEmpty()) {
+    if (!getPartsUnusedList().isEmpty()) {
       // Add unused parts to deleted key table.
       RepeatedOmKeyInfo repeatedOmKeyInfo = omMetadataManager.getDeletedTable()
-          .get(ozoneKey);
+              .get(dbFileKey);
       if (repeatedOmKeyInfo == null) {
-        repeatedOmKeyInfo = new RepeatedOmKeyInfo(partsUnusedList);
+        repeatedOmKeyInfo = new RepeatedOmKeyInfo(getPartsUnusedList());
       } else {
-        repeatedOmKeyInfo.addOmKeyInfo(omKeyInfo);
+        repeatedOmKeyInfo.addOmKeyInfo(getOmKeyInfo());
       }
 
       omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
-          ozoneKey, repeatedOmKeyInfo);
+              dbFileKey, repeatedOmKeyInfo);
     }
   }
+}
 
-}
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
index 9f6cff8..16cb4ae 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
@@ -115,7 +115,7 @@ public class TestS3MultipartRequest {
             keyName);
 
     S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
-        new S3InitiateMultipartUploadRequest(omRequest);
+        getS3InitiateMultipartUploadReq(omRequest);
 
     OMRequest modifiedRequest =
         s3InitiateMultipartUploadRequest.preExecute(ozoneManager);
@@ -204,7 +204,7 @@ public class TestS3MultipartRequest {
             keyName, multipartUploadID, partList);
 
     S3MultipartUploadCompleteRequest s3MultipartUploadCompleteRequest =
-        new S3MultipartUploadCompleteRequest(omRequest);
+            getS3MultipartUploadCompleteReq(omRequest);
 
     OMRequest modifiedRequest =
         s3MultipartUploadCompleteRequest.preExecute(ozoneManager);
@@ -247,6 +247,11 @@ public class TestS3MultipartRequest {
     return modifiedRequest;
   }
 
+  protected S3MultipartUploadCompleteRequest getS3MultipartUploadCompleteReq(
+          OMRequest omRequest) {
+    return new S3MultipartUploadCompleteRequest(omRequest);
+  }
+
   protected S3MultipartUploadCommitPartRequest getS3MultipartUploadCommitReq(
           OMRequest omRequest) {
     return new S3MultipartUploadCommitPartRequest(omRequest);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java
index a04f51f..3d399b1 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.ozone.om.request.s3.multipart;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.UUID;
@@ -55,7 +56,7 @@ public class TestS3MultipartUploadCompleteRequest
   public void testValidateAndUpdateCacheSuccess() throws Exception {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
 
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
@@ -64,7 +65,7 @@ public class TestS3MultipartUploadCompleteRequest
         bucketName, keyName);
 
     S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
-        new S3InitiateMultipartUploadRequest(initiateMPURequest);
+        getS3InitiateMultipartUploadReq(initiateMPURequest);
 
     OMClientResponse omClientResponse =
         s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager,
@@ -78,27 +79,25 @@ public class TestS3MultipartUploadCompleteRequest
         bucketName, keyName, clientID, multipartUploadID, 1);
 
     S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest =
-        new S3MultipartUploadCommitPartRequest(commitMultipartRequest);
+        getS3MultipartUploadCommitReq(commitMultipartRequest);
 
     // Add key to open key table.
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName,
-        keyName, clientID, HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
+    addKeyToTable(volumeName, bucketName, keyName, clientID);
 
     s3MultipartUploadCommitPartRequest.validateAndUpdateCache(ozoneManager,
         2L, ozoneManagerDoubleBufferHelper);
 
     List<Part> partList = new ArrayList<>();
 
-    partList.add(Part.newBuilder().setPartName(
-        omMetadataManager.getOzoneKey(volumeName, bucketName, keyName) +
-            clientID).setPartNumber(1).build());
+    String partName = getPartName(volumeName, bucketName, keyName, clientID);
+    partList.add(Part.newBuilder().setPartName(partName).setPartNumber(1)
+            .build());
 
     OMRequest completeMultipartRequest = doPreExecuteCompleteMPU(volumeName,
         bucketName, keyName, multipartUploadID, partList);
 
     S3MultipartUploadCompleteRequest s3MultipartUploadCompleteRequest =
-        new S3MultipartUploadCompleteRequest(completeMultipartRequest);
+        getS3MultipartUploadCompleteReq(completeMultipartRequest);
 
     omClientResponse =
         s3MultipartUploadCompleteRequest.validateAndUpdateCache(ozoneManager,
@@ -107,14 +106,71 @@ public class TestS3MultipartUploadCompleteRequest
     Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
         omClientResponse.getOMResponse().getStatus());
 
-    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
-        bucketName, keyName, multipartUploadID);
+    String multipartKey = getMultipartKey(volumeName, bucketName, keyName,
+            multipartUploadID);
 
     Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
     Assert.assertNull(
         omMetadataManager.getMultipartInfoTable().get(multipartKey));
     Assert.assertNotNull(omMetadataManager.getKeyTable().get(
-        omMetadataManager.getOzoneKey(volumeName, bucketName, keyName)));
+            getOzoneDBKey(volumeName, bucketName, keyName)));
+  }
+
+  @Test
+  public void testInvalidPartOrderError() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
+
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+
+    OMRequest initiateMPURequest = doPreExecuteInitiateMPU(volumeName,
+            bucketName, keyName);
+
+    S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
+            getS3InitiateMultipartUploadReq(initiateMPURequest);
+
+    OMClientResponse omClientResponse =
+            s3InitiateMultipartUploadRequest.validateAndUpdateCache(
+                    ozoneManager, 1L, ozoneManagerDoubleBufferHelper);
+
+    long clientID = Time.now();
+    String multipartUploadID = omClientResponse.getOMResponse()
+            .getInitiateMultiPartUploadResponse().getMultipartUploadID();
+
+    OMRequest commitMultipartRequest = doPreExecuteCommitMPU(volumeName,
+            bucketName, keyName, clientID, multipartUploadID, 1);
+
+    S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest =
+            getS3MultipartUploadCommitReq(commitMultipartRequest);
+
+    // Add key to open key table.
+    addKeyToTable(volumeName, bucketName, keyName, clientID);
+
+    s3MultipartUploadCommitPartRequest.validateAndUpdateCache(ozoneManager,
+            2L, ozoneManagerDoubleBufferHelper);
+
+    List<Part> partList = new ArrayList<>();
+
+    String partName = getPartName(volumeName, bucketName, keyName, clientID);
+    partList.add(Part.newBuilder().setPartName(partName).setPartNumber(23)
+            .build());
+    partList.add(Part.newBuilder().setPartName(partName).setPartNumber(1)
+            .build());
+
+    OMRequest completeMultipartRequest = doPreExecuteCompleteMPU(volumeName,
+            bucketName, keyName, multipartUploadID, partList);
+
+    S3MultipartUploadCompleteRequest s3MultipartUploadCompleteRequest =
+            getS3MultipartUploadCompleteReq(completeMultipartRequest);
+
+    omClientResponse =
+            s3MultipartUploadCompleteRequest.validateAndUpdateCache(
+                    ozoneManager, 3L, ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.INVALID_PART_ORDER,
+            omClientResponse.getOMResponse().getStatus());
   }
 
   @Test
@@ -129,7 +185,7 @@ public class TestS3MultipartUploadCompleteRequest
         bucketName, keyName, UUID.randomUUID().toString(), partList);
 
     S3MultipartUploadCompleteRequest s3MultipartUploadCompleteRequest =
-        new S3MultipartUploadCompleteRequest(completeMultipartRequest);
+        getS3MultipartUploadCompleteReq(completeMultipartRequest);
 
     OMClientResponse omClientResponse =
         s3MultipartUploadCompleteRequest.validateAndUpdateCache(ozoneManager,
@@ -153,7 +209,7 @@ public class TestS3MultipartUploadCompleteRequest
         bucketName, keyName, UUID.randomUUID().toString(), partList);
 
     S3MultipartUploadCompleteRequest s3MultipartUploadCompleteRequest =
-        new S3MultipartUploadCompleteRequest(completeMultipartRequest);
+            getS3MultipartUploadCompleteReq(completeMultipartRequest);
 
     OMClientResponse omClientResponse =
         s3MultipartUploadCompleteRequest.validateAndUpdateCache(ozoneManager,
@@ -180,7 +236,7 @@ public class TestS3MultipartUploadCompleteRequest
 
     // Doing complete multipart upload request without initiate.
     S3MultipartUploadCompleteRequest s3MultipartUploadCompleteRequest =
-        new S3MultipartUploadCompleteRequest(completeMultipartRequest);
+            getS3MultipartUploadCompleteReq(completeMultipartRequest);
 
     OMClientResponse omClientResponse =
         s3MultipartUploadCompleteRequest.validateAndUpdateCache(ozoneManager,
@@ -191,5 +247,35 @@ public class TestS3MultipartUploadCompleteRequest
         omClientResponse.getOMResponse().getStatus());
 
   }
+
+  protected void addKeyToTable(String volumeName, String bucketName,
+                             String keyName, long clientID) throws Exception {
+    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName,
+            keyName, clientID, HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE, omMetadataManager);
+  }
+
+  protected String getMultipartKey(String volumeName, String bucketName,
+      String keyName, String multipartUploadID) throws IOException {
+    return omMetadataManager.getMultipartKey(volumeName,
+            bucketName, keyName, multipartUploadID);
+  }
+
+  private String getPartName(String volumeName, String bucketName,
+      String keyName, long clientID) throws IOException {
+
+    String dbOzoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
+            keyName);
+    return dbOzoneKey + clientID;
+  }
+
+  protected String getOzoneDBKey(String volumeName, String bucketName,
+                                 String keyName) throws IOException {
+    return omMetadataManager.getOzoneKey(volumeName, bucketName, keyName);
+  }
+
+  protected String getKeyName() {
+    return UUID.randomUUID().toString();
+  }
 }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestV1.java
new file mode 100644
index 0000000..cd5051f
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestV1.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.s3.multipart;
+
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.Time;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Iterator;
+import java.util.UUID;
+
+/**
+ * Tests S3 Multipart Upload Complete request for layout version V1.
+ */
+public class TestS3MultipartUploadCompleteRequestV1
+    extends TestS3MultipartUploadCompleteRequest {
+
+  @BeforeClass
+  public static void init() {
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
+  }
+
+  protected String getKeyName() {
+    String parentDir = UUID.randomUUID().toString() + "/a/b/c";
+    String fileName = "file1";
+    String keyName = parentDir + OzoneConsts.OM_KEY_PREFIX + fileName;
+    return keyName;
+  }
+
+  protected void addKeyToTable(String volumeName, String bucketName,
+      String keyName, long clientID) throws Exception {
+    // need to initialize parentID
+    String parentDir = OzoneFSUtils.getParentDir(keyName);
+    Assert.assertNotEquals("Parent doesn't exist!", parentDir, keyName);
+
+    // add parentDir to dirTable
+    long parentID = getParentID(volumeName, bucketName, keyName);
+    long txnId = 50;
+    long objectId = parentID + 1;
+
+    OmKeyInfo omKeyInfoV1 =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId,
+                    Time.now());
+
+    // add key to openFileTable
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    omKeyInfoV1.setKeyName(fileName);
+    TestOMRequestUtils.addFileToKeyTable(true, false,
+            fileName, omKeyInfoV1, clientID, omKeyInfoV1.getObjectID(),
+            omMetadataManager);
+  }
+
+  protected String getMultipartKey(String volumeName, String bucketName,
+      String keyName, String multipartUploadID) throws IOException {
+    OzoneFileStatus keyStatus = OMFileRequest.getOMKeyInfoIfExists(
+            omMetadataManager, volumeName,
+            bucketName, keyName, 0);
+
+    Assert.assertNotNull("key not found in DB!", keyStatus);
+
+    return omMetadataManager.getMultipartKey(keyStatus.getKeyInfo()
+                    .getParentObjectID(), keyStatus.getTrimmedName(),
+            multipartUploadID);
+  }
+
+  private long getParentID(String volumeName, String bucketName,
+                           String keyName) throws IOException {
+    Path keyPath = Paths.get(keyName);
+    Iterator<Path> elements = keyPath.iterator();
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+
+    return OMFileRequest.getParentID(omBucketInfo.getObjectID(),
+            elements, keyName, omMetadataManager);
+  }
+
+  protected String getOzoneDBKey(String volumeName, String bucketName,
+                                 String keyName) throws IOException {
+    long parentID = getParentID(volumeName, bucketName, keyName);
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    return omMetadataManager.getOzonePathKey(parentID, fileName);
+  }
+
+  protected S3MultipartUploadCompleteRequest getS3MultipartUploadCompleteReq(
+          OMRequest omRequest) {
+    return new S3MultipartUploadCompleteRequestV1(omRequest);
+  }
+
+  protected S3MultipartUploadCommitPartRequest getS3MultipartUploadCommitReq(
+          OMRequest omRequest) {
+    return new S3MultipartUploadCommitPartRequestV1(omRequest);
+  }
+
+  protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq(
+          OMRequest initiateMPURequest) {
+    return new S3InitiateMultipartUploadRequestV1(initiateMPURequest);
+  }
+
+}
+
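
The getMultipartKey() and getOzoneDBKey() overrides above capture the core
difference between the two layouts: the flat layout addresses a key by
volume/bucket/keyName, while the V1 layout addresses a file by its parent
directory's objectID plus the file name, so the test first resolves the
parentID by walking the key path. A toy sketch of that walk (plain
java.nio, not the OMFileRequest.getParentID() table lookup itself):

import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Iterator;

public class ParentWalkSketch {
  public static void main(String[] args) {
    // getParentID(...) consumes the key path element by element: each
    // directory element is resolved in the directory table, and the last
    // directory's objectID becomes the new file's parentID.
    Path keyPath = Paths.get("a/b/c/file1");
    Iterator<Path> elements = keyPath.iterator();
    while (elements.hasNext()) {
      System.out.println(elements.next()); // prints a, b, c, file1
    }
  }
}
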
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
index 106ae61..6f4d6fa 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
@@ -268,6 +268,27 @@ public class TestS3MultipartResponse {
             openPartKeyInfoToBeDeleted, isRatisEnabled, omBucketInfo);
   }
 
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  public S3MultipartUploadCompleteResponse createS3CompleteMPUResponseV1(
+          String volumeName, String bucketName, long parentID, String keyName,
+          String multipartUploadID, OmKeyInfo omKeyInfo,
+          OzoneManagerProtocolProtos.Status status,
+          List<OmKeyInfo> unUsedParts) {
+
+    String multipartKey = getMultipartKey(parentID, keyName, multipartUploadID);
+
+    OMResponse omResponse = OMResponse.newBuilder()
+            .setCmdType(OzoneManagerProtocolProtos.Type.CompleteMultiPartUpload)
+            .setStatus(status).setSuccess(true)
+            .setCompleteMultiPartUploadResponse(
+                    OzoneManagerProtocolProtos.MultipartUploadCompleteResponse
+                            .newBuilder().setBucket(bucketName)
+                            .setVolume(volumeName).setKey(keyName)).build();
+
+    return new S3MultipartUploadCompleteResponseV1(omResponse, multipartKey,
+            omKeyInfo, unUsedParts);
+  }
+
   private String getMultipartKey(long parentID, String keyName,
                                  String multipartUploadID) {
     String fileName = OzoneFSUtils.getFileName(keyName);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseV1.java
new file mode 100644
index 0000000..2683273
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseV1.java
@@ -0,0 +1,257 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.s3.multipart;
+
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo;
+import org.apache.hadoop.util.Time;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.UUID;
+
+/**
+ * Tests S3 multipart upload complete response for layout version V1.
+ */
+public class TestS3MultipartUploadCompleteResponseV1
+    extends TestS3MultipartResponse {
+
+  private String dirName = "a/b/c/";
+
+  private long parentID;
+
+  @Test
+  public void testAddDBToBatch() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
+    String multipartUploadID = UUID.randomUUID().toString();
+
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+
+    long txnId = 50;
+    long objectId = parentID + 1;
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    String dbMultipartKey = omMetadataManager.getMultipartKey(parentID,
+            fileName, multipartUploadID);
+    long clientId = Time.now();
+    String dbOpenKey = omMetadataManager.getOpenFileName(parentID, fileName,
+            clientId);
+    String dbKey = omMetadataManager.getOzonePathKey(parentID, fileName);
+    OmKeyInfo omKeyInfoV1 =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId,
+                    Time.now());
+
+    // add key to openFileTable
+    omKeyInfoV1.setKeyName(fileName);
+    TestOMRequestUtils.addFileToKeyTable(true, false,
+            fileName, omKeyInfoV1, clientId, omKeyInfoV1.getObjectID(),
+            omMetadataManager);
+
+    addS3MultipartUploadCommitPartResponseV1(volumeName, bucketName, keyName,
+            multipartUploadID, dbOpenKey);
+
+    List<OmKeyInfo> unUsedParts = new ArrayList<>();
+    S3MultipartUploadCompleteResponse s3MultipartUploadCompleteResponse =
+            createS3CompleteMPUResponseV1(volumeName, bucketName, parentID,
+                    keyName, multipartUploadID, omKeyInfoV1,
+                OzoneManagerProtocolProtos.Status.OK, unUsedParts);
+
+    s3MultipartUploadCompleteResponse.addToDBBatch(omMetadataManager,
+        batchOperation);
+
+    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+
+    Assert.assertNotNull(omMetadataManager.getKeyTable().get(dbKey));
+    Assert.assertNull(
+        omMetadataManager.getMultipartInfoTable().get(dbMultipartKey));
+    Assert.assertNull(
+            omMetadataManager.getOpenKeyTable().get(dbMultipartKey));
+
+    // As no parts were created, the deleted table should have no entries.
+    Assert.assertEquals(0, omMetadataManager.countRowsInTable(
+            omMetadataManager.getDeletedTable()));
+  }
+
+  @Test
+  public void testAddDBToBatchWithParts() throws Exception {
+
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
+
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+    createParentPath(volumeName, bucketName);
+
+    String multipartUploadID = UUID.randomUUID().toString();
+
+    int deleteEntryCount = 0;
+
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    String dbMultipartKey = omMetadataManager.getMultipartKey(parentID,
+            fileName, multipartUploadID);
+
+    S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponseV1 =
+            addS3InitiateMultipartUpload(volumeName, bucketName, keyName,
+                    multipartUploadID);
+
+    // Add some dummy parts for testing.
+    // No key locations are added, as this test only verifies that entries
+    // are added to the deleted table.
+    OmMultipartKeyInfo omMultipartKeyInfo =
+            s3InitiateMultipartUploadResponseV1.getOmMultipartKeyInfo();
+
+    OmKeyInfo omKeyInfoV1 = commitS3MultipartUpload(volumeName, bucketName,
+            keyName, multipartUploadID, fileName, dbMultipartKey,
+            omMultipartKeyInfo);
+    // After the commit, an entry is added to the deleted table.
+    deleteEntryCount++;
+
+    OmKeyInfo omKeyInfo =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE,
+                    parentID + 10,
+                    parentID, 100, Time.now());
+    List<OmKeyInfo> unUsedParts = new ArrayList<>();
+    unUsedParts.add(omKeyInfo);
+    S3MultipartUploadCompleteResponse s3MultipartUploadCompleteResponse =
+            createS3CompleteMPUResponseV1(volumeName, bucketName, parentID,
+                    keyName, multipartUploadID, omKeyInfoV1,
+                    OzoneManagerProtocolProtos.Status.OK, unUsedParts);
+
+    s3MultipartUploadCompleteResponse.addToDBBatch(omMetadataManager,
+            batchOperation);
+
+    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+    String dbKey = omMetadataManager.getOzonePathKey(parentID,
+          omKeyInfoV1.getFileName());
+    Assert.assertNotNull(omMetadataManager.getKeyTable().get(dbKey));
+    Assert.assertNull(
+            omMetadataManager.getMultipartInfoTable().get(dbMultipartKey));
+    Assert.assertNull(
+            omMetadataManager.getOpenKeyTable().get(dbMultipartKey));
+
+    // As 1 unused part exists, 1 entry should be present in the deleted
+    // table.
+    deleteEntryCount++;
+    Assert.assertEquals(deleteEntryCount, omMetadataManager.countRowsInTable(
+            omMetadataManager.getDeletedTable()));
+  }
+
+  private OmKeyInfo commitS3MultipartUpload(String volumeName,
+      String bucketName, String keyName, String multipartUploadID,
+      String fileName, String multipartKey,
+      OmMultipartKeyInfo omMultipartKeyInfo) throws IOException {
+
+    PartKeyInfo part1 = createPartKeyInfoV1(volumeName, bucketName, parentID,
+        fileName, 1);
+
+    addPart(1, part1, omMultipartKeyInfo);
+
+    long clientId = Time.now();
+    String openKey = omMetadataManager.getOpenFileName(parentID, fileName,
+            clientId);
+
+    S3MultipartUploadCommitPartResponse s3MultipartUploadCommitPartResponse =
+            createS3CommitMPUResponseV1(volumeName, bucketName, parentID,
+                    keyName, multipartUploadID,
+                    omMultipartKeyInfo.getPartKeyInfo(1),
+                    omMultipartKeyInfo,
+                    OzoneManagerProtocolProtos.Status.OK, openKey);
+
+    s3MultipartUploadCommitPartResponse.checkAndUpdateDB(omMetadataManager,
+            batchOperation);
+
+    Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
+    Assert.assertNull(
+        omMetadataManager.getMultipartInfoTable().get(multipartKey));
+
+    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+
+    // As 1 part was created, 1 entry should be present in the deleted table.
+    Assert.assertEquals(1, omMetadataManager.countRowsInTable(
+        omMetadataManager.getDeletedTable()));
+
+    String part1DeletedKeyName =
+        omMultipartKeyInfo.getPartKeyInfo(1).getPartName();
+
+    Assert.assertNotNull(omMetadataManager.getDeletedTable().get(
+        part1DeletedKeyName));
+
+    RepeatedOmKeyInfo ro =
+        omMetadataManager.getDeletedTable().get(part1DeletedKeyName);
+    OmKeyInfo omPartKeyInfo = OmKeyInfo.getFromProtobuf(part1.getPartKeyInfo());
+    Assert.assertEquals(omPartKeyInfo, ro.getOmKeyInfoList().get(0));
+
+    return omPartKeyInfo;
+  }
+
+  private S3InitiateMultipartUploadResponse addS3InitiateMultipartUpload(
+          String volumeName, String bucketName, String keyName,
+          String multipartUploadID) throws IOException {
+
+    S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponseV1 =
+            createS3InitiateMPUResponseV1(volumeName, bucketName, parentID,
+                    keyName, multipartUploadID, new ArrayList<>());
+
+    s3InitiateMultipartUploadResponseV1.addToDBBatch(omMetadataManager,
+            batchOperation);
+
+    return s3InitiateMultipartUploadResponseV1;
+  }
+
+  private String getKeyName() {
+    return dirName + UUID.randomUUID().toString();
+  }
+
+  private void createParentPath(String volumeName, String bucketName)
+      throws Exception {
+    // Create parent dirs for the path
+    parentID = TestOMRequestUtils.addParentsToDirTable(volumeName, bucketName,
+            dirName, omMetadataManager);
+  }
+
+  private void addS3MultipartUploadCommitPartResponseV1(String volumeName,
+      String bucketName, String keyName, String multipartUploadID,
+      String openKey) throws IOException {
+    S3MultipartUploadCommitPartResponse s3MultipartUploadCommitPartResponse =
+            createS3CommitMPUResponseV1(volumeName, bucketName, parentID,
+                    keyName, multipartUploadID, null, null,
+                    OzoneManagerProtocolProtos.Status.OK, openKey);
+
+    s3MultipartUploadCommitPartResponse.addToDBBatch(omMetadataManager,
+            batchOperation);
+
+    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+  }
+}



[ozone] 17/19: HDDS-4742. Make trash work with FS Optimised Buckets. (#1915)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 3812776a1b484a1d339884b72105c466e02b555e
Author: Sadanand Shenoy <sa...@gmail.com>
AuthorDate: Tue Feb 16 22:07:12 2021 +0530

    HDDS-4742. Make trash work with FS Optimised Buckets. (#1915)
---
 .../hadoop/ozone/om/helpers/OzoneFSUtils.java      |  25 ++++
 .../hadoop/fs/ozone/TestOzoneFileSystemV1.java     |  12 --
 .../hadoop/ozone/om/TrashOzoneFileSystem.java      | 142 ++++++++++++++++-----
 .../fs/ozone/BasicOzoneClientAdapterImpl.java      |  15 +--
 4 files changed, 135 insertions(+), 59 deletions(-)

diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
index e9d4cf9..e7e2eb0 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
@@ -18,10 +18,12 @@
 package org.apache.hadoop.ozone.om.helpers;
 
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.util.StringUtils;
 
 import javax.annotation.Nonnull;
 import java.nio.file.Paths;
+import java.util.Map;
 
 import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
@@ -205,4 +207,27 @@ public final class OzoneFSUtils {
     java.nio.file.Path keyPath = Paths.get(keyName);
     return keyPath.getNameCount();
   }
+
+
+  /**
+   * Returns true if the bucket is FS Optimised.
+   * @param bucketMetadata the bucket's metadata key-value map
+   * @return true if the bucket layout is FS optimised, otherwise false
+   */
+  public static boolean isFSOptimizedBucket(
+      Map<String, String> bucketMetadata) {
+    // layout version V1 represents optimized FS path
+    boolean layoutVersionEnabled =
+        org.apache.commons.lang3.StringUtils.equalsIgnoreCase(
+            OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1,
+            bucketMetadata
+                .get(OMConfigKeys.OZONE_OM_LAYOUT_VERSION));
+
+    boolean fsEnabled =
+        Boolean.parseBoolean(bucketMetadata
+            .get(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS));
+
+    return layoutVersionEnabled && fsEnabled;
+  }
+
 }
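
A minimal sketch of calling the new OzoneFSUtils helper; the metadata keys
come straight from the hunk above, and storing the flag as the literal
string "true" is an assumption for illustration (the helper only runs the
value through Boolean.parseBoolean):

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;

public class FsoBucketCheckSketch {
  public static void main(String[] args) {
    Map<String, String> bucketMetadata = new HashMap<>();
    bucketMetadata.put(OMConfigKeys.OZONE_OM_LAYOUT_VERSION,
        OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1);
    bucketMetadata.put(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, "true");

    // Prints "true" only when both the V1 layout marker and the
    // filesystem-paths flag are set; either one alone is not enough.
    System.out.println(OzoneFSUtils.isFSOptimizedBucket(bucketMetadata));
  }
}
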
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
index 03846ae..eb7eaca 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
@@ -383,18 +383,6 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
   @Override
   @Test
   @Ignore("TODO:HDDS-2939")
-  public void testTrash() throws Exception {
-  }
-
-  @Override
-  @Test
-  @Ignore("TODO:HDDS-2939")
-  public void testRenameToTrashEnabled() throws Exception {
-  }
-
-  @Override
-  @Test
-  @Ignore("TODO:HDDS-2939")
   public void testListStatusWithIntermediateDir() throws Exception {
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java
index 6d7a88a..a9408a8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java
@@ -35,10 +35,11 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.helpers.OMRatisHelper;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.om.helpers.OMRatisHelper;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Progressable;
 import org.apache.ratis.protocol.ClientId;
@@ -154,6 +155,11 @@ public class TrashOzoneFileSystem extends FileSystem {
     // check whether the src and dst belong to the same bucket & trashroot.
     OFSPath srcPath = new OFSPath(src);
     OFSPath dstPath = new OFSPath(dst);
+    OmBucketInfo bucket = ozoneManager.getBucketInfo(srcPath.getVolumeName(),
+        srcPath.getBucketName());
+    if (OzoneFSUtils.isFSOptimizedBucket(bucket.getMetadata())) {
+      return renameV1(srcPath, dstPath);
+    }
     Preconditions.checkArgument(srcPath.getBucketName().
         equals(dstPath.getBucketName()));
     Preconditions.checkArgument(srcPath.getTrashRoot().
@@ -163,14 +169,50 @@ public class TrashOzoneFileSystem extends FileSystem {
     return true;
   }
 
+  private boolean renameV1(OFSPath srcPath, OFSPath dstPath) {
+    OzoneManagerProtocolProtos.OMRequest omRequest =
+        getRenameKeyRequest(srcPath, dstPath);
+    try {
+      if (omRequest != null) {
+        submitRequest(omRequest);
+        return true;
+      }
+      return false;
+    } catch (Exception e) {
+      LOG.error("Couldn't send rename request (V1).", e);
+      return false;
+    }
+  }
+
   @Override
   public boolean delete(Path path, boolean b) throws IOException {
     ozoneManager.getMetrics().incNumTrashDeletes();
+    OFSPath srcPath = new OFSPath(path);
+    OmBucketInfo bucket = ozoneManager.getBucketInfo(srcPath.getVolumeName(),
+        srcPath.getBucketName());
+    if (OzoneFSUtils.isFSOptimizedBucket(bucket.getMetadata())) {
+      return deleteV1(srcPath);
+    }
     DeleteIterator iterator = new DeleteIterator(path, true);
     iterator.iterate();
     return true;
   }
 
+  private boolean deleteV1(OFSPath srcPath) {
+    OzoneManagerProtocolProtos.OMRequest omRequest =
+        getDeleteKeyRequest(srcPath);
+    try {
+      if (omRequest != null) {
+        submitRequest(omRequest);
+        return true;
+      }
+      return false;
+    } catch (Throwable e) {
+      LOG.error("Couldn't send delete request.", e);
+      return false;
+    }
+  }
+
   @Override
   public FileStatus[] listStatus(Path path) throws  IOException {
     ozoneManager.getMetrics().incNumTrashListStatus();
@@ -377,6 +419,41 @@ public class TrashOzoneFileSystem extends FileSystem {
     }
   }
 
+
+  private OzoneManagerProtocolProtos.OMRequest
+      getRenameKeyRequest(
+      OFSPath src, OFSPath dst) {
+    String volumeName = src.getVolumeName();
+    String bucketName = src.getBucketName();
+    String keyName = src.getKeyName();
+
+    OzoneManagerProtocolProtos.KeyArgs keyArgs =
+        OzoneManagerProtocolProtos.KeyArgs.newBuilder()
+            .setKeyName(keyName)
+            .setVolumeName(volumeName)
+            .setBucketName(bucketName)
+            .build();
+    String toKeyName = dst.getKeyName();
+    OzoneManagerProtocolProtos.RenameKeyRequest renameKeyRequest =
+        OzoneManagerProtocolProtos.RenameKeyRequest.newBuilder()
+            .setKeyArgs(keyArgs)
+            .setToKeyName(toKeyName)
+            .build();
+    OzoneManagerProtocolProtos.OMRequest omRequest =
+        null;
+    try {
+      omRequest = OzoneManagerProtocolProtos.OMRequest.newBuilder()
+              .setClientId(CLIENT_ID.toString())
+              .setUserInfo(getUserInfo())
+              .setRenameKeyRequest(renameKeyRequest)
+              .setCmdType(OzoneManagerProtocolProtos.Type.RenameKey)
+              .build();
+    } catch (IOException e) {
+      LOG.error("Couldn't get userinfo", e);
+    }
+    return omRequest;
+  }
+
   private class RenameIterator extends OzoneListingIterator {
     private final String srcPath;
     private final String dstPath;
@@ -408,40 +485,37 @@ public class TrashOzoneFileSystem extends FileSystem {
       }
       return true;
     }
+  }
 
-    private OzoneManagerProtocolProtos.OMRequest
-        getRenameKeyRequest(
-        OFSPath src, OFSPath dst) {
-      String volumeName = src.getVolumeName();
-      String bucketName = src.getBucketName();
-      String keyName = src.getKeyName();
-
-      OzoneManagerProtocolProtos.KeyArgs keyArgs =
-          OzoneManagerProtocolProtos.KeyArgs.newBuilder()
-              .setKeyName(keyName)
-              .setVolumeName(volumeName)
-              .setBucketName(bucketName)
-              .build();
-      String toKeyName = dst.getKeyName();
-      OzoneManagerProtocolProtos.RenameKeyRequest renameKeyRequest =
-          OzoneManagerProtocolProtos.RenameKeyRequest.newBuilder()
-              .setKeyArgs(keyArgs)
-              .setToKeyName(toKeyName)
-              .build();
-      OzoneManagerProtocolProtos.OMRequest omRequest =
-          null;
-      try {
-        omRequest = OzoneManagerProtocolProtos.OMRequest.newBuilder()
-            .setClientId(CLIENT_ID.toString())
-            .setUserInfo(getUserInfo())
-            .setRenameKeyRequest(renameKeyRequest)
-            .setCmdType(OzoneManagerProtocolProtos.Type.RenameKey)
+  private OzoneManagerProtocolProtos.OMRequest getDeleteKeyRequest(
+      OFSPath srcPath) {
+    String volume = srcPath.getVolumeName();
+    String bucket = srcPath.getBucketName();
+    String key  = srcPath.getKeyName();
+    OzoneManagerProtocolProtos.KeyArgs keyArgs =
+        OzoneManagerProtocolProtos.KeyArgs.newBuilder()
+            .setKeyName(key)
+            .setVolumeName(volume)
+            .setBucketName(bucket)
+            .setRecursive(true)
             .build();
-      } catch (IOException e) {
-        LOG.error("Couldn't get userinfo", e);
-      }
-      return omRequest;
+    OzoneManagerProtocolProtos.DeleteKeyRequest deleteKeyRequest =
+        OzoneManagerProtocolProtos.DeleteKeyRequest.newBuilder()
+            .setKeyArgs(keyArgs).build();
+    OzoneManagerProtocolProtos.OMRequest omRequest =
+        null;
+    try {
+      omRequest =
+          OzoneManagerProtocolProtos.OMRequest.newBuilder()
+              .setClientId(CLIENT_ID.toString())
+              .setUserInfo(getUserInfo())
+              .setDeleteKeyRequest(deleteKeyRequest)
+              .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey)
+              .build();
+    } catch (IOException e) {
+      LOG.error("Couldn't get userinfo", e);
     }
+    return omRequest;
   }
 
   private class DeleteIterator extends OzoneListingIterator {
@@ -467,7 +541,7 @@ public class TrashOzoneFileSystem extends FileSystem {
       for (String keyPath : keyPathList) {
         OFSPath path = new OFSPath(keyPath);
         OzoneManagerProtocolProtos.OMRequest omRequest =
-            getDeleteKeyRequest(path);
+            getDeleteKeysRequest(path);
         try {
           ozoneManager.getMetrics().incNumTrashFilesDeletes();
           submitRequest(omRequest);
@@ -479,7 +553,7 @@ public class TrashOzoneFileSystem extends FileSystem {
     }
 
     private OzoneManagerProtocolProtos.OMRequest
-        getDeleteKeyRequest(
+        getDeleteKeysRequest(
         OFSPath keyPath) {
       String volumeName = keyPath.getVolumeName();
       String bucketName = keyPath.getBucketName();
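
rename() and delete() above now perform the same bucket-layout check before
choosing between a single atomic OM request (FS optimised buckets) and the
older iterator-based paths. A fragment-level sketch of that shared guard,
with a hypothetical helper name (the patch inlines this logic in each
method):

  private boolean useFSOptimizedPath(OFSPath path) throws IOException {
    OmBucketInfo bucket = ozoneManager.getBucketInfo(
        path.getVolumeName(), path.getBucketName());
    return OzoneFSUtils.isFSOptimizedBucket(bucket.getMetadata());
  }
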
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
index 150108c..125934e 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
@@ -48,12 +48,12 @@ import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.OzoneKey;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenRenewer;
@@ -548,17 +548,6 @@ public class BasicOzoneClientAdapterImpl implements OzoneClientAdapter {
 
   @Override
   public boolean isFSOptimizedBucket() {
-    // layout version V1 represents optimized FS path
-    boolean layoutVersionEnabled =
-            StringUtils.equalsIgnoreCase(
-                    OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1,
-                    bucket.getMetadata()
-                            .get(OMConfigKeys.OZONE_OM_LAYOUT_VERSION));
-
-    boolean fsEnabled =
-            Boolean.parseBoolean(bucket.getMetadata()
-                    .get(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS));
-
-    return layoutVersionEnabled && fsEnabled;
+    return OzoneFSUtils.isFSOptimizedBucket(bucket.getMetadata());
   }
 }

