You are viewing a plain text version of this content. The canonical link for it was a hyperlink in the original HTML message and is not available in this plain-text rendering.
Posted to commits@ozone.apache.org by ra...@apache.org on 2021/04/07 03:25:27 UTC

[ozone] branch HDDS-2939 updated (ba06e5c -> bf340bc)

This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a change to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git.


    omit ba06e5c  HDDS-5018. [FSO] Add robot tests for new Filesystem layout (#2071)
    omit 449c3c5  HDDS-4495. [FSO]Delete : Implement async cleanup of garbage and orphan sub-dirs/files (#2093)
    omit 6d76676  HDDS-4932. [FSO] Provide list subpaths function to perform recursive ACL check during delete and rename op (#2008)
    omit c42b298  HDDS-4917.[FSO]Implement ACL requests for new layout (#2024)
    omit 5b58f54  HDDS-4790. Add a tool to parse entries in the prefix format (#1891)
    omit f9a37a4  HDDS-4973. [FSO] Missed to cleanup new FileTables in OMRequests (#2035)
    omit 135ec46  HDDS-4924. [FSO]S3Multipart: Implement OzoneBucket#listParts (#2016)
    omit f95ebb7  HDDS-4835. [FSO]S3Multipart: Implement UploadAbortRequest (#1997)
    omit 1290da6  HDDS-4490.[FSO]RenameAndDelete : make ofs#rename and ofs#delete an atomic operation. (#1965)
    omit db73c48  HDDS-4683. [FSO]ListKeys: do lookup in dir and file tables (#1954)
    omit 96a555d  HDDS-4513.[FSO]OzoneContract unit test case fixes (#1945)
    omit 1239ffe  HDDS-4813. [FSO]S3Multipart: Implement UploadCompleteRequest (#1923)
    omit b449b3f  HDDS-4742. Make trash work with FS Optimised Buckets. (#1915)
    omit 1358e6d  HDDS-4781. [FSO]S3MultiPart: Implement create and commit upload part file (#1897)
    omit f8fe740  HDDS-4805. [FSO]Fix findbugs issues after HDDS-2195 (#1906)
    omit f08f89f  HDDS-4771. [FSO]S3MultiPart: Implement InitiateMultiPartUpload (#1877)
    omit 9d7980c  HDDS-4486. Feature Config: Make proper enableFSPaths and OFS optimized flag combinations (#1848)
    omit bbb2de9  HDDS-4720. RenameKey : add unit test to verify bucket#renameKey (#1847)
    omit 3b12d2f  HDDS-4717. Fix TestOzoneFileSystemV1 and TestObjectStoreV1 cases (#1815)
    omit 30f9161  HDDS-4658. LookupKey: do lookup in dir and file tables (#1775)
    omit eb4a408  HDDS-2942. Putkey : create key table entries for intermediate directories in the key path (#1764)
    omit be61797  HDDS-4514. AllocateBlock : lookup and update open file table for the given path (#1679)
    omit dc0cfb7  HDDS-4596. Directory table, fileTable and openFile Table is missing from the OM DB Definition. (#1724)
    omit 796ed95  HDDS-4321. Fix compilation errors : merge HDDS-4308 and HDDS-4473 changes into the branch (#1668)
    omit 602119f  HDDS-4358: Delete : make delete an atomic operation (#1607)
    omit b90456f  HDDS-4357: Rename : make rename an atomic ops by updating key path entry in dir/file table (#1557)
    omit ed47d68  HDDS-4332: ListFileStatus - do lookup in directory and file tables (#1503)
    omit 5038179  HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable (#1473)
    omit 8b8a7e3  HDDS-2949: mkdir : store directory entries in a separate table (#1404)
     add 95739ea  HDDS-3185 Construct a standalone ratis server for SCM. (#720)
     add 31c7386  HDDS-3187 Construct SCM StateMachine. (#819)
     add 8f2107a  Resolve conflicts with merge from master.
     add aa2884c  HDDS-3556 Refactor conf in SCMRatisServer to Java-based conf. (#907)
     add 1f3ef36  HDDS-3186. Introduce generic SCMRatisRequest and SCMRatisResponse. (#959)
     add 30e1751  HDDS-3192. Handle AllocateContainer operation for HA. (#975)
     add c836720  HDDS-3196 New PipelineManager interface to persist to RatisServer. (#980)
     add 988b23a  HDDS-3693 Switch to new StateManager interface. (#1007)
     add 5355939  HDDS-3711. Handle inner classes in SCMRatisRequest and SCMRatisResponse. (#1016)
     add 8e86480  HDDS-3679 Add tests for PipelineManager V2. (#1019)
     add 8d74c0c  HDDS-3652 Add test for SCMRatisResponse. (#1113)
     add 3e7c427  Merge branch 'master' into HDDS-2823
     add 82c30a4  Merge branch 'master' into HDDS-2823
     add 7287e1d  HDDS-3651 Add tests for SCMRatisRequest. (#1112)
     add 144f9a8  HDDS-3911. Compile error in acceptance test on HDDS-2823 (#1157)
     add 565dabc  HDDS-3662 Decouple finalizeAndDestroyPipeline. (#1049)
     add 8a8c9eb  HDDS-3191: switch from SCMPipelineManager to PipelineManagerV2Impl (#1151)
     add 40127b3  Merge branch 'master' into HDDS-2823
     add 6de98c6  Merge branch 'master' into HDDS-2823
     add 58394eb  HDDS-3837. Add isLeader check in SCMHAManager.
     add 3ed29d8  HDDS-4059. SCMStateMachine::applyTransaction() should not invoke TransactionContext.getClientRequest().
     add d482abf  HDDS-4125. Pipeline is not removed when a datanode goes stale.
     add a70964e  HDDS-4130. remove the 1st edition of RatisServer of SCM HA which is copied from OM HA.
     add 9e0dd84  HDDS-3895. Implement container related operations in ContainerManagerImpl.
     add 5f3981c  HDDS-4115. CLI command to show current SCM leader and follower status.
     add 9f7ab46  HDDS-3188. Add failover proxy for SCM block location.
     add 5111126  HDDS-4192. enable SCM Raft Group based on config ozone.scm.names.
     add 43b87fe  HDDS-4365. SCMBlockLocationFailoverProxyProvider should use ScmBlockLocationProtocolPB.class in RPC.setProtocolEngine.
     add 782057a  Resolving master merge conflict.
     add 44a6503  HDDS-4393. Addressing test failures after master merge. (#1587)
     add f30bc4e  Merge branch 'master' into HDDS-2823
     add 285d793  HDDS-4191 Add failover proxy for SCM container location. (#1514)
     add 48b9809  HDDS-4538: Workaround on HDDS-2823, hard code scmUuid and clusterID. (#1649)
     add 0801203  HDDS-4542. Need throw exception to trigger FailoverProxyProvider of SCM client to work (#1652)
     add 0aa9ba3  HDDS-3988: DN can distinguish SCMCommand from stale leader SCM (#1314)
     add 34c393c  HDDS-4551: Remove checkLeader in PipelineManager. (#1658)
     add 91eb342  HDDS-4560. Add ReadWriteLock into PipelineStateManagerV2Impl to protect contentions between RaftServer and PipelineManager. (#1676)
     add 8b16a35  HDDS-4133. Use new ContainerManager in SCM. (#1378)
     add 2c3aed3  Merge branch 'master' into HDDS-2823
     add 7f5e522  HDDS-4575: Refactor SCMHAManager and SCMRatisServer with RaftServer.Division (#1683)
     add 9e9defd  HDDS-4589: Handle potential data loss during ReplicationManager.handleOverReplicatedContainer(). (#1700)
     add adbc89b  HDDS-4628: min/max election timeout of SCMRatisServer is not set properly. (#1742)
     add eb159d3  HDDS-4632: Add term into SetNodeOperationalStateCommand. (#1745)
     add f64a1ce  HDDS-4624. Fix set configs in SCMHAConfigration (#1739)
     add 13f2ece  HDDS-3684. Add tests for replication annotation (#1650)
     add b32c2bb  HDDS-4643. Ratis Snapshot should be loaded from the config (#1756)
     add b0d79e7  Merge branch 'master' into HDDS-2823
     add 1a04bbb  HDDS-4130:  remove the left part of the 1st edition of RatisServer of SCM HA which is copied from OM HA (#1760)
     add a52e8ef  HDDS-4630: Solve deadlock triggered by PipelineActionHandler. (#1743)
     add a5906e6  HDDS-4622: Use singe server raft cluster in MiniOzoneCluster. (#1744)
     add 572a7de  HDDS-4533.Avoid rewriting pipeline information during PipelineStateManagerV2Impl initialization (#1796)
     add 47aa782  HDDS-3212. Allow enabling purge SCM Ratis log (#1802)
     add c957e2b  HDDS-4532. Update pipeline db when pipeline state is changed. (#1785)
     add bb9c68f  HDDS-4568. Add SCMContext to SCM HA (#1737)
     add cb79d30  HDDS-4651. Implement a sequence ID generator (#1810)
     add 3ff677d  HDDS-3208. Implement Ratis snapshot on SCM (#1725)
     add 8ce33f2  HDDS-4695. Support encode and decode ArrayList and Long. (#1831)
     add 3b6918e  HDDS-4295. Add SCMServiceManager to SCM HA. (#1784)
     add b328608  HDDS-3205. Handle BlockDeletingService in SCM HA (#1780)
     add 66ba5bc  Merge branch 'master' into HDDS-2823
     add 5554023  Merge branch 'master' into HDDS-2823
     add 2cb0e1e  HDDS-4800. Fix TestContainerEndpoint after merging master to HDDS-2823. (#1903)
     add b52ed77  HDDS-4806. Fix misc acceptance test: List pipelines on unknown host (#1907)
     add 56bf321  HDDS-4804. Fix TestReconContainerManager after merge master to HDDS-2823 (#1905)
     add 069a9b4  HDDS-4797: Fix findbugs issues after HDDS-2195 (#1902)
     add d402f21  HDDS-4600. SCM HA Config Phase 1: Use SCMNodeDetails as the entry to initialize SCM except for Ratis servers (#1722)
     add 4771661  HDDS-4782.Merge SCMRatisSnapshotInfo and OMRatisSnapshotInfo into a single class (#1880)
     add 3a233ad  HDDS-4778. Add transactionId into deletingTxIDs when remove it from DB (#1873)
     add 97da84d  HDDS-4823. Make SCM Generic config support HA Style. (#1924)
     add 2cfd618  HDDS-4786. Disable Ratis new features to be consistent with master branch (#1899)
     add 4c36f14  HDDS-4756. Add lock for activate/deactivate in PipelineManagerV2 (#1862)
     add e6239c8  HDDS-4812. Move Ratis group creation to scm --init phase (#1916)
     add b60a230  HDDS-4829. Rename MiniOzoneHACluster to MiniOzoneOMHACluster. (#1925)
     add e5b281a  HDDS-4660. Update OMTransactionInfo to TransactionInfo with functions added. (#1932)
     add 1cbd6e3  HDDS-4773. Add functionality to transfer Rocks db checkpoint from leader to follower (#1870)
     add 8d49b81  HDDS-4821. Use SCM service ID in SCMBlockClient and SCM Client. (#1940)
     add 3a473e6  HDDS-4822. Implement scm --bootstrap command. (#1921)
     add d17ff92  HDDS-4107. replace scmID with clusterID for container and volume at Datanode side. (#1947)
     add 1334876  HDDS-4837. Use SCM service ID in finding SCM Datanode address. (#1934)
     add 222be1c  HDDS-4841. Make changes required for SCM admin commands to work with … (#1949)
     add 8e56906  HDDS-4861. [SCM HA Security] Implement generate SCM certificate. (#1958)
     add 920381c  HDDS-4718. Bootstrap new SCM node (#1953)
     add 590a9c5  HDDS-4740: Admin command should take effect on all SCM instance. (#1893)
     add 762fb00  HDDS-4761. Implement increment count optimization in DeletedBlockLog V2 (#1914)
     add 7257243  HDDS-4880. Fix removing local SCM when submitting request to other SCM. (#1971)
     add 8f6941a   Merge remote-tracking branch 'github/master' into HDDS-2823-2021-03-01 (#1972)
     add d682164  HDDS-4884. Fix and enable TestReconTasks. (#1974)
     add db25b54  HDDS-4874. [SCM HA Security] Implement listCertificates based on role (#1969)
     add 1cf9f0d  HDDS-4886. Fix and enable TestEndpoints.java. (#1975)
     add 6c8fea8  HDDS-4866. Datanode with scmID format should work with clusterID dire… (#1959)
     add 4a312ba  HDDS-4807. Add install checkpoint in SCMStateMachine. (#1936)
     add 00c52fb  HDDS-4894. Use PipelineManagerV2Impl in Recon and enable ignored Recon test cases. (#1987)
     add 538cb2d  HDDS-4651: Distributed Sequence ID Gen. (#1980)
     add faeb3d5  HDDS-4890. SCM Ratis enable/disable switch (#1981)
     add 98ea082  HDDS-4863. Enable SCM HA in ozone-ha test (#1967)
     add 14a4e87  HDDS-4912. Support inline upgrade from containerId, delTxnId, localId to SequenceIdGenerator. (#1996)
     add 20b8bca  HDDS-4876. [SCM HA Security] Add failover proxy to SCM Security Server Protocol (#1978)
     add 40c738f  HDDS-4896. Need a tool to upgrade current non-HA SCM node to single node HA cluster (#1999)
     add 33d4351  HDDS-4922. refactor code in SCMStateMachine. (#2007)
     add afeab2c  HDDS-4951. Return with exit code 0 in case of optional scm bootstrap/init  (#2022)
     add ca9e76e  HDDS-4957. Fix flaky test TestSCMInstallSnapshotWithHA#testInstallCorruptedCheckpointFailure (#2025)
     add dd7750e  HDDS-4877. Make SCM ratis server spin up time during initialization configurable (#2028)
     add 2cb80b1  Revert " Merge remote-tracking branch 'github/master' into HDDS-2823-2021-03-01 (#1972)"
     add 58aa3fc  Merge branch 'master' into HDDS-2823-Merge-Master-0312-Fix
     add 6105322  HDDS-4953. [SCM HA Security] Make CertStore DB updates for StoreValidateCertificate go via Ratis (#2034)
     add 456d5f9  Merge remote-tracking branch 'origin/master' into HDDS-4968
     add be28d3f  HDDS-4968. Back-port HDDS-4911 (List container by container state) to ContainerManagerV2
     add dd7c1d6  HDDS-4985. [SCM HA Security] When Ratis enable, SCM secure cluster is not working. (#2052)
     add acc1d21  HDDS-4978. [SCM HA Security] Ozone services should be disabled in SCM HA enabled and security enabled cluster (#2043)
     add 2afba1f  HDDS-4952. Implement listCAs and getRootCA API. (#2042)
     add 0795be5  HDDS-4950. Provide example k8s files to run full HA Ozone (#2023)
     add 5865873  HDDS-4899. Add SCM HA to Chaos tests. (#1989)
     add 9e4bcb2  HDDS-4948. SCM-HA documentation (#2050)
     add e2997ef  HDDS-4998. [SCM HA Security] Make storeValidCertificate method idempotent (#2063)
     add cdc170d  HDDS-4897. [SCM HA Security] Create SCM Cert Client and change DefaultCA to allow selfsigned and intermediary (#2041)
     add 5f20cd4  HDDS-5015. localId is not consistent across SCMs when setup a multi node SCM HA cluster. (#2072)
     add d6e5b71  HDDS-4981. Remove SequenceIdGenerator#StateManagerImpl (#2079)
     add 22c2d41  HDDS-4982.Solve intellj warnings on DBTransactionBuffer (#2082)
     add 96467c3  HDDS-5024. Remove empty share/hadoop directory in dist package. (#2086)
     add 378ea41  HDDS-4904. Remove mention of CSI support (#2036)
     add ddf180f  HDDS-4989. Decommission CLI should return details of nodes which fail (#2057)
     add fda8656  Merge remote-tracking branch 'apache/master' into HDDS-2823
     add 8d57331  Modify the version of ozone-runner (#2078)
     add 685ff3f  HDDS-4995. Skip CI for draft pull requests (#2060)
     add a034028  HDDS-5007. Intellij run configuration for ozonefs shell. (#2081)
     add 4b4b1df  HDDS-5039. Support command changing the logging level of a server without restarting the server (#2091)
     add d5bf345  HDDS-5019. Inconsistent tmp bucket name on different nodes when Kerberos is enabled (#2075)
     add b75b8e3  HDDS-5049. Add timeout support for ratis requests in SCM HA. (#2099)
     add 5bb8dda  HDDS-4915. [SCM HA Security] Integrate CertClient. (#2000)
     add 67c4c13  HDDS-4987. Import container should not delete container contents if container already exists (#2077)
     add fcd0160  HDDS-4901. Remove OmOzoneAclMap from OmVolumeArgs to avoid OzoneAcl conversions (#1992)
     add 3caca5b  HDDS-3752. Fix o3fs list bucket contents issue when without tailing "/" (#2088)
     add 1d4351e  HDDS-5033. SCM may not be able to know full port list of Datanode after Datanode is started. (#2090)
     add 722ae37  HDDS-5022. SCM get roles command should provide Ratis Leader/Follower… (#2098)
     add b57041e  HDDS-4553. ChunkInputStream should release buffer as soon as last byte in the buffer is read (#2062)
     add 8ec5f43  HDDS-4506. Support query parameter based v4 auth in S3g (#1628)
     add d100652  HDDS-5058. Make getScmInfo retry for a duration.
     add 8d331a3  HDDS-4504. Datanode deletion config should be based on number of blocks (#1885)
     add c0b40f7  HDDS-5032. DN stopped to load containers on volume after a container load exception. (#2109)
     add 843d39e  HDDS-5035. Use default config values to solve generated config file conflict (#2087)
     add d1d4303  HDDS-5066. Use fixed vesion from pnpm to build recon (#2115)
     add e5babd2  fix project name in NOTICE.txt (#2112)
     new 7565c97  HDDS-2949: mkdir : store directory entries in a separate table (#1404)
     new f67dde1  HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable (#1473)
     new 3070276  HDDS-4332: ListFileStatus - do lookup in directory and file tables (#1503)
     new 623da7b  HDDS-4357: Rename : make rename an atomic ops by updating key path entry in dir/file table (#1557)
     new 4295d4e  HDDS-4358: Delete : make delete an atomic operation (#1607)
     new aebaa81  HDDS-4321. Fix compilation errors : merge HDDS-4308 and HDDS-4473 changes into the branch (#1668)
     new 6febe52  HDDS-4596. Directory table, fileTable and openFile Table is missing from the OM DB Definition. (#1724)
     new aec4ca5  HDDS-4514. AllocateBlock : lookup and update open file table for the given path (#1679)
     new 765b00b  HDDS-2942. Putkey : create key table entries for intermediate directories in the key path (#1764)
     new 21ffe0e  HDDS-4658. LookupKey: do lookup in dir and file tables (#1775)
     new ea5b396  HDDS-4717. Fix TestOzoneFileSystemV1 and TestObjectStoreV1 cases (#1815)
     new 93716f0  HDDS-4720. RenameKey : add unit test to verify bucket#renameKey (#1847)
     new ffdd1a8  HDDS-4486. Feature Config: Make proper enableFSPaths and OFS optimized flag combinations (#1848)
     new f6abf23  HDDS-4771. [FSO]S3MultiPart: Implement InitiateMultiPartUpload (#1877)
     new 68aa4fd  HDDS-4805. [FSO]Fix findbugs issues after HDDS-2195 (#1906)
     new 81a3e81  HDDS-4781. [FSO]S3MultiPart: Implement create and commit upload part file (#1897)
     new 4cb5091  HDDS-4742. Make trash work with FS Optimised Buckets. (#1915)
     new 15aa093  HDDS-4813. [FSO]S3Multipart: Implement UploadCompleteRequest (#1923)
     new f61d74e  HDDS-4513.[FSO]OzoneContract unit test case fixes (#1945)
     new 2dbcaca  HDDS-4683. [FSO]ListKeys: do lookup in dir and file tables (#1954)
     new 11f6c9c  HDDS-4490.[FSO]RenameAndDelete : make ofs#rename and ofs#delete an atomic operation. (#1965)
     new 72dbf17  HDDS-4835. [FSO]S3Multipart: Implement UploadAbortRequest (#1997)
     new 2b657cd  HDDS-4924. [FSO]S3Multipart: Implement OzoneBucket#listParts (#2016)
     new ad4eb75  HDDS-4973. [FSO] Missed to cleanup new FileTables in OMRequests (#2035)
     new 188861f  HDDS-4790. Add a tool to parse entries in the prefix format (#1891)
     new 31687bf  HDDS-4917.[FSO]Implement ACL requests for new layout (#2024)
     new be94af8  HDDS-4932. [FSO] Provide list subpaths function to perform recursive ACL check during delete and rename op (#2008)
     new 055771e  HDDS-4495. [FSO]Delete : Implement async cleanup of garbage and orphan sub-dirs/files (#2093)
     new bf340bc  HDDS-5018. [FSO] Add robot tests for new Filesystem layout (#2071)

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (ba06e5c)
            \
             N -- N -- N   refs/heads/HDDS-2939 (bf340bc)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 29 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .github/workflows/post-commit.yml                  |  11 +
 NOTICE.txt                                         |   2 +-
 .../apache/hadoop/hdds/scm/XceiverClientGrpc.java  |  12 +-
 .../hadoop/hdds/scm/XceiverClientManager.java      |  21 +-
 .../apache/hadoop/hdds/scm/XceiverClientRatis.java |   4 +-
 .../hadoop/hdds/scm/storage/ChunkInputStream.java  | 119 ++--
 .../hdds/scm/storage/DummyChunkInputStream.java    |   5 +-
 .../hdds/scm/storage/TestChunkInputStream.java     |  25 +-
 hadoop-hdds/common/pom.xml                         |   4 +
 .../java/org/apache/hadoop/hdds/HddsUtils.java     |  80 ++-
 .../hadoop/hdds/protocol/DatanodeDetails.java      |   8 +-
 .../org/apache/hadoop/hdds/ratis/RatisHelper.java  |   4 +-
 .../hdds/scm/{ScmInfo.java => AddSCMRequest.java}  |  66 +-
 .../scm/DatanodeAdminError.java}                   |  48 +-
 .../org/apache/hadoop/hdds/scm/ScmConfigKeys.java  | 138 +++-
 .../java/org/apache/hadoop/hdds/scm/ScmInfo.java   |  32 +-
 .../apache/hadoop/hdds/scm/client/ScmClient.java   |  22 +-
 .../hadoop/hdds/scm/container/ContainerID.java     |  30 +-
 .../hadoop/hdds/scm/container/ContainerInfo.java   |   7 +-
 .../scm/container/common/helpers/ExcludeList.java  |   2 +-
 .../hadoop/hdds/scm/exceptions/SCMException.java   |   3 +-
 .../hadoop/hdds/scm/ha/SCMHAConfiguration.java     | 320 ++++++++++
 .../org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java  | 187 ++++++
 .../org/apache/hadoop/hdds/scm/ha/SCMNodeInfo.java | 249 ++++++++
 .../apache/hadoop/hdds/scm/ha}/package-info.java   |   9 +-
 .../protocol/StorageContainerLocationProtocol.java |  23 +-
 .../security/exception/SCMSecurityException.java   |  43 +-
 .../x509/certificate/utils/CertificateCodec.java   |   6 +-
 .../org/apache/hadoop/hdds/server/ServerUtils.java |   0
 .../apache/hadoop/hdds/server}/package-info.java   |   9 +-
 .../java/org/apache/hadoop/ozone/OzoneConsts.java  |  33 +-
 .../org/apache/hadoop/ozone/OzoneSecurityUtil.java |  25 +
 .../org/apache/hadoop/ozone/audit/SCMAction.java   |   3 +-
 .../common/ChunkBufferImplWithByteBufferList.java  |  13 +-
 .../ozone/common/IncrementalChunkBuffer.java       |  13 +-
 .../apache/hadoop/ozone/common/StorageInfo.java    |   3 +-
 .../ozone/common/ha/ratis/RatisSnapshotInfo.java   |  17 +-
 .../ha/ratis/package-info.java}                    |  38 +-
 .../hadoop/ozone/common/utils/BufferUtils.java     |  36 +-
 .../java/org/apache/hadoop/ozone/ha/ConfUtils.java | 112 ++++
 .../org/apache/hadoop/ozone/ha/package-info.java   |  21 +
 .../common/src/main/resources/ozone-default.xml    | 240 +++++++
 .../java/org/apache/hadoop/hdds/TestHddsUtils.java |  46 +-
 .../conf/TestGeneratedConfigurationOverwrite.java  |  66 ++
 .../apache/hadoop/hdds/scm/ha/TestSCMNodeInfo.java | 163 +++++
 .../apache/hadoop/hdds/scm/ha}/package-info.java   |   5 +-
 .../java/org/apache/hadoop/hdds/conf/Config.java   |   5 +
 .../org/apache/hadoop/hdds/conf/ConfigTag.java     |   3 +-
 .../hdds/conf/ConfigurationReflectionUtil.java     |  25 +-
 .../apache/hadoop/ozone/HddsDatanodeService.java   |  10 +-
 .../container/common/impl/HddsDispatcher.java      |  12 +-
 .../RandomContainerDeletionChoosingPolicy.java     |  35 +-
 ...TopNOrderedContainerDeletionChoosingPolicy.java |  45 +-
 .../ContainerDeletionChoosingPolicy.java           |   5 +-
 .../common/interfaces/ContainerDispatcher.java     |   2 +-
 .../ozone/container/common/interfaces/Handler.java |   6 +-
 .../common/statemachine/DatanodeConfiguration.java |  17 +
 .../common/statemachine/StateContext.java          |  94 ++-
 .../states/endpoint/HeartbeatEndpointTask.java     |  22 +
 .../states/endpoint/VersionEndpointTask.java       |   2 +-
 .../transport/server/ratis/XceiverServerRatis.java |  13 +-
 .../container/common/utils/HddsVolumeUtil.java     |  23 +-
 .../ozone/container/common/volume/HddsVolume.java  |   4 +-
 .../container/common/volume/MutableVolumeSet.java  |  25 +-
 .../container/keyvalue/KeyValueContainer.java      |  27 +-
 .../ozone/container/keyvalue/KeyValueHandler.java  |   4 +-
 .../helpers/KeyValueContainerLocationUtil.java     |  23 +-
 .../keyvalue/impl/FilePerBlockStrategy.java        |  27 +-
 .../keyvalue/impl/FilePerChunkStrategy.java        |  29 +-
 .../keyvalue/interfaces/ChunkManager.java          |  31 +
 .../background/BlockDeletingService.java           |  99 +--
 .../ozone/container/ozoneimpl/ContainerReader.java | 165 ++++-
 .../ozone/container/ozoneimpl/OzoneContainer.java  |  52 +-
 .../hadoop/ozone/protocol/commands/SCMCommand.java |  20 +-
 .../hadoop/ozone/container/common/ScmTestMock.java |  16 +-
 .../container/common/TestBlockDeletingService.java |  84 ++-
 .../impl/TestContainerDeletionChoosingPolicy.java  |  66 +-
 .../container/common/impl/TestHddsDispatcher.java  |   4 +-
 .../container/keyvalue/TestKeyValueContainer.java  |  25 +
 .../container/keyvalue/TestKeyValueHandler.java    |   2 +-
 .../container/ozoneimpl/TestContainerReader.java   |  53 +-
 .../container/ozoneimpl/TestOzoneContainer.java    |  10 +-
 hadoop-hdds/docs/content/design/scmha.md           |  15 +-
 .../docs/content/feature/{HA.md => OM-HA.md}       |  18 +-
 .../docs/content/feature/{HA.zh.md => OM-HA.zh.md} |   0
 hadoop-hdds/docs/content/feature/SCM-HA.md         | 162 +++++
 hadoop-hdds/docs/content/interface/CSI.md          |   5 +
 hadoop-hdds/docs/content/interface/S3.md           |   3 +-
 hadoop-hdds/docs/static/ozone-usage.png            | Bin 104961 -> 120495 bytes
 .../hadoop/hdds/protocol/SCMSecurityProtocol.java  |  40 ++
 .../SCMSecurityProtocolClientSideTranslatorPB.java |  95 ++-
 .../hdds/scm/metadata/DBTransactionBuffer.java}    |  36 +-
 .../apache/hadoop/hdds/scm/metadata/Replicate.java |  33 +
 .../scm/metadata/SCMDBTransactionBufferImpl.java   |  36 +-
 .../hadoop/hdds/scm/metadata/SCMMetadataStore.java |  17 +-
 .../hadoop/hdds/scm/metadata}/package-info.java    |  13 +-
 .../scm/protocol/ScmBlockLocationProtocol.java     |   6 +
 ...lockLocationProtocolClientSideTranslatorPB.java |  46 +-
 ...inerLocationProtocolClientSideTranslatorPB.java |  78 ++-
 .../SCMBlockLocationFailoverProxyProvider.java     | 267 ++++++++
 .../hadoop/hdds/scm/proxy/SCMClientConfig.java     | 103 +++
 .../SCMContainerLocationFailoverProxyProvider.java | 270 ++++++++
 .../apache/hadoop/hdds/scm/proxy/SCMProxyInfo.java |  70 +++
 .../SCMSecurityProtocolFailoverProxyProvider.java  | 281 +++++++++
 .../hadoop/hdds/scm/proxy}/package-info.java       |   4 +-
 .../certificate/authority/CertificateServer.java   |  20 +-
 .../certificate/authority/CertificateStore.java    |  28 +-
 .../certificate/authority/DefaultApprover.java     |  14 +-
 .../certificate/authority/DefaultCAServer.java     | 133 ++--
 .../authority/PKIProfiles/DefaultCAProfile.java    |  42 +-
 .../authority/PKIProfiles/DefaultProfile.java      |  12 +-
 .../authority/PKIProfiles/PKIProfile.java          |   9 +
 .../x509/certificate/client/CertificateClient.java |  49 ++
 .../certificate/client/DNCertificateClient.java    |   5 +
 .../client/DefaultCertificateClient.java           | 133 +++-
 .../certificate/client/OMCertificateClient.java    |   5 +
 ...ficateClient.java => SCMCertificateClient.java} |  41 +-
 .../certificates/utils/CertificateSignRequest.java |   5 +-
 .../hadoop/hdds/server/http/ProfileServlet.java    |  10 +-
 .../apache/hadoop/hdds/utils/DBStoreHAManager.java |  19 +-
 .../java/org/apache/hadoop/hdds/utils/HAUtils.java | 515 +++++++++++++++
 .../apache/hadoop/hdds/utils/HddsServerUtil.java   | 106 +++-
 .../apache/hadoop/hdds/utils/TransactionInfo.java  |  56 +-
 .../hadoop/hdds/utils/TransactionInfoCodec.java    |  17 +-
 .../x509/certificate/authority/MockCAStore.java    |  19 +-
 .../certificate/authority/TestDefaultCAServer.java | 153 ++++-
 .../src/main/proto/ScmAdminProtocol.proto          |  14 +-
 .../dev-support/findbugsExcludeFile.xml            |   3 +
 hadoop-hdds/interface-client/pom.xml               |  13 +
 .../src/main/proto/InterSCMProtocol.proto          |  46 ++
 .../interface-client/src/main/proto/hdds.proto     |  24 +
 .../src/main/proto/SCMRatisProtocol.proto}         |  44 +-
 .../proto/ScmServerDatanodeHeartbeatProtocol.proto |   4 +
 .../src/main/proto/ScmServerProtocol.proto         |   6 +
 .../src/main/proto/ScmServerSecurityProtocol.proto |  32 +
 hadoop-hdds/pom.xml                                |   6 +
 .../server-scm/dev-support/findbugsExcludeFile.xml |   7 +
 hadoop-hdds/server-scm/pom.xml                     |   9 +
 .../java/org/apache/hadoop/hdds/scm/ScmUtils.java  | 110 ++++
 .../apache/hadoop/hdds/scm/block/BlockManager.java |   5 +-
 .../hadoop/hdds/scm/block/BlockManagerImpl.java    |  67 +-
 .../hadoop/hdds/scm/block/DeletedBlockLog.java     |  18 +-
 .../hadoop/hdds/scm/block/DeletedBlockLogImpl.java |  33 +-
 ...lockLogImpl.java => DeletedBlockLogImplV2.java} | 284 ++++-----
 .../scm/block/DeletedBlockLogStateManager.java     |  48 ++
 .../scm/block/DeletedBlockLogStateManagerImpl.java | 281 +++++++++
 .../hdds/scm/block/SCMBlockDeletingService.java    |  87 ++-
 .../container/AbstractContainerReportHandler.java  |  36 +-
 .../scm/container/CloseContainerEventHandler.java  |  32 +-
 .../scm/container/ContainerActionsHandler.java     |   2 +-
 .../hdds/scm/container/ContainerManagerImpl.java   | 427 +++++++++++++
 .../hdds/scm/container/ContainerManagerV2.java     | 195 ++++++
 .../hdds/scm/container/ContainerReportHandler.java |  21 +-
 .../hdds/scm/container/ContainerStateManager.java  |  36 +-
 .../scm/container/ContainerStateManagerImpl.java   | 573 +++++++++++++++++
 .../scm/container/ContainerStateManagerV2.java     | 189 ++++++
 .../IncrementalContainerReportHandler.java         |  11 +-
 .../hdds/scm/container/ReplicationManager.java     | 157 ++++-
 .../hdds/scm/container/SCMContainerManager.java    |  14 +-
 .../scm/container/states/ContainerAttribute.java   |   2 +-
 .../scm/container/states/ContainerStateMap.java    | 362 ++++-------
 .../apache/hadoop/hdds/scm/events/SCMEvents.java   |   3 -
 .../hadoop/hdds/scm/ha/CheckedConsumer.java}       |  55 +-
 .../hadoop/hdds/scm/ha/CheckedFunction.java}       |  48 +-
 .../apache/hadoop/hdds/scm/ha/ExecutionUtil.java   |  70 +++
 .../apache/hadoop/hdds/scm/ha/HASecurityUtils.java | 309 +++++++++
 .../hadoop/hdds/scm/ha/InterSCMGrpcClient.java     | 174 ++++++
 .../hdds/scm/ha/InterSCMGrpcProtocolService.java   |  87 +++
 .../hadoop/hdds/scm/ha/InterSCMGrpcService.java    |  83 +++
 .../hdds/scm/ha/MockSCMHADBTransactionBuffer.java  | 100 +++
 .../hadoop/hdds/scm/ha/MockSCMHAManager.java       | 234 +++++++
 .../org/apache/hadoop/hdds/scm/ha/RatisUtil.java   | 177 ++++++
 .../apache/hadoop/hdds/scm/ha/ReflectionUtil.java  |  67 ++
 .../org/apache/hadoop/hdds/scm/ha/SCMContext.java  | 224 +++++++
 .../hdds/scm/ha/SCMDBCheckpointProvider.java       |  88 +++
 .../hadoop/hdds/scm/ha/SCMGrpcOutputStream.java    | 131 ++++
 .../hdds/scm/ha/SCMHADBTransactionBuffer.java      |  44 ++
 .../hdds/scm/ha/SCMHADBTransactionBufferImpl.java  | 142 +++++
 .../hadoop/hdds/scm/ha/SCMHAInvocationHandler.java | 104 ++++
 .../apache/hadoop/hdds/scm/ha/SCMHAManager.java    |  78 +++
 .../hadoop/hdds/scm/ha/SCMHAManagerImpl.java       | 363 +++++++++++
 .../hadoop/hdds/scm/ha/SCMHANodeDetails.java       | 310 +++++++++
 .../apache/hadoop/hdds/scm/ha/SCMNodeDetails.java  | 213 +++++++
 .../apache/hadoop/hdds/scm/ha/SCMRatisRequest.java | 142 +++++
 .../hadoop/hdds/scm/ha/SCMRatisResponse.java       | 109 ++++
 .../apache/hadoop/hdds/scm/ha/SCMRatisServer.java  |  60 ++
 .../hadoop/hdds/scm/ha/SCMRatisServerImpl.java     | 332 ++++++++++
 .../org/apache/hadoop/hdds/scm/ha/SCMService.java  |  75 +++
 .../hadoop/hdds/scm/ha/SCMServiceManager.java      |  87 +++
 .../hadoop/hdds/scm/ha/SCMSnapshotDownloader.java  |  43 ++
 .../hadoop/hdds/scm/ha/SCMSnapshotProvider.java    | 144 +++++
 .../apache/hadoop/hdds/scm/ha/SCMStateMachine.java | 311 +++++++++
 .../hadoop/hdds/scm/ha/SequenceIdGenerator.java    | 328 ++++++++++
 .../hadoop/hdds/scm/ha/io/BigIntegerCodec.java}    |  52 +-
 .../hadoop/hdds/scm/ha/io/BooleanCodec.java}       |  50 +-
 .../org/apache/hadoop/hdds/scm/ha/io/Codec.java}   |  50 +-
 .../apache/hadoop/hdds/scm/ha/io/CodecFactory.java |  63 ++
 .../apache/hadoop/hdds/scm/ha/io/EnumCodec.java    |  50 ++
 .../hdds/scm/ha/io/GeneratedMessageCodec.java      |  48 ++
 .../apache/hadoop/hdds/scm/ha/io/ListCodec.java    |  69 ++
 .../apache/hadoop/hdds/scm/ha/io/LongCodec.java}   |  54 +-
 .../apache/hadoop/hdds/scm/ha/io/StringCodec.java} |  50 +-
 .../hdds/scm/ha/io/X509CertificateCodec.java       |  57 ++
 .../hadoop/hdds/scm/ha/io}/package-info.java       |   5 +-
 .../apache/hadoop/hdds/scm/ha}/package-info.java   |   6 +-
 .../hadoop/hdds/scm/metadata/ContainerIDCodec.java |   4 +-
 .../hadoop/hdds/scm/metadata/SCMDBDefinition.java  |  32 +-
 .../hdds/scm/metadata/SCMMetadataStoreImpl.java    |  63 +-
 .../hadoop/hdds/scm/node/DeadNodeHandler.java      |   8 +-
 .../hadoop/hdds/scm/node/NewNodeHandler.java       |  30 +-
 .../hdds/scm/node/NodeDecommissionManager.java     |  40 +-
 .../scm/node/NonHealthyToHealthyNodeHandler.java   |  16 +-
 .../hadoop/hdds/scm/node/SCMNodeManager.java       |  23 +-
 .../hadoop/hdds/scm/node/StaleNodeHandler.java     |   2 +-
 .../hdds/scm/node/StartDatanodeAdminHandler.java   |   2 +-
 .../scm/pipeline/BackgroundPipelineCreator.java    |   2 +-
 .../scm/pipeline/BackgroundPipelineCreatorV2.java  | 304 +++++++++
 .../hdds/scm/pipeline/PipelineActionHandler.java   |  22 +-
 .../hadoop/hdds/scm/pipeline/PipelineFactory.java  |  11 +-
 .../hadoop/hdds/scm/pipeline/PipelineManager.java  |  19 +-
 .../hdds/scm/pipeline/PipelineManagerMXBean.java   |   3 +-
 .../hdds/scm/pipeline/PipelineManagerV2Impl.java   | 605 ++++++++++++++++++
 .../hdds/scm/pipeline/PipelinePlacementPolicy.java |   5 +-
 .../hadoop/hdds/scm/pipeline/PipelineProvider.java |   6 +-
 .../hdds/scm/pipeline/PipelineReportHandler.java   |  17 +-
 .../hdds/scm/pipeline/PipelineStateManager.java    |  83 ++-
 .../scm/pipeline/PipelineStateManagerV2Impl.java   | 410 ++++++++++++
 .../hdds/scm/pipeline/RatisPipelineProvider.java   |  23 +-
 .../hdds/scm/pipeline/RatisPipelineUtils.java      |  19 +
 .../hdds/scm/pipeline/SCMPipelineManager.java      | 167 ++---
 .../hdds/scm/pipeline/SimplePipelineProvider.java  |   2 +-
 .../hadoop/hdds/scm/pipeline/StateManager.java     | 125 ++++
 .../algorithms/DefaultLeaderChoosePolicy.java      |   4 +-
 .../choose/algorithms/LeaderChoosePolicy.java      |   8 +-
 .../algorithms/LeaderChoosePolicyFactory.java      |   6 +-
 .../algorithms/MinLeaderCountChoosePolicy.java     |   6 +-
 .../SCMSecurityProtocolServerSideTranslatorPB.java | 156 ++++-
 ...lockLocationProtocolServerSideTranslatorPB.java |  23 +
 ...inerLocationProtocolServerSideTranslatorPB.java |  60 +-
 .../hdds/scm/safemode/SCMSafeModeManager.java      |  67 +-
 .../scm/server/OzoneStorageContainerManager.java   |   6 +-
 .../hdds/scm/server/SCMBlockProtocolServer.java    |  40 +-
 .../hadoop/hdds/scm/server/SCMCertStore.java       | 165 ++++-
 .../hdds/scm/server/SCMClientProtocolServer.java   | 114 ++--
 .../hadoop/hdds/scm/server/SCMConfigurator.java    |  50 +-
 .../hdds/scm/server/SCMDatanodeProtocolServer.java |  18 +-
 .../hdds/scm/server/SCMSecurityProtocolServer.java | 170 ++++-
 .../hdds/scm/server/SCMStarterInterface.java       |   4 +
 .../hadoop/hdds/scm/server/SCMStorageConfig.java   |  41 ++
 .../hdds/scm/server/StorageContainerManager.java   | 693 ++++++++++++++++-----
 .../scm/server/StorageContainerManagerStarter.java |  26 +
 .../org/apache/hadoop/hdds/scm/HddsTestUtils.java  |  25 -
 .../hadoop/hdds/scm/TestHddsServerUtils.java       |  13 -
 .../java/org/apache/hadoop/hdds/scm/TestUtils.java |  41 +-
 .../hadoop/hdds/scm/block/TestBlockManager.java    |  86 ++-
 .../hadoop/hdds/scm/block/TestDeletedBlockLog.java | 155 ++---
 .../container/TestCloseContainerEventHandler.java  |  55 +-
 .../scm/container/TestContainerActionsHandler.java |   2 +-
 .../scm/container/TestContainerManagerImpl.java    | 152 +++++
 .../scm/container/TestContainerReportHandler.java  |   7 +-
 .../scm/container/TestContainerStateManager.java   |   3 +-
 .../TestIncrementalContainerReportHandler.java     |  26 +-
 .../hdds/scm/container/TestReplicationManager.java |  51 +-
 .../scm/container/TestSCMContainerManager.java     |  21 +-
 .../scm/container/TestUnknownContainerReport.java  |   7 +-
 .../container/states/TestContainerAttribute.java   |  18 +-
 .../states/TestContainerReplicaCount.java          |   6 +-
 .../hdds/scm/ha/TestReplicationAnnotation.java     | 137 ++++
 .../apache/hadoop/hdds/scm/ha/TestSCMContext.java  |  77 +++
 .../hadoop/hdds/scm/ha/TestSCMHAConfiguration.java | 204 ++++++
 .../hadoop/hdds/scm/ha/TestSCMRatisRequest.java    |  97 +++
 .../hadoop/hdds/scm/ha/TestSCMRatisResponse.java   |  89 +++
 .../hadoop/hdds/scm/ha/TestSCMServiceManager.java  | 167 +++++
 .../hdds/scm/ha/TestSequenceIDGenerator.java       | 116 ++++
 .../hdds/scm/ha/io/TestBigIntegerCodec.java}       |  58 +-
 .../hdds/scm/ha/io/TestX509CertificateCodec.java   |  61 ++
 .../hdds/scm/node/TestContainerPlacement.java      |  18 +-
 .../hdds/scm/node/TestDatanodeAdminMonitor.java    |   2 +-
 .../hadoop/hdds/scm/node/TestDeadNodeHandler.java  |  33 +-
 .../hdds/scm/node/TestNodeDecommissionManager.java |  27 +-
 .../hdds/scm/node/TestNodeReportHandler.java       |   5 +-
 .../hadoop/hdds/scm/node/TestSCMNodeManager.java   |   3 +-
 .../hadoop/hdds/scm/node/TestStatisticsUpdate.java |   3 +-
 .../scm/node/states/TestNode2ContainerMap.java     |  10 +-
 .../hdds/scm/node/states/TestNodeStateMap.java     |   8 +-
 .../hdds/scm/pipeline/MockPipelineManager.java     | 237 +++++++
 .../scm/pipeline/MockRatisPipelineProvider.java    |  47 +-
 .../scm/pipeline/TestPipelineActionHandler.java    |   6 +-
 .../TestPipelineDatanodesIntersection.java         |   2 +-
 .../hdds/scm/pipeline/TestPipelineManagerImpl.java | 645 +++++++++++++++++++
 .../scm/pipeline/TestPipelineStateManager.java     |  24 +-
 .../hdds/scm/pipeline/TestSCMPipelineManager.java  |  27 +-
 ...TestSCMStoreImplWithOldPipelineIDKeyFormat.java |  16 +
 .../choose/algorithms/TestLeaderChoosePolicy.java  |   7 +-
 .../safemode/TestHealthyPipelineSafeModeRule.java  |  78 ++-
 .../TestOneReplicaPipelineSafeModeRule.java        |  31 +-
 .../hdds/scm/safemode/TestSCMSafeModeManager.java  | 219 ++++---
 .../scm/server/TestSCMBlockProtocolServer.java     |   6 +-
 .../hadoop/hdds/scm/server/TestSCMCertStore.java   |  62 +-
 .../scm/server/TestSCMSecurityProtocolServer.java  |   3 +-
 .../server/TestStorageContainerManagerStarter.java |  35 ++
 .../ozone/container/common/TestEndPoint.java       |  12 +-
 .../hadoop/ozone/scm/node/TestSCMNodeMetrics.java  |   3 +-
 .../hdds/scm/cli/ContainerOperationClient.java     |  84 +--
 .../org/apache/hadoop/hdds/scm/cli/ScmOption.java  |  27 +-
 .../scm/cli/datanode/DecommissionSubCommand.java   |  13 +-
 .../scm/cli/datanode/MaintenanceSubCommand.java    |  14 +-
 .../scm/cli/datanode/RecommissionSubCommand.java   |  13 +-
 .../cli/datanode/TestDecommissionSubCommand.java   | 130 ++++
 .../cli/datanode/TestMaintenanceSubCommand.java    | 135 ++++
 .../cli/datanode/TestRecommissionSubCommand.java   | 130 ++++
 .../hadoop/ozone/client/io/KeyOutputStream.java    |   2 +-
 .../ozone/client/protocol/ClientProtocol.java      |   2 +
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |  28 +-
 .../hadoop/ozone/client/TestHddsClientUtils.java   |  22 -
 .../main/java/org/apache/hadoop/ozone/OFSPath.java |   2 +-
 .../main/java/org/apache/hadoop/ozone/OmUtils.java |  64 +-
 .../org/apache/hadoop/ozone/ha/NodeDetails.java    | 103 +++
 .../org/apache/hadoop/ozone/ha}/package-info.java  |   9 +-
 .../ozone/om/ha/OMFailoverProxyProvider.java       |   3 +-
 .../hadoop/ozone/om/helpers/OmBucketInfo.java      |   4 +
 .../hadoop/ozone/om/helpers/OmOzoneAclMap.java     | 357 -----------
 .../hadoop/ozone/om/helpers/OmVolumeArgs.java      |  66 +-
 .../hadoop/ozone/om/helpers/OzoneAclUtil.java      |   2 +-
 .../hadoop/ozone/om/helpers/ServiceInfoEx.java     |   8 +-
 ...OzoneManagerProtocolClientSideTranslatorPB.java |   2 +-
 .../ozone/om/ha/TestOMFailoverProxyProvider.java   |  13 +-
 .../hadoop/ozone/om/helpers/TestOmOzoneAclMap.java |  56 --
 .../hadoop/ozone/om/helpers/TestOmVolumeArgs.java  |  20 +-
 .../hadoop/ozone/om/helpers/TestOzoneAclUtil.java  |  39 ++
 .../intellij/runConfigurations/OzoneFsShell.xml    |  36 ++
 .../dist/dev-support/bin/dist-layout-stitching     |   3 -
 hadoop-ozone/dist/pom.xml                          |   2 +-
 .../src/main/compose/ozone-ha/docker-compose.yaml  |  27 +-
 .../dist/src/main/compose/ozone-ha/docker-config   |   9 +-
 .../dist/src/main/compose/ozone-ha/test.sh         |   9 +-
 .../{ozonesecure-om-ha => ozonesecure-ha}/.env     |   0
 .../docker-compose.yaml                            |  94 ++-
 .../docker-config                                  |  13 +-
 .../{ozonesecure-om-ha => ozonesecure-ha}/test.sh  |   7 +-
 .../main/compose/ozonesecure/docker-compose.yaml   |   1 +
 .../dist/src/main/compose/ozonesecure/test.sh      |  51 +-
 hadoop-ozone/dist/src/main/compose/testlib.sh      |  15 +-
 .../dist/src/main/dockerlibexec/entrypoint.sh      |   5 +
 .../k8s/definitions/ozone/definitions/om-ha.yaml   |  81 +++
 .../k8s/definitions/ozone/definitions/scm-ha.yaml  |  81 +++
 .../ozone/definitions/switchtoemptydir.yaml}       |  32 +-
 .../.env => k8s/examples/ozone-ha/Flekszible}      |  20 +-
 .../examples/{ozone => ozone-ha}/LICENSE.header    |   0
 .../k8s/examples/ozone-ha/config-configmap.yaml    |  35 ++
 .../{ozone => ozone-ha}/datanode-service.yaml      |   0
 .../{ozone => ozone-ha}/datanode-statefulset.yaml  |   0
 .../freon/freon-deployment.yaml                    |   0
 .../examples/{ozone => ozone-ha}/om-service.yaml   |   0
 .../{ozone => ozone-ha}/om-statefulset.yaml        |   0
 .../examples/{ozone => ozone-ha}/s3g-service.yaml  |   0
 .../{ozone => ozone-ha}/s3g-statefulset.yaml       |   0
 .../examples/{ozone => ozone-ha}/scm-service.yaml  |   0
 .../{ozone => ozone-ha}/scm-statefulset.yaml       |   0
 hadoop-ozone/dist/src/main/license/bin/NOTICE.txt  |   2 +-
 .../src/main/smoketest/admincli/container.robot    |   3 +-
 .../src/main/smoketest/admincli/pipeline.robot     |   3 +-
 .../smoketest/admincli/replicationmanager.robot    |   5 +-
 .../src/main/smoketest/admincli/safemode.robot     |   5 +-
 .../{basic/basic.robot => admincli/scmha.robot}    |  17 +-
 .../dist/src/main/smoketest/basic/basic.robot      |   4 +-
 .../dist/src/main/smoketest/basic/links.robot      |  11 +-
 .../src/main/smoketest/basic/ozone-shell-lib.robot |   7 +-
 .../dist/src/main/smoketest/ozonefs/ozonefs.robot  |   2 +-
 .../dist/src/main/smoketest/ozonefs/setup.robot    |   2 +-
 .../main/smoketest/security/ozone-secure-fs.robot  |  19 +-
 .../dist/src/main/smoketest/spnego/web.robot       |   9 +-
 hadoop-ozone/dist/src/shell/ozone/ozone            |   5 +
 .../fault-injection-test/mini-chaos-tests/pom.xml  |   6 +
 .../apache/hadoop/ozone/MiniOzoneChaosCluster.java | 103 ++-
 .../org/apache/hadoop/ozone/OzoneChaosCluster.java |   3 +-
 .../hadoop/ozone/TestAllMiniChaosOzoneCluster.java |   2 +-
 .../ozone/TestDatanodeMiniChaosOzoneCluster.java   |   2 +-
 .../hadoop/ozone/TestMiniChaosOzoneCluster.java    |  18 +-
 .../TestOzoneManagerMiniChaosOzoneCluster.java     |   4 +-
 ...rageContainerManagerMiniChaosOzoneCluster.java} |  17 +-
 .../org/apache/hadoop/ozone/failure/Failures.java  |  65 +-
 .../dev-support/findbugsExcludeFile.xml            |   8 +
 .../fs/ozone/TestOzoneFSWithObjectStoreCreate.java |   2 +-
 .../apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java  |  10 +-
 .../hadoop/hdds/scm/TestSCMInstallSnapshot.java    | 159 +++++
 .../apache/hadoop/hdds/scm/TestSCMSnapshot.java    | 102 +++
 .../TestContainerStateManagerIntegration.java      |  19 +-
 .../metrics/TestSCMContainerManagerMetrics.java    |  21 +-
 .../hdds/scm/pipeline/TestLeaderChoosePolicy.java  |   2 +-
 .../hdds/scm/pipeline/TestNode2PipelineMap.java    |  10 +-
 .../hdds/scm/pipeline/TestPipelineClose.java       |  16 +-
 .../TestRatisPipelineCreateAndDestroy.java         |  11 +-
 .../hadoop/hdds/scm/pipeline/TestSCMRestart.java   |   6 +-
 .../org/apache/hadoop/ozone/MiniOzoneCluster.java  |  34 +-
 .../apache/hadoop/ozone/MiniOzoneClusterImpl.java  | 115 +++-
 .../hadoop/ozone/MiniOzoneHAClusterImpl.java       | 485 +++++++++++---
 .../hadoop/ozone/MiniOzoneOMHAClusterImpl.java     | 115 ++++
 .../org/apache/hadoop/ozone/OzoneTestUtils.java    |  18 +-
 ...ACluster.java => TestMiniOzoneOMHACluster.java} |   8 +-
 .../hadoop/ozone/TestOzoneConfigurationFields.java |   7 +-
 .../hadoop/ozone/TestSecureOzoneCluster.java       |  56 +-
 .../hadoop/ozone/TestStorageContainerManager.java  | 234 +++++--
 .../ozone/client/CertificateClientTestImpl.java    |  29 +
 .../rpc/TestContainerReplicationEndToEnd.java      |   6 +-
 .../client/rpc/TestContainerStateMachine.java      |   1 +
 .../client/rpc/TestDeleteWithSlowFollower.java     |  19 +-
 .../client/rpc/TestDiscardPreallocatedBlocks.java  |   2 +-
 .../client/rpc/TestFailureHandlingByClient.java    |  10 +-
 .../rpc/TestFailureHandlingByClientFlushDelay.java |   2 +-
 .../rpc/TestMultiBlockWritesWithDnFailures.java    |   4 +-
 .../client/rpc/TestOzoneAtRestEncryption.java      |   4 +-
 ...estOzoneClientRetriesOnExceptionFlushDelay.java |   2 +-
 .../rpc/TestOzoneClientRetriesOnExceptions.java    |   4 +-
 .../client/rpc/TestOzoneRpcClientAbstract.java     |  13 +-
 .../hadoop/ozone/client/rpc/TestReadRetries.java   |   2 +-
 .../ozone/client/rpc/TestSecureOzoneRpcClient.java |   4 +-
 .../client/rpc/read/TestChunkInputStream.java      | 125 +++-
 .../apache/hadoop/ozone/container/TestHelper.java  |  16 +-
 .../TestCloseContainerByPipeline.java              |  41 +-
 .../commandhandler/TestCloseContainerHandler.java  |  10 +-
 .../commandhandler/TestDeleteContainerHandler.java |  30 +-
 .../transport/server/ratis/TestCSMMetrics.java     |   2 +-
 .../container/metrics/TestContainerMetrics.java    |   2 +-
 .../ozoneimpl/TestOzoneContainerWithTLS.java       |   3 +-
 .../container/server/TestContainerServer.java      |   4 +-
 .../server/TestSecureContainerServer.java          |   2 +-
 .../ozone/dn/TestDatanodeLayoutUpgradeTool.java    | 131 ++++
 .../hadoop/ozone/dn/scrubber/TestDataScrubber.java |   6 +-
 .../ozone/freon/TestFreonWithDatanodeRestart.java  |   1 -
 .../ozone/freon/TestFreonWithPipelineDestroy.java  |   2 +-
 .../ozone/om/TestContainerReportWithKeys.java      |   2 +-
 .../apache/hadoop/ozone/om/TestKeyManagerImpl.java |   8 +-
 .../hadoop/ozone/om/TestOMRatisSnapshots.java      |  30 +-
 .../ozone/om/TestOzoneManagerConfiguration.java    |  20 +-
 .../apache/hadoop/ozone/om/TestOzoneManagerHA.java |  10 +-
 .../ozone/om/TestOzoneManagerHAMetadataOnly.java   |   2 +-
 .../ozone/om/TestOzoneManagerHAWithData.java       |  16 +-
 .../ozone/om/parser/TestOMRatisLogParser.java      |   6 +-
 .../snapshot/TestOzoneManagerSnapshotProvider.java |  12 +-
 .../hadoop/ozone/recon/TestReconAsPassiveScm.java  |  44 +-
 .../apache/hadoop/ozone/recon/TestReconTasks.java  |  20 +-
 .../ozone/recon/TestReconWithOzoneManagerHA.java   |   8 +-
 .../hadoop/ozone/scm/TestCloseContainer.java       |   2 +-
 .../TestSCMContainerPlacementPolicyMetrics.java    |   2 +-
 .../ozone/scm/TestSCMInstallSnapshotWithHA.java    | 358 +++++++++++
 .../org/apache/hadoop/ozone/scm/TestSCMMXBean.java |  16 +-
 .../ozone/scm/TestStorageContainerManagerHA.java   | 231 +++++++
 .../hadoop/ozone/scm/TestXceiverClientManager.java |  10 +-
 .../scm/node/TestDecommissionAndMaintenance.java   |   4 +-
 .../ozone/scm/pipeline/TestSCMPipelineMetrics.java |   3 +-
 .../hadoop/ozone/shell/TestOzoneShellHA.java       |  14 +-
 .../apache/hadoop/ozone/shell/TestScmAdminHA.java  |  80 +++
 .../src/main/proto/OmClientProtocol.proto          |   2 +
 .../apache/hadoop/ozone/om/OMMetadataManager.java  |   7 +-
 ...nfoCodec.java => TestTransactionInfoCodec.java} |  21 +-
 .../apache/hadoop/ozone/om/BucketManagerImpl.java  |   8 +-
 .../hadoop/ozone/om/OmMetadataManagerImpl.java     |  14 +-
 .../org/apache/hadoop/ozone/om/OzoneManager.java   | 121 ++--
 .../hadoop/ozone/om/OzoneManagerStarter.java       |   4 +
 .../apache/hadoop/ozone/om/PrefixManagerImpl.java  |  69 +-
 .../apache/hadoop/ozone/om/VolumeManagerImpl.java  |  60 +-
 .../hadoop/ozone/om/codec/OMDBDefinition.java      |   9 +-
 .../apache/hadoop/ozone/om/ha/OMHANodeDetails.java |  40 +-
 .../apache/hadoop/ozone/om/ha/OMNodeDetails.java   |  94 +--
 .../ozone/om/ratis/OzoneManagerDoubleBuffer.java   |   5 +-
 .../ozone/om/ratis/OzoneManagerRatisServer.java    |  14 +-
 .../ozone/om/ratis/OzoneManagerStateMachine.java   |  18 +-
 .../om/ratis/utils/OzoneManagerRatisUtils.java     |  80 +--
 .../om/request/bucket/OMBucketCreateRequest.java   |  11 +-
 .../om/request/volume/OMVolumeCreateRequest.java   |   3 +-
 .../om/snapshot/OzoneManagerSnapshotProvider.java  |   2 +-
 .../protocolPB/OzoneManagerRequestHandler.java     |   5 +
 .../ozone/om/ScmBlockLocationTestingClient.java    |   6 +
 .../hadoop/ozone/om/TestOmMetadataManager.java     |  14 +-
 ...tOzoneManagerDoubleBufferWithDummyResponse.java |   9 +-
 ...TestOzoneManagerDoubleBufferWithOMResponse.java |   9 +-
 .../om/ratis/TestOzoneManagerRatisServer.java      |   6 +-
 .../om/ratis/TestOzoneManagerStateMachine.java     |   3 +-
 .../volume/acl/TestOMVolumeAddAclRequest.java      |  10 +-
 .../volume/acl/TestOMVolumeRemoveAclRequest.java   |  12 +-
 .../volume/acl/TestOMVolumeSetAclRequest.java      |  16 +-
 .../hadoop/fs/ozone/BasicOzoneFileSystem.java      |  10 +
 hadoop-ozone/recon/pom.xml                         |  11 +-
 .../hadoop/ozone/recon/ReconControllerModule.java  |   6 +-
 .../org/apache/hadoop/ozone/recon/ReconServer.java |   2 +
 .../ozone/recon/api/ClusterStateEndpoint.java      |   2 +-
 .../hadoop/ozone/recon/api/ContainerEndpoint.java  |   4 +-
 .../ozone/recon/fsck/ContainerHealthTask.java      |   8 +-
 .../ozone/recon/scm/ReconContainerManager.java     |  63 +-
 .../recon/scm/ReconContainerReportHandler.java     |   6 +-
 .../recon/scm/ReconDatanodeProtocolServer.java     |   6 +-
 .../ReconIncrementalContainerReportHandler.java    |  12 +-
 .../hadoop/ozone/recon/scm/ReconNodeManager.java   |   4 +-
 .../ozone/recon/scm/ReconPipelineManager.java      |  97 +--
 .../recon/scm/ReconPipelineReportHandler.java      |   4 +-
 .../scm/ReconStorageContainerManagerFacade.java    |  60 +-
 .../webapps/recon/ozone-recon-web/pnpm-lock.yaml   |  66 +-
 .../ozone/recon/api/TestContainerEndpoint.java     |  22 +-
 .../recon/fsck/TestContainerHealthStatus.java      |   2 +-
 .../ozone/recon/fsck/TestContainerHealthTask.java  |  28 +-
 .../TestContainerHealthTaskRecordGenerator.java    |   2 +-
 .../scm/AbstractReconContainerManagerTest.java     |  38 +-
 .../ozone/recon/scm/TestReconContainerManager.java |  50 +-
 ...TestReconIncrementalContainerReportHandler.java |  15 +-
 .../ozone/recon/scm/TestReconPipelineManager.java  |  48 +-
 .../recon/scm/TestReconPipelineReportHandler.java  |   4 +-
 .../hadoop/ozone/s3/AWSSignatureProcessor.java     | 462 --------------
 .../hadoop/ozone/s3/OzoneClientProducer.java       |  92 +--
 .../hadoop/ozone/s3/endpoint/EndpointBase.java     |  14 -
 .../hadoop/ozone/s3/exception/OS3Exception.java    |  14 +-
 .../hadoop/ozone/s3/exception/S3ErrorTable.java    |   2 +-
 .../ozone/s3/signature/AWSSignatureProcessor.java  | 205 ++++++
 .../AuthorizationV2HeaderParser.java}              |  70 +--
 .../AuthorizationV4HeaderParser.java}              | 206 +++---
 .../s3/signature/AuthorizationV4QueryParser.java   |  98 +++
 .../ozone/s3/{header => signature}/Credential.java |   8 +-
 .../hadoop/ozone/s3/signature/SignatureInfo.java   | 113 ++++
 .../hadoop/ozone/s3/signature/SignatureParser.java |  35 +-
 .../s3/{ => signature}/SignatureProcessor.java     |  30 +-
 .../ozone/s3/signature/StringToSignProducer.java   | 324 ++++++++++
 .../s3/{header => signature}/package-info.java     |   2 +-
 .../apache/hadoop/ozone/s3/util/OzoneS3Util.java   |   3 +-
 .../hadoop/ozone/s3/TestAWSSignatureProcessor.java | 141 -----
 .../hadoop/ozone/s3/TestOzoneClientProducer.java   | 100 +--
 .../hadoop/ozone/s3/endpoint/TestBucketPut.java    |  31 +-
 .../hadoop/ozone/s3/endpoint/TestRootList.java     |  24 -
 .../TestAuthorizationV2HeaderParser.java}          |  35 +-
 .../TestAuthorizationV4HeaderParser.java}          | 153 +++--
 .../signature/TestAuthorizationV4QueryParser.java  | 146 +++++
 .../s3/signature/TestStringToSignProducer.java     |  90 +++
 .../hadoop/ozone/s3/util/TestOzoneS3Util.java      |  18 +-
 .../admin/scm/GetScmRatisRolesSubcommand.java      |  45 ++
 .../apache/hadoop/ozone/admin/scm/ScmAdmin.java    |  64 ++
 .../hadoop/ozone/admin/scm}/package-info.java      |  12 +-
 .../apache/hadoop/ozone/debug/DatanodeLayout.java  | 106 ++++
 .../apache/hadoop/ozone/debug/ExportContainer.java |   6 +-
 .../hadoop/ozone/freon/BaseFreonGenerator.java     |  30 +-
 .../ozone/freon/ClosedContainerReplicator.java     |   2 +-
 .../freon/containergenerator/GeneratorOm.java      |   8 +-
 .../ozone/genesis/BenchMarkDatanodeDispatcher.java |   2 +-
 .../apache/hadoop/ozone/genesis/GenesisUtil.java   |   2 +-
 542 files changed, 25058 insertions(+), 5700 deletions(-)
 copy hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/{ScmInfo.java => AddSCMRequest.java} (50%)
 copy hadoop-hdds/common/src/main/java/org/apache/hadoop/{ozone/audit/SCMAction.java => hdds/scm/DatanodeAdminError.java} (52%)
 create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAConfiguration.java
 create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java
 create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeInfo.java
 copy {hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header => hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha}/package-info.java (88%)
 rename hadoop-hdds/{framework => common}/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java (100%)
 copy {hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header => hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server}/package-info.java (88%)
 rename hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OMRatisSnapshotInfo.java => hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ha/ratis/RatisSnapshotInfo.java (84%)
 copy hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/{audit/SCMAction.java => common/ha/ratis/package-info.java} (52%)
 create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/ha/ConfUtils.java
 create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/ha/package-info.java
 create mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestGeneratedConfigurationOverwrite.java
 create mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMNodeInfo.java
 copy {hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header => hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/ha}/package-info.java (89%)
 rename hadoop-hdds/docs/content/feature/{HA.md => OM-HA.md} (78%)
 rename hadoop-hdds/docs/content/feature/{HA.zh.md => OM-HA.zh.md} (100%)
 create mode 100644 hadoop-hdds/docs/content/feature/SCM-HA.md
 copy hadoop-hdds/{server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java => framework/src/main/java/org/apache/hadoop/hdds/scm/metadata/DBTransactionBuffer.java} (52%)
 create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/metadata/Replicate.java
 copy hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfoEx.java => hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMDBTransactionBufferImpl.java (56%)
 rename hadoop-hdds/{server-scm => framework}/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStore.java (90%)
 copy {hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header => hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/metadata}/package-info.java (62%)
 create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMBlockLocationFailoverProxyProvider.java
 create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMClientConfig.java
 create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMContainerLocationFailoverProxyProvider.java
 create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMProxyInfo.java
 create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMSecurityProtocolFailoverProxyProvider.java
 rename hadoop-hdds/{server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis => framework/src/main/java/org/apache/hadoop/hdds/scm/proxy}/package-info.java (88%)
 copy hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/{OMCertificateClient.java => SCMCertificateClient.java} (78%)
 copy hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/package-info.java => hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBStoreHAManager.java (67%)
 create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java
 rename hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/ratis/OMTransactionInfo.java => hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java (73%)
 rename hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OMTransactionInfoCodec.java => hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfoCodec.java (69%)
 create mode 100644 hadoop-hdds/interface-client/src/main/proto/InterSCMProtocol.proto
 copy hadoop-hdds/{server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerMXBean.java => interface-server/src/main/proto/SCMRatisProtocol.proto} (52%)
 copy hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/{DeletedBlockLogImpl.java => DeletedBlockLogImplV2.java} (61%)
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManager.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManagerImpl.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerV2.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerV2.java
 copy hadoop-hdds/{common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java => server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/CheckedConsumer.java} (52%)
 copy hadoop-hdds/{common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java => server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/CheckedFunction.java} (52%)
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/ExecutionUtil.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcClient.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcProtocolService.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcService.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/MockSCMHADBTransactionBuffer.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/RatisUtil.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/ReflectionUtil.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMContext.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMDBCheckpointProvider.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMGrpcOutputStream.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHADBTransactionBuffer.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHADBTransactionBufferImpl.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAInvocationHandler.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHANodeDetails.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeDetails.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisRequest.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisResponse.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServer.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMService.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMServiceManager.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMSnapshotDownloader.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMSnapshotProvider.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SequenceIdGenerator.java
 copy hadoop-hdds/{common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java => server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/BigIntegerCodec.java} (52%)
 copy hadoop-hdds/{common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java => server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/BooleanCodec.java} (51%)
 copy hadoop-hdds/{common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java => server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/Codec.java} (50%)
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/CodecFactory.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/EnumCodec.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/GeneratedMessageCodec.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/ListCodec.java
 copy hadoop-hdds/{common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java => server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/LongCodec.java} (51%)
 copy hadoop-hdds/{common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java => server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/StringCodec.java} (51%)
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/X509CertificateCodec.java
 copy {hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header => hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io}/package-info.java (88%)
 copy {hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header => hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha}/package-info.java (88%)
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreatorV2.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerV2Impl.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/StateManager.java
 create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java
 create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestReplicationAnnotation.java
 create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMContext.java
 create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java
 create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisRequest.java
 create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisResponse.java
 create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMServiceManager.java
 create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSequenceIDGenerator.java
 copy hadoop-hdds/{common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java => server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/io/TestBigIntegerCodec.java} (51%)
 create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/io/TestX509CertificateCodec.java
 create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java
 create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
 create mode 100644 hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java
 create mode 100644 hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java
 create mode 100644 hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java
 create mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ha/NodeDetails.java
 copy hadoop-ozone/{s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header => common/src/main/java/org/apache/hadoop/ozone/ha}/package-info.java (88%)
 delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java
 delete mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmOzoneAclMap.java
 create mode 100644 hadoop-ozone/dev-support/intellij/runConfigurations/OzoneFsShell.xml
 copy hadoop-ozone/dist/src/main/compose/{ozonesecure-om-ha => ozonesecure-ha}/.env (100%)
 rename hadoop-ozone/dist/src/main/compose/{ozonesecure-om-ha => ozonesecure-ha}/docker-compose.yaml (69%)
 rename hadoop-ozone/dist/src/main/compose/{ozonesecure-om-ha => ozonesecure-ha}/docker-config (92%)
 rename hadoop-ozone/dist/src/main/compose/{ozonesecure-om-ha => ozonesecure-ha}/test.sh (88%)
 create mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/om-ha.yaml
 create mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/scm-ha.yaml
 copy hadoop-ozone/dist/src/main/{compose/ozonesecure-om-ha/.env => k8s/definitions/ozone/definitions/switchtoemptydir.yaml} (62%)
 rename hadoop-ozone/dist/src/main/{compose/ozonesecure-om-ha/.env => k8s/examples/ozone-ha/Flekszible} (71%)
 copy hadoop-ozone/dist/src/main/k8s/examples/{ozone => ozone-ha}/LICENSE.header (100%)
 create mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/config-configmap.yaml
 copy hadoop-ozone/dist/src/main/k8s/examples/{ozone => ozone-ha}/datanode-service.yaml (100%)
 copy hadoop-ozone/dist/src/main/k8s/examples/{ozone => ozone-ha}/datanode-statefulset.yaml (100%)
 copy hadoop-ozone/dist/src/main/k8s/examples/{ozone => ozone-ha}/freon/freon-deployment.yaml (100%)
 copy hadoop-ozone/dist/src/main/k8s/examples/{ozone => ozone-ha}/om-service.yaml (100%)
 copy hadoop-ozone/dist/src/main/k8s/examples/{ozone => ozone-ha}/om-statefulset.yaml (100%)
 copy hadoop-ozone/dist/src/main/k8s/examples/{ozone => ozone-ha}/s3g-service.yaml (100%)
 copy hadoop-ozone/dist/src/main/k8s/examples/{ozone => ozone-ha}/s3g-statefulset.yaml (100%)
 copy hadoop-ozone/dist/src/main/k8s/examples/{ozone => ozone-ha}/scm-service.yaml (100%)
 copy hadoop-ozone/dist/src/main/k8s/examples/{ozone => ozone-ha}/scm-statefulset.yaml (100%)
 copy hadoop-ozone/dist/src/main/smoketest/{basic/basic.robot => admincli/scmha.robot} (53%)
 copy hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/{TestOzoneManagerMiniChaosOzoneCluster.java => TestStorageContainerManagerMiniChaosOzoneCluster.java} (80%)
 create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java
 create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMSnapshot.java
 create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneOMHAClusterImpl.java
 rename hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/{TestMiniOzoneHACluster.java => TestMiniOzoneOMHACluster.java} (93%)
 create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/TestDatanodeLayoutUpgradeTool.java
 create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMInstallSnapshotWithHA.java
 create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHA.java
 create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java
 rename hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/{TestOMTransactionInfoCodec.java => TestTransactionInfoCodec.java} (78%)
 delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSSignatureProcessor.java
 create mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AWSSignatureProcessor.java
 rename hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/{header/AuthorizationHeaderV2.java => signature/AuthorizationV2HeaderParser.java} (66%)
 rename hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/{header/AuthorizationHeaderV4.java => signature/AuthorizationV4HeaderParser.java} (57%)
 create mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AuthorizationV4QueryParser.java
 rename hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/{header => signature}/Credential.java (93%)
 create mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/SignatureInfo.java
 copy hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java => hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/SignatureParser.java (67%)
 rename hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/{ => signature}/SignatureProcessor.java (63%)
 create mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/StringToSignProducer.java
 copy hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/{header => signature}/package-info.java (95%)
 delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAWSSignatureProcessor.java
 rename hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/{header/TestAuthorizationHeaderV2.java => signature/TestAuthorizationV2HeaderParser.java} (69%)
 rename hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/{header/TestAuthorizationHeaderV4.java => signature/TestAuthorizationV4HeaderParser.java} (71%)
 create mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/signature/TestAuthorizationV4QueryParser.java
 create mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/signature/TestStringToSignProducer.java
 create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetScmRatisRolesSubcommand.java
 create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java
 rename hadoop-ozone/{s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header => tools/src/main/java/org/apache/hadoop/ozone/admin/scm}/package-info.java (82%)
 create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DatanodeLayout.java

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 21/29: HDDS-4490.[FSO]RenameAndDelete : make ofs#rename and ofs#delete an atomic operation. (#1965)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 11f6c9ca24bddbd219b202c4fc80518a78603029
Author: Sadanand Shenoy <sa...@gmail.com>
AuthorDate: Tue Mar 2 19:41:23 2021 +0530

    HDDS-4490.[FSO]RenameAndDelete : make ofs#rename and ofs#delete an atomic operation. (#1965)
---
 .../hadoop/fs/ozone/TestRootedOzoneFileSystem.java | 224 +++++++++++++++++++--
 .../fs/ozone/TestRootedOzoneFileSystemV1.java      | 117 +++++++++++
 .../hadoop/ozone/client/rpc/TestReadRetries.java   |  20 +-
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |   8 +-
 .../java/org/apache/hadoop/ozone/om/OMMetrics.java |  17 ++
 .../hadoop/ozone/om/TrashOzoneFileSystem.java      |   2 +
 .../fs/ozone/BasicRootedOzoneFileSystem.java       |  37 ++++
 7 files changed, 398 insertions(+), 27 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
index 3e12373..b5b7703 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OMMetrics;
 import org.apache.hadoop.ozone.om.TrashPolicyOzone;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
 import org.apache.hadoop.ozone.security.acl.OzoneAclConfig;
@@ -61,7 +62,10 @@ import org.junit.Test;
 import org.junit.rules.Timeout;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -71,12 +75,12 @@ import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Optional;
+import java.util.Random;
 import java.util.Set;
 import java.util.TreeSet;
 import java.util.stream.Collectors;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
-import static org.apache.hadoop.fs.FileSystem.LOG;
 import static org.apache.hadoop.fs.FileSystem.TRASH_PREFIX;
 import static org.apache.hadoop.fs.ozone.Constants.LISTING_PAGE_SIZE;
 import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
@@ -85,8 +89,10 @@ import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 /**
  * Ozone file system tests that are not covered by contract tests.
@@ -95,6 +101,9 @@ import static org.junit.Assert.assertTrue;
 @RunWith(Parameterized.class)
 public class TestRootedOzoneFileSystem {
 
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestRootedOzoneFileSystem.class);
+
   @Parameterized.Parameters
   public static Collection<Object[]> data() {
     return Arrays.asList(
@@ -110,11 +119,20 @@ public class TestRootedOzoneFileSystem {
     omRatisEnabled = enableOMRatis;
   }
 
+  public static FileSystem getFs() {
+    return fs;
+  }
+
+  public static Path getBucketPath() {
+    return bucketPath;
+  }
+
   @Rule
   public Timeout globalTimeout = Timeout.seconds(300);;
 
   private static boolean enabledFileSystemPaths;
   private static boolean omRatisEnabled;
+  private static boolean isBucketFSOptimized = false;
 
   private static OzoneConfiguration conf;
   private static MiniOzoneCluster cluster = null;
@@ -136,8 +154,13 @@ public class TestRootedOzoneFileSystem {
     conf = new OzoneConfiguration();
     conf.setInt(FS_TRASH_INTERVAL_KEY, 1);
     conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, omRatisEnabled);
-    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
-        enabledFileSystemPaths);
+    if (isBucketFSOptimized) {
+      TestOMRequestUtils.configureFSOptimizedPaths(conf,
+          true, OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1);
+    } else {
+      conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+          enabledFileSystemPaths);
+    }
     cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(3)
         .build();
@@ -179,6 +202,10 @@ public class TestRootedOzoneFileSystem {
     return cluster.getOzoneManager().getMetrics();
   }
 
+  protected static void setIsBucketFSOptimized(boolean isBucketFSO) {
+    isBucketFSOptimized = isBucketFSO;
+  }
+
   @Test
   public void testOzoneFsServiceLoader() throws IOException {
     OzoneConfiguration confTestLoader = new OzoneConfiguration();
@@ -1226,6 +1253,11 @@ public class TestRootedOzoneFileSystem {
     long prevNumTrashRenames = getOMMetrics().getNumTrashRenames();
     long prevNumTrashFileRenames = getOMMetrics().getNumTrashFilesRenames();
 
+    long prevNumTrashAtomicDirDeletes = getOMMetrics()
+        .getNumTrashAtomicDirDeletes();
+    long prevNumTrashAtomicDirRenames = getOMMetrics()
+        .getNumTrashAtomicDirRenames();
+
     // Call moveToTrash. We can't call protected fs.rename() directly
     trash.moveToTrash(keyPath1);
     // for key in second bucket
@@ -1254,11 +1286,17 @@ public class TestRootedOzoneFileSystem {
       }
     }, 1000, 180000);
 
-    // This condition should pass after the checkpoint
-    Assert.assertTrue(getOMMetrics()
-        .getNumTrashRenames() > prevNumTrashRenames);
-    Assert.assertTrue(getOMMetrics()
-        .getNumTrashFilesRenames() > prevNumTrashFileRenames);
+    if (isBucketFSOptimized){
+      Assert.assertTrue(getOMMetrics()
+          .getNumTrashAtomicDirRenames() > prevNumTrashAtomicDirRenames);
+    } else {
+      // This condition should pass after the checkpoint
+      Assert.assertTrue(getOMMetrics()
+          .getNumTrashRenames() > prevNumTrashRenames);
+      // With new layout version, file renames wouldn't be counted
+      Assert.assertTrue(getOMMetrics()
+          .getNumTrashFilesRenames() > prevNumTrashFileRenames);
+    }
 
     // wait for deletion of checkpoint dir
     GenericTestUtils.waitFor(()-> {
@@ -1273,10 +1311,16 @@ public class TestRootedOzoneFileSystem {
     }, 1000, 120000);
 
     // This condition should succeed once the checkpoint directory is deleted
-    GenericTestUtils.waitFor(
-        () -> getOMMetrics().getNumTrashDeletes() > prevNumTrashDeletes
-            && getOMMetrics().getNumTrashFilesDeletes()
-            > prevNumTrashFileDeletes, 100, 180000);
+    if(isBucketFSOptimized){
+      GenericTestUtils.waitFor(
+          () -> getOMMetrics().getNumTrashAtomicDirDeletes() >
+              prevNumTrashAtomicDirDeletes, 100, 180000);
+    } else {
+      GenericTestUtils.waitFor(
+          () -> getOMMetrics().getNumTrashDeletes() > prevNumTrashDeletes
+              && getOMMetrics().getNumTrashFilesDeletes()
+              >= prevNumTrashFileDeletes, 100, 180000);
+    }
     // Cleanup
     ofs.delete(trashRoot, true);
     ofs.delete(trashRoot2, true);
@@ -1309,4 +1353,160 @@ public class TestRootedOzoneFileSystem {
     LambdaTestUtils.intercept(InvalidPathException.class, "Invalid path Name",
         () -> fs.create(path, false));
   }
+
+  @Test
+  public void testRenameDir() throws Exception {
+    final String dir = "dir1";
+    final Path source = new Path(getBucketPath(), dir);
+    final Path dest = new Path(source.toString() + ".renamed");
+    // Add a sub-dir to the directory to be moved.
+    final Path subdir = new Path(source, "sub_dir1");
+    getFs().mkdirs(subdir);
+    LOG.info("Created dir {}", subdir);
+    LOG.info("Will move {} to {}", source, dest);
+    getFs().rename(source, dest);
+    assertTrue("Directory rename failed", getFs().exists(dest));
+    // Verify that the subdir is also renamed i.e. keys corresponding to the
+    // sub-directories of the renamed directory have also been renamed.
+    assertTrue("Keys under the renamed directory not renamed",
+        getFs().exists(new Path(dest, "sub_dir1")));
+    // cleanup
+    getFs().delete(dest, true);
+  }
+
+  @Test
+  public void testRenameFile() throws Exception {
+    final String dir = "/dir" + new Random().nextInt(1000);
+    Path dirPath = new Path(getBucketPath() +dir);
+    getFs().mkdirs(dirPath);
+
+    Path file1Source = new Path(getBucketPath() + dir
+        + "/file1_Copy");
+    ContractTestUtils.touch(getFs(), file1Source);
+    Path file1Destin = new Path(getBucketPath() + dir + "/file1");
+    assertTrue("Renamed failed", getFs().rename(file1Source, file1Destin));
+    assertTrue("Renamed failed: /dir/file1", getFs().exists(file1Destin));
+    FileStatus[] fStatus = getFs().listStatus(dirPath);
+    assertEquals("Renamed failed", 1, fStatus.length);
+    getFs().delete(getBucketPath(), true);
+  }
+
+
+
+  /**
+   * Rename file to an existed directory.
+   */
+  @Test
+  public void testRenameFileToDir() throws Exception {
+    final String dir = "/dir" + new Random().nextInt(1000);
+    Path dirPath = new Path(getBucketPath() +dir);
+    getFs().mkdirs(dirPath);
+
+    Path file1Destin = new Path(getBucketPath() + dir  + "/file1");
+    ContractTestUtils.touch(getFs(), file1Destin);
+    Path abcRootPath = new Path(getBucketPath() + "/a/b/c");
+    getFs().mkdirs(abcRootPath);
+    assertTrue("Renamed failed", getFs().rename(file1Destin, abcRootPath));
+    assertTrue("Renamed filed: /a/b/c/file1", getFs().exists(new Path(
+        abcRootPath, "file1")));
+    getFs().delete(getBucketPath(), true);
+  }
+
+  /**
+   * Rename to the source's parent directory, it will succeed.
+   * 1. Rename from /root_dir/dir1/dir2 to /root_dir.
+   * Expected result : /root_dir/dir2
+   * <p>
+   * 2. Rename from /root_dir/dir1/file1 to /root_dir.
+   * Expected result : /root_dir/file1.
+   */
+  @Test
+  public void testRenameToParentDir() throws Exception {
+    final String root = "/root_dir";
+    final String dir1 = root + "/dir1";
+    final String dir2 = dir1 + "/dir2";
+    final Path dir2SourcePath = new Path(getBucketPath() + dir2);
+    getFs().mkdirs(dir2SourcePath);
+    final Path destRootPath = new Path(getBucketPath() + root);
+
+    Path file1Source = new Path(getBucketPath() + dir1 + "/file2");
+    ContractTestUtils.touch(getFs(), file1Source);
+
+    // rename source directory to its parent directory(destination).
+    assertTrue("Rename failed", getFs().rename(dir2SourcePath, destRootPath));
+    final Path expectedPathAfterRename =
+        new Path(getBucketPath() + root + "/dir2");
+    assertTrue("Rename failed",
+        getFs().exists(expectedPathAfterRename));
+
+    // rename source file to its parent directory(destination).
+    assertTrue("Rename failed", getFs().rename(file1Source, destRootPath));
+    final Path expectedFilePathAfterRename =
+        new Path(getBucketPath() + root + "/file2");
+    assertTrue("Rename failed",
+        getFs().exists(expectedFilePathAfterRename));
+    getFs().delete(getBucketPath(), true);
+  }
+
+  /**
+   *  Cannot rename a directory to its own subdirectory.
+   */
+  @Test
+  public void testRenameDirToItsOwnSubDir() throws Exception {
+    final String root = "/root";
+    final String dir1 = root + "/dir1";
+    final Path dir1Path = new Path(getBucketPath() + dir1);
+    // Add a sub-dir1 to the directory to be moved.
+    final Path subDir1 = new Path(dir1Path, "sub_dir1");
+    getFs().mkdirs(subDir1);
+    LOG.info("Created dir1 {}", subDir1);
+
+    final Path sourceRoot = new Path(getBucketPath() + root);
+    LOG.info("Rename op-> source:{} to destin:{}", sourceRoot, subDir1);
+    //  rename should fail and return false
+    try{
+      getFs().rename(sourceRoot, subDir1);
+      fail("Should throw exception : Cannot rename a directory to" +
+          " its own subdirectory");
+    } catch (IllegalArgumentException e){
+      //expected
+    }
+  }
+
+  /**
+   * Cleanup keyTable and directoryTable explicitly as FS delete operation
+   * is not yet supported.
+   * Fails if the (a) parent of dst does not exist or (b) parent is a file.
+   */
+  @Test
+  public void testRenameDestinationParentDoesntExist() throws Exception {
+    final String root = "/root_dir";
+    final String dir1 = root + "/dir1";
+    final String dir2 = dir1 + "/dir2";
+    final Path dir2SourcePath = new Path(getBucketPath() + dir2);
+    getFs().mkdirs(dir2SourcePath);
+    // (a) parent of dst does not exist.  /root_dir/b/c
+    final Path destinPath = new Path(getBucketPath()
+        + root + "/b/c");
+
+    // rename should throw exception
+    try {
+      getFs().rename(dir2SourcePath, destinPath);
+      fail("Should fail as parent of dst does not exist!");
+    } catch (FileNotFoundException fnfe){
+      //expected
+    }
+    // (b) parent of dst is a file. /root_dir/file1/c
+    Path filePath = new Path(getBucketPath() + root + "/file1");
+    ContractTestUtils.touch(getFs(), filePath);
+    Path newDestinPath = new Path(filePath, "c");
+    // rename shouldthrow exception
+    try{
+      getFs().rename(dir2SourcePath, newDestinPath);
+      fail("Should fail as parent of dst is a file!");
+    } catch (IOException e){
+      //expected
+    }
+  }
+
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystemV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystemV1.java
new file mode 100644
index 0000000..b0b3152
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystemV1.java
@@ -0,0 +1,117 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+
+
+@RunWith(Parameterized.class)
+public class TestRootedOzoneFileSystemV1 extends TestRootedOzoneFileSystem {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestRootedOzoneFileSystemV1.class);
+
+  @Parameterized.Parameters
+  public static Collection<Object[]> data() {
+    return Arrays.asList(
+        new Object[]{true, true},
+        new Object[]{true, false},
+        new Object[]{false, true},
+        new Object[]{false, false});
+  }
+
+  public TestRootedOzoneFileSystemV1(boolean setDefaultFs,
+      boolean enableOMRatis) throws Exception {
+    super(setDefaultFs, enableOMRatis);
+  }
+
+  @BeforeClass
+  public static void init() throws Exception {
+    setIsBucketFSOptimized(true);
+    TestRootedOzoneFileSystem.init();
+  }
+
+  /**
+   * OFS: Test recursive listStatus on root and volume.
+   */
+  @Override
+  @Ignore("TODO:HDDS-4360")
+  public void testListStatusRootAndVolumeRecursive() throws IOException {
+  }
+
+  /**
+   * Cleanup keyTable and directoryTable explicitly as FS delete operation
+   * is not yet supported.
+   * Fails if the (a) parent of dst does not exist or (b) parent is a file.
+   */
+  @Override
+  @Test
+  public void testRenameDestinationParentDoesntExist() throws Exception {
+    final String root = "/root_dir";
+    final String dir1 = root + "/dir1";
+    final String dir2 = dir1 + "/dir2";
+    final Path dir2SourcePath = new Path(getBucketPath() + dir2);
+    getFs().mkdirs(dir2SourcePath);
+    // (a) parent of dst does not exist.  /root_dir/b/c
+    final Path destinPath = new Path(getBucketPath()
+        + root + "/b/c");
+
+    // rename should fail and return false
+    Assert.assertFalse(getFs().rename(dir2SourcePath, destinPath));
+    // (b) parent of dst is a file. /root_dir/file1/c
+    Path filePath = new Path(getBucketPath() + root + "/file1");
+    ContractTestUtils.touch(getFs(), filePath);
+    Path newDestinPath = new Path(filePath, "c");
+    // rename should fail and return false
+    Assert.assertFalse(getFs().rename(dir2SourcePath, newDestinPath));
+  }
+
+  /**
+   *  Cannot rename a directory to its own subdirectory.
+   */
+  @Override
+  @Test
+  public void testRenameDirToItsOwnSubDir() throws Exception {
+    final String root = "/root";
+    final String dir1 = root + "/dir1";
+    final Path dir1Path = new Path(getBucketPath() + dir1);
+    // Add a sub-dir1 to the directory to be moved.
+    final Path subDir1 = new Path(dir1Path, "sub_dir1");
+    getFs().mkdirs(subDir1);
+    LOG.info("Created dir1 {}", subDir1);
+
+    final Path sourceRoot = new Path(getBucketPath() + root);
+    LOG.info("Rename op-> source:{} to destin:{}", sourceRoot, subDir1);
+    //  rename should fail and return false
+    Assert.assertFalse(getFs().rename(sourceRoot, subDir1));
+  }
+
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
index a61c798..446ed5c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
@@ -51,9 +51,9 @@ import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 
 import org.junit.After;
@@ -68,7 +68,6 @@ import org.junit.rules.Timeout;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
-import static org.junit.Assert.assertEquals;
 
 import org.junit.rules.ExpectedException;
 
@@ -214,10 +213,7 @@ public class TestReadRetries {
     readKey(bucket, keyName, value);
 
     // read intermediate directory
-    verifyIntermediateDir(bucket, "a/b/c/");
     verifyIntermediateDir(bucket, "a/b/c");
-    verifyIntermediateDir(bucket, "/a/b/c/");
-    verifyIntermediateDir(bucket, "/a/b/c");
 
     // shutdown the second datanode
     datanodeDetails = datanodes.get(1);
@@ -241,15 +237,11 @@ public class TestReadRetries {
     factory.releaseClient(clientSpi, false);
   }
 
-  private void verifyIntermediateDir(OzoneBucket bucket,
-      String dir) throws IOException {
-    try {
-      bucket.getKey(dir);
-      fail("Should throw exception for directory listing");
-    } catch (OMException ome) {
-      // expected
-      assertEquals(OMException.ResultCodes.KEY_NOT_FOUND, ome.getResult());
-    }
+  private void verifyIntermediateDir(OzoneBucket bucket, String dir)
+      throws IOException {
+    OzoneFileStatus fileStatus = bucket.getFileStatus(dir);
+    Assert.assertTrue(fileStatus.isDirectory());
+    Assert.assertEquals(dir, fileStatus.getTrimmedName());
   }
 
   private void readKey(OzoneBucket bucket, String keyName, String data)
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index f6ae506..24afc5f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -717,7 +717,13 @@ public class KeyManagerImpl implements KeyManager {
     if (fileStatus == null) {
       return null;
     }
-    return fileStatus.isFile() ? fileStatus.getKeyInfo() : null;
+    // Appended trailing slash to represent directory to the user
+    if (fileStatus.isDirectory()) {
+      String keyPath = OzoneFSUtils.addTrailingSlashIfNeeded(
+          fileStatus.getKeyInfo().getKeyName());
+      fileStatus.getKeyInfo().setKeyName(keyPath);
+    }
+    return fileStatus.getKeyInfo();
   }
 
   private void addBlockToken4Read(OmKeyInfo value) throws IOException {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
index 7ce0a16..cce545f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
@@ -154,6 +154,8 @@ public class OMMetrics {
   private @Metric MutableCounterLong numTrashFails;
   private @Metric MutableCounterLong numTrashRootsEnqueued;
   private @Metric MutableCounterLong numTrashRootsProcessed;
+  private @Metric MutableCounterLong numTrashAtomicDirRenames;
+  private @Metric MutableCounterLong numTrashAtomicDirDeletes;
 
   private final DBCheckpointMetrics dbCheckpointMetrics;
 
@@ -909,6 +911,13 @@ public class OMMetrics {
     return numTrashFilesDeletes.value();
   }
 
+  public long getNumTrashAtomicDirRenames() {
+    return numTrashAtomicDirRenames.value();
+  }
+
+  public long getNumTrashAtomicDirDeletes() {
+    return numTrashAtomicDirDeletes.value();
+  }
 
   public void incNumTrashActiveCycles() {
     numTrashActiveCycles.incr();
@@ -926,6 +935,14 @@ public class OMMetrics {
     numTrashFails.incr();
   }
 
+  public void incNumTrashAtomicDirRenames() {
+    numTrashAtomicDirRenames.incr();
+  }
+
+  public void incNumTrashAtomicDirDeletes() {
+    numTrashAtomicDirDeletes.incr();
+  }
+
   public void unRegister() {
     MetricsSystem ms = DefaultMetricsSystem.instance();
     ms.unregisterSource(SOURCE_NAME);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java
index a9408a8..9ca673a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java
@@ -170,6 +170,7 @@ public class TrashOzoneFileSystem extends FileSystem {
   }
 
   private boolean renameV1(OFSPath srcPath, OFSPath dstPath) {
+    ozoneManager.getMetrics().incNumTrashAtomicDirRenames();
     OzoneManagerProtocolProtos.OMRequest omRequest =
         getRenameKeyRequest(srcPath, dstPath);
     try {
@@ -199,6 +200,7 @@ public class TrashOzoneFileSystem extends FileSystem {
   }
 
   private boolean deleteV1(OFSPath srcPath) {
+    ozoneManager.getMetrics().incNumTrashAtomicDirDeletes();
     OzoneManagerProtocolProtos.OMRequest omRequest =
         getDeleteKeyRequest(srcPath);
     try {
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
index 55edd00..08d2008 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
@@ -308,6 +308,10 @@ public class BasicRootedOzoneFileSystem extends FileSystem {
     if (!ofsSrc.isInSameBucketAs(ofsDst)) {
       throw new IOException("Cannot rename a key to a different bucket");
     }
+    OzoneBucket bucket = adapterImpl.getBucket(ofsSrc, false);
+    if (OzoneFSUtils.isFSOptimizedBucket(bucket.getMetadata())) {
+      return renameV1(bucket, ofsSrc, ofsDst);
+    }
 
     // Cannot rename a directory to its own subdirectory
     Path dstParent = dst.getParent();
@@ -385,6 +389,29 @@ public class BasicRootedOzoneFileSystem extends FileSystem {
     return result;
   }
 
+  private boolean renameV1(OzoneBucket bucket,
+      OFSPath srcPath, OFSPath dstPath) throws IOException {
+    // construct src and dst key paths
+    String srcKeyPath = srcPath.getNonKeyPathNoPrefixDelim() +
+        OZONE_URI_DELIMITER + srcPath.getKeyName();
+    String dstKeyPath = dstPath.getNonKeyPathNoPrefixDelim() +
+        OZONE_URI_DELIMITER + dstPath.getKeyName();
+    try {
+      adapterImpl.rename(bucket, srcKeyPath, dstKeyPath);
+    } catch (OMException ome) {
+      LOG.error("rename key failed: {}. source:{}, destin:{}",
+          ome.getMessage(), srcKeyPath, dstKeyPath);
+      if (OMException.ResultCodes.KEY_ALREADY_EXISTS == ome.getResult() ||
+          OMException.ResultCodes.KEY_RENAME_ERROR  == ome.getResult() ||
+          OMException.ResultCodes.KEY_NOT_FOUND == ome.getResult()) {
+        return false;
+      } else {
+        throw ome;
+      }
+    }
+    return true;
+  }
+
   /**
    * Intercept rename to trash calls from TrashPolicyDefault.
    */
@@ -501,6 +528,16 @@ public class BasicRootedOzoneFileSystem extends FileSystem {
         return false;
       }
 
+
+      if (!ofsPath.isVolume() && !ofsPath.isBucket()) {
+        OzoneBucket bucket = adapterImpl.getBucket(ofsPath, false);
+        if (OzoneFSUtils.isFSOptimizedBucket(bucket.getMetadata())) {
+          String ofsKeyPath = ofsPath.getNonKeyPathNoPrefixDelim() +
+              OZONE_URI_DELIMITER + ofsPath.getKeyName();
+          return adapterImpl.deleteObject(ofsKeyPath, recursive);
+        }
+      }
+
       // Handle delete volume
       if (ofsPath.isVolume()) {
         String volumeName = ofsPath.getVolumeName();

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 11/29: HDDS-4717. Fix TestOzoneFileSystemV1 and TestObjectStoreV1 cases (#1815)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit ea5b39679e57efdd6dcaf7a76e3a4bd035290c28
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Fri Jan 22 21:15:11 2021 +0530

    HDDS-4717. Fix TestOzoneFileSystemV1 and TestObjectStoreV1 cases (#1815)
---
 .../hadoop/ozone/client/io/KeyOutputStream.java    |   8 +
 .../apache/hadoop/fs/ozone/TestOzoneDirectory.java |  26 ---
 .../hadoop/fs/ozone/TestOzoneFileSystem.java       |   9 +-
 .../apache/hadoop/ozone/om/TestObjectStoreV1.java  | 192 ++++++++++++---------
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 112 ++++++++----
 .../ozone/om/request/key/OMKeyDeleteRequestV1.java |   6 +
 6 files changed, 215 insertions(+), 138 deletions(-)

diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
index 96a4c42..ff7b7fd 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
@@ -92,6 +92,8 @@ public class KeyOutputStream extends OutputStream {
   private boolean isException;
   private final BlockOutputStreamEntryPool blockOutputStreamEntryPool;
 
+  private long clientID;
+
   /**
    * A constructor for testing purpose only.
    */
@@ -127,6 +129,11 @@ public class KeyOutputStream extends OutputStream {
     return retryCount;
   }
 
+  @VisibleForTesting
+  public long getClientID() {
+    return clientID;
+  }
+
   @SuppressWarnings({"parameternumber", "squid:S00107"})
   public KeyOutputStream(
       OzoneClientConfig config,
@@ -158,6 +165,7 @@ public class KeyOutputStream extends OutputStream {
     this.retryCount = 0;
     this.isException = false;
     this.writeOffset = 0;
+    this.clientID = handler.getId();
   }
 
   /**
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneDirectory.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneDirectory.java
index 87e9f09..56c6177 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneDirectory.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneDirectory.java
@@ -23,8 +23,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.TestDataUtil;
@@ -33,7 +31,6 @@ import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.After;
 import org.junit.Assert;
@@ -49,7 +46,6 @@ import java.util.concurrent.TimeoutException;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
-import static org.junit.Assert.fail;
 
 /**
  * Test verifies the entries and operations in directory table.
@@ -95,28 +91,6 @@ public class TestOzoneDirectory {
     Assert.assertEquals("Wrong OM numKeys metrics",
             4, cluster.getOzoneManager().getMetrics().getNumKeys());
 
-    // verify entries in directory table
-    TableIterator<String, ? extends
-            Table.KeyValue<String, OmDirectoryInfo>> iterator =
-            omMgr.getDirectoryTable().iterator();
-    iterator.seekToFirst();
-    int count = dirKeys.size();
-    Assert.assertEquals("Unexpected directory table entries!", 4, count);
-    while (iterator.hasNext()) {
-      count--;
-      Table.KeyValue<String, OmDirectoryInfo> value = iterator.next();
-      verifyKeyFormat(value.getKey(), dirKeys);
-    }
-    Assert.assertEquals("Unexpected directory table entries!", 0, count);
-
-    // verify entries in key table
-    TableIterator<String, ? extends
-            Table.KeyValue<String, OmKeyInfo>> keyTableItr =
-            omMgr.getKeyTable().iterator();
-    while (keyTableItr.hasNext()) {
-      fail("Shouldn't add any entries in KeyTable!");
-    }
-
     // create sub-dirs under same parent
     Path subDir5 = new Path("/d1/d2/d3/d4/d5");
     fs.mkdirs(subDir5);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
index c830e07..09118b9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
@@ -599,10 +599,17 @@ public class TestOzoneFileSystem {
     ArrayList<String> actualPathList = new ArrayList<>();
     if (rootItemCount != fileStatuses.length) {
       for (int i = 0; i < fileStatuses.length; i++) {
-        actualPaths.add(fileStatuses[i].getPath().getName());
+        boolean duplicate =
+                actualPaths.add(fileStatuses[i].getPath().getName());
+        if (!duplicate) {
+          LOG.info("Duplicate path:{} in FileStatusList",
+                  fileStatuses[i].getPath().getName());
+        }
         actualPathList.add(fileStatuses[i].getPath().getName());
       }
       if (rootItemCount != actualPathList.size()) {
+        LOG.info("actualPathsSize: {}", actualPaths.size());
+        LOG.info("actualPathListSize: {}", actualPathList.size());
         actualPaths.removeAll(paths);
         actualPathList.removeAll(paths);
         LOG.info("actualPaths: {}", actualPaths);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
index ee127cf..b88bbc3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
@@ -17,17 +17,23 @@
 package org.apache.hadoop.ozone.om;
 
 import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.TestDataUtil;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneKeyDetails;
 import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.KeyOutputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
@@ -36,6 +42,7 @@ import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Assert;
 import org.junit.AfterClass;
+import org.junit.After;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
@@ -45,6 +52,8 @@ import java.io.IOException;
 import java.util.HashMap;
 import java.util.UUID;
 
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
@@ -55,6 +64,9 @@ public class TestObjectStoreV1 {
   private static String clusterId;
   private static String scmId;
   private static String omId;
+  private static String volumeName;
+  private static String bucketName;
+  private static FileSystem fs;
 
   @Rule
   public Timeout timeout = new Timeout(240000);
@@ -78,12 +90,51 @@ public class TestObjectStoreV1 {
             .setOmId(omId)
             .build();
     cluster.waitForClusterToBeReady();
+    // create a volume and a bucket to be used by OzoneFileSystem
+    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster);
+    volumeName = bucket.getVolumeName();
+    bucketName = bucket.getName();
+
+    String rootPath = String.format("%s://%s.%s/",
+            OzoneConsts.OZONE_URI_SCHEME, bucket.getName(),
+            bucket.getVolumeName());
+    // Set the fs.defaultFS and start the filesystem
+    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
+    // Set the number of keys to be processed during batch operate.
+    conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5);
+    fs = FileSystem.get(conf);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    deleteRootDir();
+  }
+
+  /**
+   * Cleanup files and directories.
+   *
+   * @throws IOException DB failure
+   */
+  private void deleteRootDir() throws IOException {
+    Path root = new Path("/");
+    FileStatus[] fileStatuses = fs.listStatus(root);
+
+    if (fileStatuses == null) {
+      return;
+    }
+
+    for (FileStatus fStatus : fileStatuses) {
+      fs.delete(fStatus.getPath(), true);
+    }
+
+    fileStatuses = fs.listStatus(root);
+    if (fileStatuses != null) {
+      Assert.assertEquals("Delete root failed!", 0, fileStatuses.length);
+    }
   }
 
   @Test
   public void testCreateKey() throws Exception {
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
     String parent = "a/b/c/";
     String file = "key" + RandomStringUtils.randomNumeric(5);
     String key = parent + file;
@@ -91,74 +142,67 @@ public class TestObjectStoreV1 {
     OzoneClient client = cluster.getClient();
 
     ObjectStore objectStore = client.getObjectStore();
-    objectStore.createVolume(volumeName);
-
     OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
     Assert.assertTrue(ozoneVolume.getName().equals(volumeName));
-    ozoneVolume.createBucket(bucketName);
-
     OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
     Assert.assertTrue(ozoneBucket.getName().equals(bucketName));
 
-    Table<String, OmKeyInfo> openKeyTable =
+    Table<String, OmKeyInfo> openFileTable =
             cluster.getOzoneManager().getMetadataManager().getOpenKeyTable();
 
     // before file creation
-    verifyKeyInFileTable(openKeyTable, file, 0, true);
+    verifyKeyInFileTable(openFileTable, file, 0, true);
 
     String data = "random data";
     OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(key,
             data.length(), ReplicationType.RATIS, ReplicationFactor.ONE,
             new HashMap<>());
 
-    OmDirectoryInfo dirPathC = getDirInfo(volumeName, bucketName, parent);
+    KeyOutputStream keyOutputStream =
+            (KeyOutputStream) ozoneOutputStream.getOutputStream();
+    long clientID = keyOutputStream.getClientID();
+
+    OmDirectoryInfo dirPathC = getDirInfo(parent);
     Assert.assertNotNull("Failed to find dir path: a/b/c", dirPathC);
 
     // after file creation
-    verifyKeyInOpenFileTable(openKeyTable, file, dirPathC.getObjectID(),
-            false);
+    verifyKeyInOpenFileTable(openFileTable, clientID, file,
+            dirPathC.getObjectID(), false);
 
     ozoneOutputStream.write(data.getBytes(), 0, data.length());
     ozoneOutputStream.close();
 
-    Table<String, OmKeyInfo> keyTable =
+    Table<String, OmKeyInfo> fileTable =
             cluster.getOzoneManager().getMetadataManager().getKeyTable();
-
     // After closing the file. File entry should be removed from openFileTable
     // and it should be added to fileTable.
-    verifyKeyInFileTable(keyTable, file, dirPathC.getObjectID(), false);
-    verifyKeyInOpenFileTable(openKeyTable, file, dirPathC.getObjectID(),
-            true);
+    verifyKeyInFileTable(fileTable, file, dirPathC.getObjectID(), false);
+    verifyKeyInOpenFileTable(openFileTable, clientID, file,
+            dirPathC.getObjectID(), true);
 
     ozoneBucket.deleteKey(key);
 
     // after key delete
-    verifyKeyInFileTable(keyTable, file, dirPathC.getObjectID(), true);
-    verifyKeyInOpenFileTable(openKeyTable, file, dirPathC.getObjectID(),
-            true);
+    verifyKeyInFileTable(fileTable, file, dirPathC.getObjectID(), true);
+    verifyKeyInOpenFileTable(openFileTable, clientID, file,
+            dirPathC.getObjectID(), true);
   }
 
   @Test
   public void testLookupKey() throws Exception {
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
     String parent = "a/b/c/";
-    String file = "key" + RandomStringUtils.randomNumeric(5);
-    String key = parent + file;
+    String fileName = "key" + RandomStringUtils.randomNumeric(5);
+    String key = parent + fileName;
 
     OzoneClient client = cluster.getClient();
 
     ObjectStore objectStore = client.getObjectStore();
-    objectStore.createVolume(volumeName);
-
     OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
     Assert.assertTrue(ozoneVolume.getName().equals(volumeName));
-    ozoneVolume.createBucket(bucketName);
-
     OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
     Assert.assertTrue(ozoneBucket.getName().equals(bucketName));
 
-    Table<String, OmKeyInfo> openKeyTable =
+    Table<String, OmKeyInfo> openFileTable =
             cluster.getOzoneManager().getMetadataManager().getOpenKeyTable();
 
     String data = "random data";
@@ -166,19 +210,23 @@ public class TestObjectStoreV1 {
             data.length(), ReplicationType.RATIS, ReplicationFactor.ONE,
             new HashMap<>());
 
-    OmDirectoryInfo dirPathC = getDirInfo(volumeName, bucketName, parent);
+    KeyOutputStream keyOutputStream =
+            (KeyOutputStream) ozoneOutputStream.getOutputStream();
+    long clientID = keyOutputStream.getClientID();
+
+    OmDirectoryInfo dirPathC = getDirInfo(parent);
     Assert.assertNotNull("Failed to find dir path: a/b/c", dirPathC);
 
     // after file creation
-    verifyKeyInOpenFileTable(openKeyTable, file, dirPathC.getObjectID(),
-            false);
+    verifyKeyInOpenFileTable(openFileTable, clientID, fileName,
+            dirPathC.getObjectID(), false);
 
     ozoneOutputStream.write(data.getBytes(), 0, data.length());
 
     // open key
     try {
       ozoneBucket.getKey(key);
-      fail("Should throw exception as file is not visible and its still " +
+      fail("Should throw exception as fileName is not visible and its still " +
               "open for writing!");
     } catch (OMException ome) {
       // expected
@@ -190,34 +238,33 @@ public class TestObjectStoreV1 {
     OzoneKeyDetails keyDetails = ozoneBucket.getKey(key);
     Assert.assertEquals(key, keyDetails.getName());
 
-    Table<String, OmKeyInfo> keyTable =
+    Table<String, OmKeyInfo> fileTable =
             cluster.getOzoneManager().getMetadataManager().getKeyTable();
 
     // When closing the key, entry should be removed from openFileTable
     // and it should be added to fileTable.
-    verifyKeyInFileTable(keyTable, file, dirPathC.getObjectID(), false);
-    verifyKeyInOpenFileTable(openKeyTable, file, dirPathC.getObjectID(),
-            true);
+    verifyKeyInFileTable(fileTable, fileName, dirPathC.getObjectID(), false);
+    verifyKeyInOpenFileTable(openFileTable, clientID, fileName,
+            dirPathC.getObjectID(), true);
 
     ozoneBucket.deleteKey(key);
 
     // get deleted key
     try {
       ozoneBucket.getKey(key);
-      fail("Should throw exception as file not exists!");
+      fail("Should throw exception as fileName not exists!");
     } catch (OMException ome) {
       // expected
       assertEquals(ome.getResult(), OMException.ResultCodes.KEY_NOT_FOUND);
     }
 
     // after key delete
-    verifyKeyInFileTable(keyTable, file, dirPathC.getObjectID(), true);
-    verifyKeyInOpenFileTable(openKeyTable, file, dirPathC.getObjectID(),
-            true);
+    verifyKeyInFileTable(fileTable, fileName, dirPathC.getObjectID(), true);
+    verifyKeyInOpenFileTable(openFileTable, clientID, fileName,
+            dirPathC.getObjectID(), true);
   }
 
-  private OmDirectoryInfo getDirInfo(String volumeName, String bucketName,
-      String parentKey) throws Exception {
+  private OmDirectoryInfo getDirInfo(String parentKey) throws Exception {
     OMMetadataManager omMetadataManager =
             cluster.getOzoneManager().getMetadataManager();
     long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName,
@@ -238,51 +285,38 @@ public class TestObjectStoreV1 {
 
   private void verifyKeyInFileTable(Table<String, OmKeyInfo> fileTable,
       String fileName, long parentID, boolean isEmpty) throws IOException {
-    TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> iterator
-            = fileTable.iterator();
 
+    String dbFileKey = parentID + OM_KEY_PREFIX + fileName;
+    OmKeyInfo omKeyInfo = fileTable.get(dbFileKey);
     if (isEmpty) {
-      Assert.assertTrue("Table is not empty!", fileTable.isEmpty());
+      Assert.assertNull("Table is not empty!", omKeyInfo);
     } else {
-      Assert.assertFalse("Table is empty!", fileTable.isEmpty());
-      while (iterator.hasNext()) {
-        Table.KeyValue<String, OmKeyInfo> next = iterator.next();
-        Assert.assertEquals("Invalid Key: " + next.getKey(),
-                parentID + "/" + fileName, next.getKey());
-        OmKeyInfo omKeyInfo = next.getValue();
-        Assert.assertEquals("Invalid Key", fileName,
-                omKeyInfo.getFileName());
-        Assert.assertEquals("Invalid Key", fileName,
-                omKeyInfo.getKeyName());
-        Assert.assertEquals("Invalid Key", parentID,
-                omKeyInfo.getParentObjectID());
-      }
+      Assert.assertNotNull("Table is empty!", omKeyInfo);
+      // The fileTable key format is <parentID>/fileName; verify that the
+      // stored keyName and parentObjectID match the expected values.
+      Assert.assertEquals("Invalid Key: " + omKeyInfo.getObjectInfo(),
+              omKeyInfo.getKeyName(), fileName);
+      Assert.assertEquals("Invalid Key", parentID,
+              omKeyInfo.getParentObjectID());
     }
   }
 
   private void verifyKeyInOpenFileTable(Table<String, OmKeyInfo> openFileTable,
-      String fileName, long parentID, boolean isEmpty) throws IOException {
-    TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> iterator
-            = openFileTable.iterator();
-
+      long clientID, String fileName, long parentID, boolean isEmpty)
+          throws IOException {
+    String dbOpenFileKey =
+            parentID + OM_KEY_PREFIX + fileName + OM_KEY_PREFIX + clientID;
+    OmKeyInfo omKeyInfo = openFileTable.get(dbOpenFileKey);
     if (isEmpty) {
-      Assert.assertTrue("Table is not empty!", openFileTable.isEmpty());
+      Assert.assertNull("Table is not empty!", omKeyInfo);
     } else {
-      Assert.assertFalse("Table is empty!", openFileTable.isEmpty());
-      while (iterator.hasNext()) {
-        Table.KeyValue<String, OmKeyInfo> next = iterator.next();
-        // used startsWith because the key format is,
-        // <parentID>/fileName/<clientID> and clientID is not visible.
-        Assert.assertTrue("Invalid Key: " + next.getKey(),
-                next.getKey().startsWith(parentID + "/" + fileName));
-        OmKeyInfo omKeyInfo = next.getValue();
-        Assert.assertEquals("Invalid Key", fileName,
-                omKeyInfo.getFileName());
-        Assert.assertEquals("Invalid Key", fileName,
-                omKeyInfo.getKeyName());
-        Assert.assertEquals("Invalid Key", parentID,
-                omKeyInfo.getParentObjectID());
-      }
+      Assert.assertNotNull("Table is empty!", omKeyInfo);
+      // The openFileTable key format is <parentID>/fileName/<clientID>;
+      // verify that the stored keyName and parentObjectID match the
+      // expected values.
+      Assert.assertEquals("Invalid Key: " + omKeyInfo.getObjectInfo(),
+              omKeyInfo.getKeyName(), fileName);
+      Assert.assertEquals("Invalid Key", parentID,
+              omKeyInfo.getParentObjectID());
     }
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index db28ff7..055ab13 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -31,7 +31,6 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
-import java.util.LinkedHashSet;
 import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
@@ -2320,6 +2319,7 @@ public class KeyManagerImpl implements KeyManager {
     return fileStatusList;
   }
 
+  @SuppressWarnings("methodlength")
   public List<OzoneFileStatus> listStatusV1(OmKeyArgs args, boolean recursive,
       String startKey, long numEntries, String clientAddress)
           throws IOException {
@@ -2327,10 +2327,32 @@ public class KeyManagerImpl implements KeyManager {
 
     // unsorted OMKeyInfo list contains combine results from TableCache and DB.
     List<OzoneFileStatus> fileStatusFinalList = new ArrayList<>();
-    LinkedHashSet<OzoneFileStatus> fileStatusList = new LinkedHashSet<>();
+
     if (numEntries <= 0) {
       return fileStatusFinalList;
     }
+
+    /**
+     * A map sorted by OmKey to combine results from TableCache and DB for
+     * each entity - Dir & File.
+     *
+     * Two separate maps are required because the order of seek -> (1)Seek
+     * files in fileTable (2)Seek dirs in dirTable.
+     *
+     * StartKey should be added to the final listStatuses, so if we combine
+     * files and dirs into a single map then directory with lower precedence
+     * will appear at the top of the list even if the startKey is given as
+     * fileName.
+     *
+     * For example, startKey="a/file1". As per the seek order, first fetches
+     * all the files and then it will start seeking all the directories.
+     * Assume a directory name exists "a/b". With one map, the sorted list will
+     * be ["a/b", "a/file1"]. But the expected list is: ["a/file1", "a/b"],
+     * startKey element should always be at the top of the listStatuses.
+     */
+    TreeMap<String, OzoneFileStatus> cacheFileMap = new TreeMap<>();
+    TreeMap<String, OzoneFileStatus> cacheDirMap = new TreeMap<>();
+
     String volumeName = args.getVolumeName();
     String bucketName = args.getBucketName();
     String keyName = args.getKeyName();
@@ -2373,12 +2395,12 @@ public class KeyManagerImpl implements KeyManager {
         seekFileInDB = metadataManager.getOzonePathKey(prefixKeyInDB, "");
         seekDirInDB = metadataManager.getOzonePathKey(prefixKeyInDB, "");
 
-        // Order of seek -> (1)Seek dirs in dirTable (2)Seek files in fileTable
+        // Order of seek -> (1)Seek files in fileTable (2)Seek dirs in dirTable
         // 1. Seek the given key in key table.
-        countEntries = getFilesFromDirectory(fileStatusList, seekFileInDB,
+        countEntries = getFilesFromDirectory(cacheFileMap, seekFileInDB,
                 prefixPath, prefixKeyInDB, startKey, countEntries, numEntries);
         // 2. Seek the given key in dir table.
-        getDirectories(fileStatusList, seekDirInDB, prefixPath, prefixKeyInDB,
+        getDirectories(cacheDirMap, seekDirInDB, prefixPath, prefixKeyInDB,
                 startKey, countEntries, numEntries, volumeName, bucketName,
                 recursive);
       } else {
@@ -2420,7 +2442,7 @@ public class KeyManagerImpl implements KeyManager {
             // dirTable. So, its not required to search again in the fileTable.
 
             // Seek the given key in dirTable.
-            getDirectories(fileStatusList, seekDirInDB, prefixPath,
+            getDirectories(cacheDirMap, seekDirInDB, prefixPath,
                     prefixKeyInDB, startKey, countEntries, numEntries,
                     volumeName, bucketName, recursive);
           } else {
@@ -2430,11 +2452,11 @@ public class KeyManagerImpl implements KeyManager {
             seekDirInDB = metadataManager.getOzonePathKey(prefixKeyInDB, "");
 
             // 1. Seek the given key in key table.
-            countEntries = getFilesFromDirectory(fileStatusList, seekFileInDB,
+            countEntries = getFilesFromDirectory(cacheFileMap, seekFileInDB,
                     prefixPath, prefixKeyInDB, startKey, countEntries,
                     numEntries);
             // 2. Seek the given key in dir table.
-            getDirectories(fileStatusList, seekDirInDB, prefixPath,
+            getDirectories(cacheDirMap, seekDirInDB, prefixPath,
                     prefixKeyInDB, startKey, countEntries, numEntries,
                     volumeName, bucketName, recursive);
           }
@@ -2451,12 +2473,16 @@ public class KeyManagerImpl implements KeyManager {
       metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName,
               bucketName);
     }
-    List<OmKeyInfo> keyInfoList = new ArrayList<>(fileStatusList.size());
-    for (OzoneFileStatus fileStatus : fileStatusList) {
-      if (fileStatus.isFile()) {
-        keyInfoList.add(fileStatus.getKeyInfo());
-      }
+
+    List<OmKeyInfo> keyInfoList = new ArrayList<>();
+    for (OzoneFileStatus fileStatus : cacheFileMap.values()) {
+      fileStatusFinalList.add(fileStatus);
+      keyInfoList.add(fileStatus.getKeyInfo());
+    }
+    for (OzoneFileStatus fileStatus : cacheDirMap.values()) {
+      fileStatusFinalList.add(fileStatus);
     }
+
     // refreshPipeline flag check has been removed as part of
     // https://issues.apache.org/jira/browse/HDDS-3658.
     // Please refer this jira for more details.
@@ -2464,20 +2490,23 @@ public class KeyManagerImpl implements KeyManager {
     if (args.getSortDatanodes()) {
       sortDatanodes(clientAddress, keyInfoList.toArray(new OmKeyInfo[0]));
     }
-    fileStatusFinalList.addAll(fileStatusList);
     return fileStatusFinalList;
   }
 
   @SuppressWarnings("parameternumber")
-  protected int getDirectories(Set<OzoneFileStatus> fileStatusList,
+  protected int getDirectories(
+      TreeMap<String, OzoneFileStatus> cacheKeyMap,
       String seekDirInDB, String prefixPath, long prefixKeyInDB,
       String startKey, int countEntries, long numEntries, String volumeName,
       String bucketName, boolean recursive) throws IOException {
 
+    // A set to keep track of keys deleted in cache but not flushed to DB.
+    Set<String> deletedKeySet = new TreeSet<>();
+
     Table dirTable = metadataManager.getDirectoryTable();
-    countEntries = listStatusFindDirsInTableCache(fileStatusList, dirTable,
+    countEntries = listStatusFindDirsInTableCache(cacheKeyMap, dirTable,
             prefixKeyInDB, seekDirInDB, prefixPath, startKey, volumeName,
-            bucketName, countEntries, numEntries);
+            bucketName, countEntries, numEntries, deletedKeySet);
     TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
             iterator = dirTable.iterator();
 
@@ -2485,6 +2514,11 @@ public class KeyManagerImpl implements KeyManager {
 
     while (iterator.hasNext() && numEntries - countEntries > 0) {
       OmDirectoryInfo dirInfo = iterator.value().getValue();
+      if (deletedKeySet.contains(dirInfo.getPath())) {
+        iterator.next(); // move to next entry in the table
+        // entry is actually deleted in cache and can exist in the DB
+        continue;
+      }
       if (!OMFileRequest.isImmediateChild(dirInfo.getParentObjectID(),
               prefixKeyInDB)) {
         break;
@@ -2496,7 +2530,7 @@ public class KeyManagerImpl implements KeyManager {
                 dirInfo.getName());
         OmKeyInfo omKeyInfo = OMFileRequest.getOmKeyInfo(volumeName,
                 bucketName, dirInfo, dirName);
-        fileStatusList.add(new OzoneFileStatus(omKeyInfo, scmBlockSize,
+        cacheKeyMap.put(dirName, new OzoneFileStatus(omKeyInfo, scmBlockSize,
                 true));
         countEntries++;
       }
@@ -2507,20 +2541,28 @@ public class KeyManagerImpl implements KeyManager {
     return countEntries;
   }
 
-  private int getFilesFromDirectory(Set<OzoneFileStatus> fileStatusList,
+  private int getFilesFromDirectory(
+      TreeMap<String, OzoneFileStatus> cacheKeyMap,
       String seekKeyInDB, String prefixKeyPath, long prefixKeyInDB,
       String startKey, int countEntries, long numEntries) throws IOException {
 
+    // A set to keep track of keys deleted in cache but not flushed to DB.
+    Set<String> deletedKeySet = new TreeSet<>();
+
     Table<String, OmKeyInfo> keyTable = metadataManager.getKeyTable();
-    countEntries = listStatusFindFilesInTableCache(fileStatusList, keyTable,
+    countEntries = listStatusFindFilesInTableCache(cacheKeyMap, keyTable,
             prefixKeyInDB, seekKeyInDB, prefixKeyPath, startKey,
-            countEntries, numEntries);
+            countEntries, numEntries, deletedKeySet);
     TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
             iterator = keyTable.iterator();
     iterator.seek(seekKeyInDB);
     while (iterator.hasNext() && numEntries - countEntries > 0) {
       OmKeyInfo keyInfo = iterator.value().getValue();
-
+      if (deletedKeySet.contains(keyInfo.getPath())) {
+        iterator.next(); // move to next entry in the table
+        // entry is actually deleted in cache and can exist in the DB
+        continue;
+      }
       if (!OMFileRequest.isImmediateChild(keyInfo.getParentObjectID(),
               prefixKeyInDB)) {
         break;
@@ -2530,7 +2572,8 @@ public class KeyManagerImpl implements KeyManager {
       String fullKeyPath = OMFileRequest.getAbsolutePath(prefixKeyPath,
               keyInfo.getKeyName());
       keyInfo.setKeyName(fullKeyPath);
-      fileStatusList.add(new OzoneFileStatus(keyInfo, scmBlockSize, false));
+      cacheKeyMap.put(fullKeyPath,
+              new OzoneFileStatus(keyInfo, scmBlockSize, false));
       countEntries++;
       iterator.next(); // move to next entry in the table
     }
@@ -2542,10 +2585,10 @@ public class KeyManagerImpl implements KeyManager {
    */
   @SuppressWarnings("parameternumber")
   private int listStatusFindFilesInTableCache(
-          Set<OzoneFileStatus> fileStatusList, Table<String,
+          TreeMap<String, OzoneFileStatus> cacheKeyMap, Table<String,
           OmKeyInfo> keyTable, long prefixKeyInDB, String seekKeyInDB,
           String prefixKeyPath, String startKey, int countEntries,
-          long numEntries) {
+          long numEntries, Set<String> deletedKeySet) {
 
     Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>>
             cacheIter = keyTable.cacheIterator();
@@ -2558,6 +2601,7 @@ public class KeyManagerImpl implements KeyManager {
       OmKeyInfo cacheOmKeyInfo = entry.getValue().getCacheValue();
       // cacheOmKeyInfo is null if an entry is deleted in cache
       if(cacheOmKeyInfo == null){
+        deletedKeySet.add(cacheKey);
         continue;
       }
 
@@ -2571,7 +2615,7 @@ public class KeyManagerImpl implements KeyManager {
               omKeyInfo.getKeyName());
       omKeyInfo.setKeyName(fullKeyPath);
 
-      countEntries = addKeyInfoToFileStatusList(fileStatusList, prefixKeyInDB,
+      countEntries = addKeyInfoToFileStatusList(cacheKeyMap, prefixKeyInDB,
               seekKeyInDB, startKey, countEntries, cacheKey, omKeyInfo,
               false);
     }
@@ -2583,10 +2627,11 @@ public class KeyManagerImpl implements KeyManager {
    */
   @SuppressWarnings("parameternumber")
   private int listStatusFindDirsInTableCache(
-          Set<OzoneFileStatus> fileStatusList, Table<String,
+          TreeMap<String, OzoneFileStatus> cacheKeyMap, Table<String,
           OmDirectoryInfo> dirTable, long prefixKeyInDB, String seekKeyInDB,
           String prefixKeyPath, String startKey, String volumeName,
-          String bucketName, int countEntries, long numEntries) {
+          String bucketName, int countEntries, long numEntries,
+          Set<String> deletedKeySet) {
 
     Iterator<Map.Entry<CacheKey<String>, CacheValue<OmDirectoryInfo>>>
             cacheIter = dirTable.cacheIterator();
@@ -2599,7 +2644,9 @@ public class KeyManagerImpl implements KeyManager {
               cacheIter.next();
       String cacheKey = entry.getKey().getCacheKey();
       OmDirectoryInfo cacheOmDirInfo = entry.getValue().getCacheValue();
+      // cacheOmDirInfo is null if an entry is deleted in cache
       if(cacheOmDirInfo == null){
+        deletedKeySet.add(cacheKey);
         continue;
       }
       String fullDirPath = OMFileRequest.getAbsolutePath(prefixKeyPath,
@@ -2607,7 +2654,7 @@ public class KeyManagerImpl implements KeyManager {
       OmKeyInfo cacheDirKeyInfo = OMFileRequest.getOmKeyInfo(volumeName,
               bucketName, cacheOmDirInfo, fullDirPath);
 
-      countEntries = addKeyInfoToFileStatusList(fileStatusList, prefixKeyInDB,
+      countEntries = addKeyInfoToFileStatusList(cacheKeyMap, prefixKeyInDB,
               seekKeyInDB, startKey, countEntries, cacheKey, cacheDirKeyInfo,
               true);
     }
@@ -2615,7 +2662,8 @@ public class KeyManagerImpl implements KeyManager {
   }
 
   @SuppressWarnings("parameternumber")
-  private int addKeyInfoToFileStatusList(Set<OzoneFileStatus> fileStatusList,
+  private int addKeyInfoToFileStatusList(
+      TreeMap<String, OzoneFileStatus> cacheKeyMap,
       long prefixKeyInDB, String seekKeyInDB, String startKey,
       int countEntries, String cacheKey, OmKeyInfo cacheOmKeyInfo,
       boolean isDirectory) {
@@ -2627,7 +2675,7 @@ public class KeyManagerImpl implements KeyManager {
       if (cacheKey.startsWith(seekKeyInDB)) {
         OzoneFileStatus fileStatus = new OzoneFileStatus(cacheOmKeyInfo,
                 scmBlockSize, isDirectory);
-        fileStatusList.add(fileStatus);
+        cacheKeyMap.put(cacheOmKeyInfo.getKeyName(), fileStatus);
         countEntries++;
       }
     } else {
@@ -2641,7 +2689,7 @@ public class KeyManagerImpl implements KeyManager {
               cacheKey.compareTo(seekKeyInDB) >= 0) {
         OzoneFileStatus fileStatus = new OzoneFileStatus(cacheOmKeyInfo,
                 scmBlockSize, isDirectory);
-        fileStatusList.add(fileStatus);
+        cacheKeyMap.put(cacheOmKeyInfo.getKeyName(), fileStatus);
         countEntries++;
       }
     }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java
index af3bc82..af5c4df 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
@@ -119,6 +120,11 @@ public class OMKeyDeleteRequestV1 extends OMKeyDeleteRequest {
       }
 
       OmKeyInfo omKeyInfo = keyStatus.getKeyInfo();
+      // New key format for the fileTable & dirTable.
+      // For example, the user given key path is '/a/b/c/d/e/file1', then in DB
+      // keyName field stores only the leaf node name, which is 'file1'.
+      String fileName = OzoneFSUtils.getFileName(keyName);
+      omKeyInfo.setKeyName(fileName);
 
       // Set the UpdateID to current transactionLogIndex
       omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 17/29: HDDS-4742. Make trash work with FS Optimised Buckets. (#1915)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 4cb5091515a1fe6bb266da1ec701369932a28b5b
Author: Sadanand Shenoy <sa...@gmail.com>
AuthorDate: Tue Feb 16 22:07:12 2021 +0530

    HDDS-4742. Make trash work with FS Optimised Buckets. (#1915)
---
 .../hadoop/ozone/om/helpers/OzoneFSUtils.java      |  25 ++++
 .../hadoop/fs/ozone/TestOzoneFileSystemV1.java     |  12 --
 .../hadoop/ozone/om/TrashOzoneFileSystem.java      | 142 ++++++++++++++++-----
 .../fs/ozone/BasicOzoneClientAdapterImpl.java      |  15 +--
 4 files changed, 135 insertions(+), 59 deletions(-)

diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
index e9d4cf9..e7e2eb0 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
@@ -18,10 +18,12 @@
 package org.apache.hadoop.ozone.om.helpers;
 
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.util.StringUtils;
 
 import javax.annotation.Nonnull;
 import java.nio.file.Paths;
+import java.util.Map;
 
 import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
@@ -205,4 +207,27 @@ public final class OzoneFSUtils {
     java.nio.file.Path keyPath = Paths.get(keyName);
     return keyPath.getNameCount();
   }
+
+
+  /**
+   * Returns true if the bucket is FS Optimised.
+   * @param bucketMetadata
+   * @return
+   */
+  public static boolean isFSOptimizedBucket(
+      Map<String, String> bucketMetadata) {
+    // layout version V1 represents optimized FS path
+    boolean layoutVersionEnabled =
+        org.apache.commons.lang3.StringUtils.equalsIgnoreCase(
+            OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1,
+            bucketMetadata
+                .get(OMConfigKeys.OZONE_OM_LAYOUT_VERSION));
+
+    boolean fsEnabled =
+        Boolean.parseBoolean(bucketMetadata
+            .get(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS));
+
+    return layoutVersionEnabled && fsEnabled;
+  }
+
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
index 03846ae..eb7eaca 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
@@ -383,18 +383,6 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
   @Override
   @Test
   @Ignore("TODO:HDDS-2939")
-  public void testTrash() throws Exception {
-  }
-
-  @Override
-  @Test
-  @Ignore("TODO:HDDS-2939")
-  public void testRenameToTrashEnabled() throws Exception {
-  }
-
-  @Override
-  @Test
-  @Ignore("TODO:HDDS-2939")
   public void testListStatusWithIntermediateDir() throws Exception {
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java
index 6d7a88a..a9408a8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java
@@ -35,10 +35,11 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.helpers.OMRatisHelper;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.om.helpers.OMRatisHelper;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Progressable;
 import org.apache.ratis.protocol.ClientId;
@@ -154,6 +155,11 @@ public class TrashOzoneFileSystem extends FileSystem {
     // check whether the src and dst belong to the same bucket & trashroot.
     OFSPath srcPath = new OFSPath(src);
     OFSPath dstPath = new OFSPath(dst);
+    OmBucketInfo bucket = ozoneManager.getBucketInfo(srcPath.getVolumeName(),
+        srcPath.getBucketName());
+    if (OzoneFSUtils.isFSOptimizedBucket(bucket.getMetadata())) {
+      return renameV1(srcPath, dstPath);
+    }
     Preconditions.checkArgument(srcPath.getBucketName().
         equals(dstPath.getBucketName()));
     Preconditions.checkArgument(srcPath.getTrashRoot().
@@ -163,14 +169,50 @@ public class TrashOzoneFileSystem extends FileSystem {
     return true;
   }
 
+  private boolean renameV1(OFSPath srcPath, OFSPath dstPath) {
+    OzoneManagerProtocolProtos.OMRequest omRequest =
+        getRenameKeyRequest(srcPath, dstPath);
+    try {
+      if(omRequest != null) {
+        submitRequest(omRequest);
+        return true;
+      }
+      return false;
+    } catch (Exception e){
+      LOG.error("couldnt send rename requestV1", e);
+      return false;
+    }
+  }
+
   @Override
   public boolean delete(Path path, boolean b) throws IOException {
     ozoneManager.getMetrics().incNumTrashDeletes();
+    OFSPath srcPath = new OFSPath(path);
+    OmBucketInfo bucket = ozoneManager.getBucketInfo(srcPath.getVolumeName(),
+        srcPath.getBucketName());
+    if (OzoneFSUtils.isFSOptimizedBucket(bucket.getMetadata())) {
+      return deleteV1(srcPath);
+    }
     DeleteIterator iterator = new DeleteIterator(path, true);
     iterator.iterate();
     return true;
   }
 
+  private boolean deleteV1(OFSPath srcPath) {
+    OzoneManagerProtocolProtos.OMRequest omRequest =
+        getDeleteKeyRequest(srcPath);
+    try {
+      if(omRequest != null) {
+        submitRequest(omRequest);
+        return true;
+      }
+      return false;
+    } catch (Throwable e) {
+      LOG.error("Couldn't send delete request.", e);
+      return false;
+    }
+  }
+
   @Override
   public FileStatus[] listStatus(Path path) throws  IOException {
     ozoneManager.getMetrics().incNumTrashListStatus();
@@ -377,6 +419,41 @@ public class TrashOzoneFileSystem extends FileSystem {
     }
   }
 
+
+  private OzoneManagerProtocolProtos.OMRequest
+      getRenameKeyRequest(
+      OFSPath src, OFSPath dst) {
+    String volumeName = src.getVolumeName();
+    String bucketName = src.getBucketName();
+    String keyName = src.getKeyName();
+
+    OzoneManagerProtocolProtos.KeyArgs keyArgs =
+        OzoneManagerProtocolProtos.KeyArgs.newBuilder()
+            .setKeyName(keyName)
+            .setVolumeName(volumeName)
+            .setBucketName(bucketName)
+            .build();
+    String toKeyName = dst.getKeyName();
+    OzoneManagerProtocolProtos.RenameKeyRequest renameKeyRequest =
+        OzoneManagerProtocolProtos.RenameKeyRequest.newBuilder()
+            .setKeyArgs(keyArgs)
+            .setToKeyName(toKeyName)
+            .build();
+    OzoneManagerProtocolProtos.OMRequest omRequest =
+        null;
+    try {
+      omRequest = OzoneManagerProtocolProtos.OMRequest.newBuilder()
+              .setClientId(CLIENT_ID.toString())
+              .setUserInfo(getUserInfo())
+              .setRenameKeyRequest(renameKeyRequest)
+              .setCmdType(OzoneManagerProtocolProtos.Type.RenameKey)
+              .build();
+    } catch (IOException e) {
+      LOG.error("Couldn't get userinfo", e);
+    }
+    return omRequest;
+  }
+
   private class RenameIterator extends OzoneListingIterator {
     private final String srcPath;
     private final String dstPath;
@@ -408,40 +485,37 @@ public class TrashOzoneFileSystem extends FileSystem {
       }
       return true;
     }
+  }
 
-    private OzoneManagerProtocolProtos.OMRequest
-        getRenameKeyRequest(
-        OFSPath src, OFSPath dst) {
-      String volumeName = src.getVolumeName();
-      String bucketName = src.getBucketName();
-      String keyName = src.getKeyName();
-
-      OzoneManagerProtocolProtos.KeyArgs keyArgs =
-          OzoneManagerProtocolProtos.KeyArgs.newBuilder()
-              .setKeyName(keyName)
-              .setVolumeName(volumeName)
-              .setBucketName(bucketName)
-              .build();
-      String toKeyName = dst.getKeyName();
-      OzoneManagerProtocolProtos.RenameKeyRequest renameKeyRequest =
-          OzoneManagerProtocolProtos.RenameKeyRequest.newBuilder()
-              .setKeyArgs(keyArgs)
-              .setToKeyName(toKeyName)
-              .build();
-      OzoneManagerProtocolProtos.OMRequest omRequest =
-          null;
-      try {
-        omRequest = OzoneManagerProtocolProtos.OMRequest.newBuilder()
-            .setClientId(CLIENT_ID.toString())
-            .setUserInfo(getUserInfo())
-            .setRenameKeyRequest(renameKeyRequest)
-            .setCmdType(OzoneManagerProtocolProtos.Type.RenameKey)
+  private OzoneManagerProtocolProtos.OMRequest getDeleteKeyRequest(
+      OFSPath srcPath) {
+    String volume = srcPath.getVolumeName();
+    String bucket = srcPath.getBucketName();
+    String key  = srcPath.getKeyName();
+    OzoneManagerProtocolProtos.KeyArgs keyArgs =
+        OzoneManagerProtocolProtos.KeyArgs.newBuilder()
+            .setKeyName(key)
+            .setVolumeName(volume)
+            .setBucketName(bucket)
+            .setRecursive(true)
             .build();
-      } catch (IOException e) {
-        LOG.error("Couldn't get userinfo", e);
-      }
-      return omRequest;
+    OzoneManagerProtocolProtos.DeleteKeyRequest deleteKeyRequest =
+        OzoneManagerProtocolProtos.DeleteKeyRequest.newBuilder()
+            .setKeyArgs(keyArgs).build();
+    OzoneManagerProtocolProtos.OMRequest omRequest =
+        null;
+    try {
+      omRequest =
+          OzoneManagerProtocolProtos.OMRequest.newBuilder()
+              .setClientId(CLIENT_ID.toString())
+              .setUserInfo(getUserInfo())
+              .setDeleteKeyRequest(deleteKeyRequest)
+              .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey)
+              .build();
+    } catch (IOException e) {
+      LOG.error("Couldn't get userinfo", e);
     }
+    return omRequest;
   }
 
   private class DeleteIterator extends OzoneListingIterator {
@@ -467,7 +541,7 @@ public class TrashOzoneFileSystem extends FileSystem {
       for (String keyPath : keyPathList) {
         OFSPath path = new OFSPath(keyPath);
         OzoneManagerProtocolProtos.OMRequest omRequest =
-            getDeleteKeyRequest(path);
+            getDeleteKeysRequest(path);
         try {
           ozoneManager.getMetrics().incNumTrashFilesDeletes();
           submitRequest(omRequest);
@@ -479,7 +553,7 @@ public class TrashOzoneFileSystem extends FileSystem {
     }
 
     private OzoneManagerProtocolProtos.OMRequest
-        getDeleteKeyRequest(
+        getDeleteKeysRequest(
         OFSPath keyPath) {
       String volumeName = keyPath.getVolumeName();
       String bucketName = keyPath.getBucketName();
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
index 150108c..125934e 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
@@ -48,12 +48,12 @@ import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.OzoneKey;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenRenewer;
@@ -548,17 +548,6 @@ public class BasicOzoneClientAdapterImpl implements OzoneClientAdapter {
 
   @Override
   public boolean isFSOptimizedBucket() {
-    // layout version V1 represents optimized FS path
-    boolean layoutVersionEnabled =
-            StringUtils.equalsIgnoreCase(
-                    OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1,
-                    bucket.getMetadata()
-                            .get(OMConfigKeys.OZONE_OM_LAYOUT_VERSION));
-
-    boolean fsEnabled =
-            Boolean.parseBoolean(bucket.getMetadata()
-                    .get(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS));
-
-    return layoutVersionEnabled && fsEnabled;
+    return OzoneFSUtils.isFSOptimizedBucket(bucket.getMetadata());
   }
 }

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 16/29: HDDS-4781. [FSO]S3MultiPart: Implement create and commit upload part file (#1897)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 81a3e81f4de4b1cf174c15c3a1332df3e7e7611b
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Wed Feb 10 15:00:32 2021 +0530

    HDDS-4781. [FSO]S3MultiPart: Implement create and commit upload part file (#1897)
---
 .../apache/hadoop/fs/ozone/TestOzoneFileOps.java   |  67 ------
 .../rpc/TestOzoneClientMultipartUploadV1.java      |  93 +++++++++
 .../om/ratis/utils/OzoneManagerRatisUtils.java     |   4 +
 .../hadoop/ozone/om/request/key/OMKeyRequest.java  |  14 +-
 .../S3InitiateMultipartUploadRequest.java          |  12 +-
 .../S3InitiateMultipartUploadRequestV1.java        |  25 +--
 .../S3MultipartUploadCommitPartRequest.java        |  35 +++-
 ...a => S3MultipartUploadCommitPartRequestV1.java} | 121 ++++-------
 .../S3MultipartUploadCommitPartResponseV1.java     |  66 ++++++
 .../s3/multipart/TestS3MultipartRequest.java       |  14 +-
 .../TestS3MultipartUploadCommitPartRequest.java    |  62 ++++--
 .../TestS3MultipartUploadCommitPartRequestV1.java  | 104 ++++++++++
 .../s3/multipart/TestS3MultipartResponse.java      |  76 +++++++
 .../TestS3MultipartUploadCommitPartResponseV1.java | 226 +++++++++++++++++++++
 14 files changed, 713 insertions(+), 206 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java
index 176d0c4..147a9ce 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java
@@ -24,8 +24,6 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.TestDataUtil;
@@ -37,7 +35,6 @@ import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.StringUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Assert;
@@ -125,34 +122,6 @@ public class TestOzoneFileOps {
             omMgr);
     openFileKey = d2ObjectID + OzoneConsts.OM_KEY_PREFIX + file.getName();
 
-    // verify entries in directory table
-    TableIterator<String, ? extends
-            Table.KeyValue<String, OmDirectoryInfo>> iterator =
-            omMgr.getDirectoryTable().iterator();
-    iterator.seekToFirst();
-    int count = dirKeys.size();
-    Assert.assertEquals("Unexpected directory table entries!", 2, count);
-    while (iterator.hasNext()) {
-      count--;
-      Table.KeyValue<String, OmDirectoryInfo> value = iterator.next();
-      verifyKeyFormat(value.getKey(), dirKeys);
-    }
-    Assert.assertEquals("Unexpected directory table entries!", 0, count);
-
-    // verify entries in open key table
-    TableIterator<String, ? extends
-            Table.KeyValue<String, OmKeyInfo>> keysItr =
-            omMgr.getOpenKeyTable().iterator();
-    keysItr.seekToFirst();
-
-    while (keysItr.hasNext()) {
-      count++;
-      Table.KeyValue<String, OmKeyInfo> value = keysItr.next();
-      verifyOpenKeyFormat(value.getKey(), openFileKey);
-      verifyOMFileInfoFormat(value.getValue(), file.getName(), d2ObjectID);
-    }
-    Assert.assertEquals("Unexpected file table entries!", 1, count);
-
     // trigger CommitKeyRequest
     outputStream.close();
 
@@ -183,42 +152,6 @@ public class TestOzoneFileOps {
             omKeyInfo.getPath());
   }
 
-  /**
-   * Verify key name format and the DB key existence in the expected dirKeys
-   * list.
-   *
-   * @param key     table keyName
-   * @param dirKeys expected keyName
-   */
-  private void verifyKeyFormat(String key, ArrayList<String> dirKeys) {
-    String[] keyParts = StringUtils.split(key,
-            OzoneConsts.OM_KEY_PREFIX.charAt(0));
-    Assert.assertEquals("Invalid KeyName", 2, keyParts.length);
-    boolean removed = dirKeys.remove(key);
-    Assert.assertTrue("Key:" + key + " doesn't exists in directory table!",
-            removed);
-  }
-
-  /**
-   * Verify key name format and the DB key existence in the expected
-   * openFileKeys list.
-   *
-   * @param key          table keyName
-   * @param openFileKey expected keyName
-   */
-  private void verifyOpenKeyFormat(String key, String openFileKey) {
-    String[] keyParts = StringUtils.split(key,
-            OzoneConsts.OM_KEY_PREFIX.charAt(0));
-    Assert.assertEquals("Invalid KeyName:" + key, 3, keyParts.length);
-    String[] expectedOpenFileParts = StringUtils.split(openFileKey,
-            OzoneConsts.OM_KEY_PREFIX.charAt(0));
-    Assert.assertEquals("ParentId/Key:" + expectedOpenFileParts[0]
-                    + " doesn't exists in openFileTable!",
-            expectedOpenFileParts[0] + OzoneConsts.OM_KEY_PREFIX
-                    + expectedOpenFileParts[1],
-            keyParts[0] + OzoneConsts.OM_KEY_PREFIX + keyParts[1]);
-  }
-
   long verifyDirKey(long parentId, String dirKey, String absolutePath,
                     ArrayList<String> dirKeys, OMMetadataManager omMgr)
           throws Exception {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java
index 93e5826..af241c5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java
@@ -17,6 +17,7 @@
 
 package org.apache.hadoop.ozone.client.rpc;
 
+import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.client.ObjectStore;
@@ -24,8 +25,13 @@ import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
+
+import static org.apache.hadoop.hdds.StringUtils.string2Bytes;
+import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNotNull;
 
@@ -179,4 +185,91 @@ public class TestOzoneClientMultipartUploadV1 {
     assertNotNull(multipartInfo.getUploadID());
   }
 
+  @Test
+  public void testUploadPartWithNoOverride() throws IOException {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+    String sampleData = "sample Value";
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+    OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
+            STAND_ALONE, ONE);
+
+    assertNotNull(multipartInfo);
+    String uploadID = multipartInfo.getUploadID();
+    Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
+    Assert.assertEquals(bucketName, multipartInfo.getBucketName());
+    Assert.assertEquals(keyName, multipartInfo.getKeyName());
+    assertNotNull(multipartInfo.getUploadID());
+
+    OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
+            sampleData.length(), 1, uploadID);
+    ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length());
+    ozoneOutputStream.close();
+
+    OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream
+            .getCommitUploadPartInfo();
+
+    assertNotNull(commitUploadPartInfo);
+    assertNotNull(commitUploadPartInfo.getPartName());
+  }
+
+  @Test
+  public void testUploadPartOverrideWithRatis() throws IOException {
+
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+    String sampleData = "sample Value";
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+    OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
+            ReplicationType.RATIS, THREE);
+
+    assertNotNull(multipartInfo);
+    String uploadID = multipartInfo.getUploadID();
+    Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
+    Assert.assertEquals(bucketName, multipartInfo.getBucketName());
+    Assert.assertEquals(keyName, multipartInfo.getKeyName());
+    assertNotNull(multipartInfo.getUploadID());
+
+    int partNumber = 1;
+
+    OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
+            sampleData.length(), partNumber, uploadID);
+    ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length());
+    ozoneOutputStream.close();
+
+    OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream
+            .getCommitUploadPartInfo();
+
+    assertNotNull(commitUploadPartInfo);
+    String partName = commitUploadPartInfo.getPartName();
+    assertNotNull(commitUploadPartInfo.getPartName());
+
+    //Overwrite the part by creating part key with same part number.
+    sampleData = "sample Data Changed";
+    ozoneOutputStream = bucket.createMultipartKey(keyName,
+            sampleData.length(), partNumber, uploadID);
+    ozoneOutputStream.write(string2Bytes(sampleData), 0, "name".length());
+    ozoneOutputStream.close();
+
+    commitUploadPartInfo = ozoneOutputStream
+            .getCommitUploadPartInfo();
+
+    assertNotNull(commitUploadPartInfo);
+    assertNotNull(commitUploadPartInfo.getPartName());
+
+    // PartName should be different from old part Name.
+    assertNotEquals("Part names should be different", partName,
+            commitUploadPartInfo.getPartName());
+  }
+
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index 08fc09f..6f6278a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.ozone.om.request.s3.multipart.S3InitiateMultipartUpload
 import org.apache.hadoop.ozone.om.request.s3.multipart.S3InitiateMultipartUploadRequestV1;
 import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadAbortRequest;
 import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCommitPartRequest;
+import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCommitPartRequestV1;
 import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCompleteRequest;
 import org.apache.hadoop.ozone.om.request.s3.security.S3GetSecretRequest;
 import org.apache.hadoop.ozone.om.request.security.OMCancelDelegationTokenRequest;
@@ -184,6 +185,9 @@ public final class OzoneManagerRatisUtils {
       }
       return new S3InitiateMultipartUploadRequest(omRequest);
     case CommitMultiPartUpload:
+      if (isBucketFSOptimized()) {
+        return new S3MultipartUploadCommitPartRequestV1(omRequest);
+      }
       return new S3MultipartUploadCommitPartRequest(omRequest);
     case AbortMultiPartUpload:
       return new S3MultipartUploadAbortRequest(omRequest);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
index 65fbb4b..4c4d097 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
@@ -692,9 +692,17 @@ public abstract class OMKeyRequest extends OMClientRequest {
     // error no such multipart upload.
     String uploadID = args.getMultipartUploadID();
     Preconditions.checkNotNull(uploadID);
-    String multipartKey = omMetadataManager
-            .getMultipartKey(args.getVolumeName(), args.getBucketName(),
-                    args.getKeyName(), uploadID);
+    String multipartKey = "";
+    if (omPathInfo != null) {
+      // FileTable metadata format
+      multipartKey = omMetadataManager.getMultipartKey(
+              omPathInfo.getLastKnownParentId(),
+              omPathInfo.getLeafNodeName(), uploadID);
+    } else {
+      multipartKey = omMetadataManager
+              .getMultipartKey(args.getVolumeName(), args.getBucketName(),
+                      args.getKeyName(), uploadID);
+    }
     OmKeyInfo partKeyInfo = omMetadataManager.getOpenKeyTable().get(
             multipartKey);
     if (partKeyInfo == null) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
index 4a42f5f..fb15fe9 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
@@ -225,7 +225,17 @@ public class S3InitiateMultipartUploadRequest extends OMKeyRequest {
             volumeName, bucketName);
       }
     }
+    logResult(ozoneManager, multipartInfoInitiateRequest, auditMap, volumeName,
+            bucketName, keyName, exception, result);
 
+    return omClientResponse;
+  }
+
+  @SuppressWarnings("parameternumber")
+  protected void logResult(OzoneManager ozoneManager,
+      MultipartInfoInitiateRequest multipartInfoInitiateRequest,
+      Map<String, String> auditMap, String volumeName, String bucketName,
+      String keyName, IOException exception, Result result) {
     // audit log
     auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(
         OMAction.INITIATE_MULTIPART_UPLOAD, auditMap,
@@ -247,7 +257,5 @@ public class S3InitiateMultipartUploadRequest extends OMKeyRequest {
       LOG.error("Unrecognized Result for S3InitiateMultipartUploadRequest: {}",
           multipartInfoInitiateRequest);
     }
-
-    return omClientResponse;
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestV1.java
index 3507090..d472bc1 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestV1.java
@@ -22,7 +22,6 @@ import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-import org.apache.hadoop.ozone.audit.OMAction;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
@@ -228,28 +227,8 @@ public class S3InitiateMultipartUploadRequestV1
             volumeName, bucketName);
       }
     }
-
-    // audit log
-    auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(
-        OMAction.INITIATE_MULTIPART_UPLOAD, auditMap,
-        exception, getOmRequest().getUserInfo()));
-
-    switch (result) {
-    case SUCCESS:
-      LOG.debug("S3 InitiateMultipart Upload request for Key {} in " +
-              "Volume/Bucket {}/{} is successfully completed", keyName,
-          volumeName, bucketName);
-      break;
-    case FAILURE:
-      ozoneManager.getMetrics().incNumInitiateMultipartUploadFails();
-      LOG.error("S3 InitiateMultipart Upload request for Key {} in " +
-              "Volume/Bucket {}/{} is failed", keyName, volumeName, bucketName,
-          exception);
-      break;
-    default:
-      LOG.error("Unrecognized Result for S3InitiateMultipartUploadRequest: {}",
-          multipartInfoInitiateRequest);
-    }
+    logResult(ozoneManager, multipartInfoInitiateRequest, auditMap, volumeName,
+            bucketName, keyName, exception, result);
 
     return omClientResponse;
   }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
index e00cff6..aac03bb 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
@@ -134,16 +134,16 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
       validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
 
       String uploadID = keyArgs.getMultipartUploadID();
-      multipartKey = omMetadataManager.getMultipartKey(volumeName, bucketName,
-          keyName, uploadID);
+      multipartKey = getMultipartKey(volumeName, bucketName, keyName,
+              omMetadataManager, uploadID);
 
       multipartKeyInfo = omMetadataManager.getMultipartInfoTable()
           .get(multipartKey);
 
       long clientID = multipartCommitUploadPartRequest.getClientID();
 
-      openKey = omMetadataManager.getOpenKey(
-          volumeName, bucketName, keyName, clientID);
+      openKey = getOpenKey(volumeName, bucketName, keyName, omMetadataManager,
+              clientID);
 
       String ozoneKey = omMetadataManager.getOzoneKey(
           volumeName, bucketName, keyName);
@@ -253,6 +253,31 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
       }
     }
 
+    logResult(ozoneManager, multipartCommitUploadPartRequest, keyArgs,
+            auditMap, volumeName, bucketName, keyName, exception, partName,
+            result);
+
+    return omClientResponse;
+  }
+
+  private String getOpenKey(String volumeName, String bucketName,
+      String keyName, OMMetadataManager omMetadataManager, long clientID) {
+    return omMetadataManager.getOpenKey(volumeName, bucketName,
+            keyName, clientID);
+  }
+
+  private String getMultipartKey(String volumeName, String bucketName,
+      String keyName, OMMetadataManager omMetadataManager, String uploadID) {
+    return omMetadataManager.getMultipartKey(volumeName, bucketName,
+        keyName, uploadID);
+  }
+
+  @SuppressWarnings("parameternumber")
+  protected void logResult(OzoneManager ozoneManager,
+      MultipartCommitUploadPartRequest multipartCommitUploadPartRequest,
+      KeyArgs keyArgs, Map<String, String> auditMap, String volumeName,
+      String bucketName, String keyName, IOException exception,
+      String partName, Result result) {
     // audit log
     // Add MPU related information.
     auditMap.put(OzoneConsts.MULTIPART_UPLOAD_PART_NUMBER,
@@ -278,8 +303,6 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
       LOG.error("Unrecognized Result for S3MultipartUploadCommitPartRequest: " +
           "{}", multipartCommitUploadPartRequest);
     }
-
-    return omClientResponse;
   }
 
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequestV1.java
similarity index 73%
copy from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
copy to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequestV1.java
index e00cff6..5546010 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequestV1.java
@@ -19,8 +19,8 @@
 package org.apache.hadoop.ozone.om.request.s3.multipart;
 
 import com.google.common.base.Optional;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
@@ -28,32 +28,25 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.s3.multipart
-    .S3MultipartUploadCommitPartResponse;
+import org.apache.hadoop.ozone.om.response.s3.multipart.S3MultipartUploadCommitPartResponseV1;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .MultipartCommitUploadPartRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .MultipartCommitUploadPartResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartCommitUploadPartRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartCommitUploadPartResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Iterator;
 import java.util.Map;
 import java.util.stream.Collectors;
 
@@ -63,31 +56,17 @@ import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_L
 /**
  * Handle Multipart upload commit upload part file.
  */
-public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
+public class S3MultipartUploadCommitPartRequestV1
+        extends S3MultipartUploadCommitPartRequest {
 
   private static final Logger LOG =
-      LoggerFactory.getLogger(S3MultipartUploadCommitPartRequest.class);
+      LoggerFactory.getLogger(S3MultipartUploadCommitPartRequestV1.class);
 
-  public S3MultipartUploadCommitPartRequest(OMRequest omRequest) {
+  public S3MultipartUploadCommitPartRequestV1(OMRequest omRequest) {
     super(omRequest);
   }
 
   @Override
-  public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
-    MultipartCommitUploadPartRequest multipartCommitUploadPartRequest =
-        getOmRequest().getCommitMultiPartUploadRequest();
-
-    KeyArgs keyArgs = multipartCommitUploadPartRequest.getKeyArgs();
-    return getOmRequest().toBuilder().setCommitMultiPartUploadRequest(
-        multipartCommitUploadPartRequest.toBuilder()
-            .setKeyArgs(keyArgs.toBuilder().setModificationTime(Time.now())
-                .setKeyName(validateAndNormalizeKey(
-                    ozoneManager.getEnableFileSystemPaths(),
-                    keyArgs.getKeyName()))))
-        .setUserInfo(getUserInfo()).build();
-  }
-
-  @Override
   @SuppressWarnings("methodlength")
   public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
       long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
@@ -124,35 +103,37 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
       volumeName = keyArgs.getVolumeName();
       bucketName = keyArgs.getBucketName();
 
-      // check acl
-      checkKeyAcls(ozoneManager, volumeName, bucketName, keyName,
-          IAccessAuthorizer.ACLType.WRITE, OzoneObj.ResourceType.KEY);
-
+      // TODO to support S3 ACL later.
       acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
           volumeName, bucketName);
 
       validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
 
+      String fileName = OzoneFSUtils.getFileName(keyName);
+      Iterator<Path> pathComponents = Paths.get(keyName).iterator();
+      String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+      omBucketInfo = omMetadataManager.getBucketTable().get(bucketKey);
+      long bucketId = omBucketInfo.getObjectID();
+      long parentID = OMFileRequest.getParentID(bucketId, pathComponents,
+              keyName, omMetadataManager);
+
       String uploadID = keyArgs.getMultipartUploadID();
-      multipartKey = omMetadataManager.getMultipartKey(volumeName, bucketName,
-          keyName, uploadID);
+      multipartKey = omMetadataManager.getMultipartKey(parentID,
+          fileName, uploadID);
 
       multipartKeyInfo = omMetadataManager.getMultipartInfoTable()
           .get(multipartKey);
 
       long clientID = multipartCommitUploadPartRequest.getClientID();
 
-      openKey = omMetadataManager.getOpenKey(
-          volumeName, bucketName, keyName, clientID);
+      openKey = omMetadataManager.getOpenFileName(parentID, fileName, clientID);
 
-      String ozoneKey = omMetadataManager.getOzoneKey(
-          volumeName, bucketName, keyName);
-
-      omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
+      omKeyInfo = OMFileRequest.getOmKeyInfoFromFileTable(true,
+              omMetadataManager, openKey, keyName);
 
       if (omKeyInfo == null) {
         throw new OMException("Failed to commit Multipart Upload key, as " +
-            openKey + "entry is not found in the openKey table",
+            openKey + " entry is not found in the openKey table",
             KEY_NOT_FOUND);
       }
 
@@ -160,12 +141,13 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
       omKeyInfo.setDataSize(keyArgs.getDataSize());
       omKeyInfo.updateLocationInfoList(keyArgs.getKeyLocationsList().stream()
           .map(OmKeyLocationInfo::getFromProtobuf)
-          .collect(Collectors.toList()), true);
+          .collect(Collectors.toList()));
       // Set Modification time
       omKeyInfo.setModificationTime(keyArgs.getModificationTime());
       // Set the UpdateID to current transactionLogIndex
       omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
 
+      String ozoneKey = omMetadataManager.getOzonePathKey(parentID, fileName);
       partName = ozoneKey + clientID;
 
       if (multipartKeyInfo == null) {
@@ -202,8 +184,6 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
       // S3MultipartUplodaCommitPartResponse before being added to
       // DeletedKeyTable.
 
-      // Add to cache.
-
       // Delete from open key table and add it to multipart info table.
       // No need to add cache entries to delete table, as no
       // read/write requests that info for validation.
@@ -230,7 +210,7 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
       omResponse.setCommitMultiPartUploadResponse(
           MultipartCommitUploadPartResponse.newBuilder()
               .setPartName(partName));
-      omClientResponse = new S3MultipartUploadCommitPartResponse(
+      omClientResponse = new S3MultipartUploadCommitPartResponseV1(
           omResponse.build(), multipartKey, openKey,
           multipartKeyInfo, oldPartKeyInfo, omKeyInfo,
           ozoneManager.isRatisEnabled(),
@@ -240,7 +220,7 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
     } catch (IOException ex) {
       result = Result.FAILURE;
       exception = ex;
-      omClientResponse = new S3MultipartUploadCommitPartResponse(
+      omClientResponse = new S3MultipartUploadCommitPartResponseV1(
           createErrorOMResponse(omResponse, exception), multipartKey, openKey,
           multipartKeyInfo, oldPartKeyInfo, omKeyInfo,
           ozoneManager.isRatisEnabled(), copyBucketInfo);
@@ -253,34 +233,11 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
       }
     }
 
-    // audit log
-    // Add MPU related information.
-    auditMap.put(OzoneConsts.MULTIPART_UPLOAD_PART_NUMBER,
-        String.valueOf(keyArgs.getMultipartNumber()));
-    auditMap.put(OzoneConsts.MULTIPART_UPLOAD_PART_NAME, partName);
-    auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(
-        OMAction.COMMIT_MULTIPART_UPLOAD_PARTKEY,
-        auditMap, exception,
-        getOmRequest().getUserInfo()));
-
-    switch (result) {
-    case SUCCESS:
-      LOG.debug("MultipartUpload Commit is successfully for Key:{} in " +
-          "Volume/Bucket {}/{}", keyName, volumeName, bucketName);
-      break;
-    case FAILURE:
-      ozoneManager.getMetrics().incNumCommitMultipartUploadPartFails();
-      LOG.error("MultipartUpload Commit is failed for Key:{} in " +
-          "Volume/Bucket {}/{}", keyName, volumeName, bucketName,
-          exception);
-      break;
-    default:
-      LOG.error("Unrecognized Result for S3MultipartUploadCommitPartRequest: " +
-          "{}", multipartCommitUploadPartRequest);
-    }
+    logResult(ozoneManager, multipartCommitUploadPartRequest, keyArgs,
+            auditMap, volumeName, bucketName, keyName, exception, partName,
+            result);
 
     return omClientResponse;
   }
 
 }
-
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponseV1.java
new file mode 100644
index 0000000..d8e5cc5
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponseV1.java
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.s3.multipart;
+
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.MULTIPARTFILEINFO_TABLE;
+
+/**
+ * Response for S3MultipartUploadCommitPart request.
+ */
+@CleanupTableInfo(cleanupTables = {OPEN_FILE_TABLE, DELETED_TABLE,
+    MULTIPARTFILEINFO_TABLE})
+public class S3MultipartUploadCommitPartResponseV1
+        extends S3MultipartUploadCommitPartResponse {
+
+  /**
+   * Regular response.
+   * 1. Update MultipartKey in MultipartInfoTable with new PartKeyInfo
+   * 2. Delete openKey from OpenKeyTable
+   * 3. If old PartKeyInfo exists, put it in DeletedKeyTable
+   * @param omResponse
+   * @param multipartKey
+   * @param openKey
+   * @param omMultipartKeyInfo
+   * @param oldPartKeyInfo
+   */
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  public S3MultipartUploadCommitPartResponseV1(@Nonnull OMResponse omResponse,
+      String multipartKey, String openKey,
+      @Nullable OmMultipartKeyInfo omMultipartKeyInfo,
+      @Nullable OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo,
+      @Nullable OmKeyInfo openPartKeyInfoToBeDeleted,
+      boolean isRatisEnabled, @Nonnull OmBucketInfo omBucketInfo) {
+
+    super(omResponse, multipartKey, openKey, omMultipartKeyInfo,
+            oldPartKeyInfo, openPartKeyInfoToBeDeleted, isRatisEnabled,
+            omBucketInfo);
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
index 641ee8d..9f6cff8 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
@@ -152,8 +152,7 @@ public class TestS3MultipartRequest {
         TestOMRequestUtils.createCommitPartMPURequest(volumeName, bucketName,
             keyName, clientID, dataSize, multipartUploadID, partNumber);
     S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest =
-        new S3MultipartUploadCommitPartRequest(omRequest);
-
+            getS3MultipartUploadCommitReq(omRequest);
 
     OMRequest modifiedRequest =
         s3MultipartUploadCommitPartRequest.preExecute(ozoneManager);
@@ -247,4 +246,15 @@ public class TestS3MultipartRequest {
 
     return modifiedRequest;
   }
+
+  protected S3MultipartUploadCommitPartRequest getS3MultipartUploadCommitReq(
+          OMRequest omRequest) {
+    return new S3MultipartUploadCommitPartRequest(omRequest);
+  }
+
+  protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq(
+          OMRequest initiateMPURequest) {
+    return new S3InitiateMultipartUploadRequest(initiateMPURequest);
+  }
+
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java
index d623b17..6c8beb0 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java
@@ -41,27 +41,28 @@ public class TestS3MultipartUploadCommitPartRequest
   public void testPreExecute() throws Exception {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
 
     doPreExecuteCommitMPU(volumeName, bucketName, keyName, Time.now(),
         UUID.randomUUID().toString(), 1);
   }
 
-
   @Test
   public void testValidateAndUpdateCacheSuccess() throws Exception {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
 
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
 
+    createParentPath(volumeName, bucketName);
+
     OMRequest initiateMPURequest = doPreExecuteInitiateMPU(volumeName,
         bucketName, keyName);
 
     S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
-        new S3InitiateMultipartUploadRequest(initiateMPURequest);
+            getS3InitiateMultipartUploadReq(initiateMPURequest);
 
     OMClientResponse omClientResponse =
         s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager,
@@ -75,12 +76,10 @@ public class TestS3MultipartUploadCommitPartRequest
         bucketName, keyName, clientID, multipartUploadID, 1);
 
     S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest =
-        new S3MultipartUploadCommitPartRequest(commitMultipartRequest);
+        getS3MultipartUploadCommitReq(commitMultipartRequest);
 
     // Add key to open key table.
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName,
-        keyName, clientID, HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
+    addKeyToOpenKeyTable(volumeName, bucketName, keyName, clientID);
 
     omClientResponse =
         s3MultipartUploadCommitPartRequest.validateAndUpdateCache(ozoneManager,
@@ -90,8 +89,8 @@ public class TestS3MultipartUploadCommitPartRequest
     Assert.assertTrue(omClientResponse.getOMResponse().getStatus()
         == OzoneManagerProtocolProtos.Status.OK);
 
-    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
-        bucketName, keyName, multipartUploadID);
+    String multipartKey = getMultipartKey(volumeName, bucketName, keyName,
+            multipartUploadID);
 
     Assert.assertNotNull(
         omMetadataManager.getMultipartInfoTable().get(multipartKey));
@@ -107,11 +106,12 @@ public class TestS3MultipartUploadCommitPartRequest
   public void testValidateAndUpdateCacheMultipartNotFound() throws Exception {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
 
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
 
+    createParentPath(volumeName, bucketName);
 
     long clientID = Time.now();
     String multipartUploadID = UUID.randomUUID().toString();
@@ -120,12 +120,10 @@ public class TestS3MultipartUploadCommitPartRequest
         bucketName, keyName, clientID, multipartUploadID, 1);
 
     S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest =
-        new S3MultipartUploadCommitPartRequest(commitMultipartRequest);
+        getS3MultipartUploadCommitReq(commitMultipartRequest);
 
     // Add key to open key table.
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName,
-        keyName, clientID, HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
+    addKeyToOpenKeyTable(volumeName, bucketName, keyName, clientID);
 
     OMClientResponse omClientResponse =
         s3MultipartUploadCommitPartRequest.validateAndUpdateCache(ozoneManager,
@@ -135,8 +133,8 @@ public class TestS3MultipartUploadCommitPartRequest
     Assert.assertTrue(omClientResponse.getOMResponse().getStatus()
         == OzoneManagerProtocolProtos.Status.NO_SUCH_MULTIPART_UPLOAD_ERROR);
 
-    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
-        bucketName, keyName, multipartUploadID);
+    String multipartKey = getMultipartKey(volumeName, bucketName, keyName,
+            multipartUploadID);
 
     Assert.assertNull(
         omMetadataManager.getMultipartInfoTable().get(multipartKey));
@@ -147,7 +145,7 @@ public class TestS3MultipartUploadCommitPartRequest
   public void testValidateAndUpdateCacheKeyNotFound() throws Exception {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
 
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
@@ -163,7 +161,7 @@ public class TestS3MultipartUploadCommitPartRequest
     // part. It will fail with KEY_NOT_FOUND
 
     S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest =
-        new S3MultipartUploadCommitPartRequest(commitMultipartRequest);
+        getS3MultipartUploadCommitReq(commitMultipartRequest);
 
 
     OMClientResponse omClientResponse =
@@ -180,7 +178,7 @@ public class TestS3MultipartUploadCommitPartRequest
   public void testValidateAndUpdateCacheBucketFound() throws Exception {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
 
     TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager);
 
@@ -195,7 +193,7 @@ public class TestS3MultipartUploadCommitPartRequest
     // part. It will fail with BUCKET_NOT_FOUND
 
     S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest =
-        new S3MultipartUploadCommitPartRequest(commitMultipartRequest);
+        getS3MultipartUploadCommitReq(commitMultipartRequest);
 
 
     OMClientResponse omClientResponse =
@@ -206,4 +204,26 @@ public class TestS3MultipartUploadCommitPartRequest
         == OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND);
 
   }
+
+  protected void addKeyToOpenKeyTable(String volumeName, String bucketName,
+      String keyName, long clientID) throws Exception {
+    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName,
+            keyName, clientID, HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE, omMetadataManager);
+  }
+
+  protected String getKeyName() {
+    return UUID.randomUUID().toString();
+  }
+
+  protected String getMultipartKey(String volumeName, String bucketName,
+      String keyName, String multipartUploadID) {
+    return omMetadataManager.getMultipartKey(volumeName,
+            bucketName, keyName, multipartUploadID);
+  }
+
+  protected void createParentPath(String volumeName, String bucketName)
+          throws Exception {
+    // no parent hierarchy
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestV1.java
new file mode 100644
index 0000000..d0d01e1
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestV1.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.om.request.s3.multipart;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.Time;
+import org.junit.Assert;
+
+import java.util.UUID;
+
+/**
+ * Tests S3 Multipart upload commit part request.
+ */
+public class TestS3MultipartUploadCommitPartRequestV1
+    extends TestS3MultipartUploadCommitPartRequest {
+
+  private String dirName = "a/b/c/";
+
+  private long parentID;
+
+  protected S3MultipartUploadCommitPartRequest getS3MultipartUploadCommitReq(
+          OMRequest omRequest) {
+    return new S3MultipartUploadCommitPartRequestV1(omRequest);
+  }
+
+  protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq(
+          OMRequest initiateMPURequest) {
+    return new S3InitiateMultipartUploadRequestV1(initiateMPURequest);
+  }
+
+  protected String getKeyName() {
+    return dirName + UUID.randomUUID().toString();
+  }
+
+  protected void addKeyToOpenKeyTable(String volumeName, String bucketName,
+      String keyName, long clientID) throws Exception {
+    long txnLogId = 10000;
+    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
+            bucketName, keyName, HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE, parentID + 1, parentID,
+            txnLogId, Time.now());
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    TestOMRequestUtils.addFileToKeyTable(true, false,
+            fileName, omKeyInfo, clientID, txnLogId, omMetadataManager);
+  }
+
+  protected String getMultipartKey(String volumeName, String bucketName,
+      String keyName, String multipartUploadID) {
+    String fileName = StringUtils.substringAfter(keyName, dirName);
+    return omMetadataManager.getMultipartKey(parentID, fileName,
+            multipartUploadID);
+  }
+
+  protected OMRequest doPreExecuteInitiateMPU(String volumeName,
+      String bucketName, String keyName) throws Exception {
+    OMRequest omRequest =
+            TestOMRequestUtils.createInitiateMPURequest(volumeName, bucketName,
+                    keyName);
+
+    S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
+            new S3InitiateMultipartUploadRequestV1(omRequest);
+
+    OMRequest modifiedRequest =
+            s3InitiateMultipartUploadRequest.preExecute(ozoneManager);
+
+    Assert.assertNotEquals(omRequest, modifiedRequest);
+    Assert.assertTrue(modifiedRequest.hasInitiateMultiPartUploadRequest());
+    Assert.assertNotNull(modifiedRequest.getInitiateMultiPartUploadRequest()
+            .getKeyArgs().getMultipartUploadID());
+    Assert.assertTrue(modifiedRequest.getInitiateMultiPartUploadRequest()
+            .getKeyArgs().getModificationTime() > 0);
+
+    return modifiedRequest;
+  }
+
+  protected void createParentPath(String volumeName, String bucketName)
+      throws Exception {
+    // Create parent dirs for the path
+    parentID = TestOMRequestUtils.addParentsToDirTable(volumeName, bucketName,
+            dirName, omMetadataManager);
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
index 76ceb0e..106ae61 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
@@ -19,6 +19,7 @@
 
 package org.apache.hadoop.ozone.om.response.s3.multipart;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
@@ -156,6 +157,24 @@ public class TestS3MultipartResponse {
             .setFactor(HddsProtos.ReplicationFactor.ONE).build()).build();
   }
 
+  public PartKeyInfo createPartKeyInfoV1(
+      String volumeName, String bucketName, long parentID, String fileName,
+      int partNumber) {
+    return PartKeyInfo.newBuilder()
+        .setPartNumber(partNumber)
+        .setPartName(omMetadataManager.getMultipartKey(parentID, fileName,
+                UUID.randomUUID().toString()))
+        .setPartKeyInfo(KeyInfo.newBuilder()
+            .setVolumeName(volumeName)
+            .setBucketName(bucketName)
+            .setKeyName(fileName)
+            .setDataSize(100L) // Just set dummy size for testing
+            .setCreationTime(Time.now())
+            .setModificationTime(Time.now())
+            .setParentID(parentID)
+            .setType(HddsProtos.ReplicationType.RATIS)
+            .setFactor(HddsProtos.ReplicationFactor.ONE).build()).build();
+  }
 
   public S3InitiateMultipartUploadResponse createS3InitiateMPUResponseV1(
       String volumeName, String bucketName, long parentID, String keyName,
@@ -198,4 +217,61 @@ public class TestS3MultipartResponse {
             omKeyInfo, parentDirInfos);
   }
 
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  public S3MultipartUploadCommitPartResponse createS3CommitMPUResponseV1(
+          String volumeName, String bucketName, long parentID, String keyName,
+          String multipartUploadID,
+          OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo,
+          OmMultipartKeyInfo multipartKeyInfo,
+          OzoneManagerProtocolProtos.Status status, String openKey)
+          throws IOException {
+    if (multipartKeyInfo == null) {
+      multipartKeyInfo = new OmMultipartKeyInfo.Builder()
+              .setUploadID(multipartUploadID)
+              .setCreationTime(Time.now())
+              .setReplicationType(HddsProtos.ReplicationType.RATIS)
+              .setReplicationFactor(HddsProtos.ReplicationFactor.ONE)
+              .setParentID(parentID)
+              .build();
+    }
+
+    String fileName = OzoneFSUtils.getFileName(keyName);
+
+    String multipartKey = getMultipartKey(parentID, keyName, multipartUploadID);
+    boolean isRatisEnabled = true;
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+
+    OmKeyInfo openPartKeyInfoToBeDeleted = new OmKeyInfo.Builder()
+            .setVolumeName(volumeName)
+            .setBucketName(bucketName)
+            .setKeyName(fileName)
+            .setFileName(fileName)
+            .setCreationTime(Time.now())
+            .setModificationTime(Time.now())
+            .setReplicationType(HddsProtos.ReplicationType.RATIS)
+            .setReplicationFactor(HddsProtos.ReplicationFactor.ONE)
+            .setOmKeyLocationInfos(Collections.singletonList(
+                    new OmKeyLocationInfoGroup(0, new ArrayList<>())))
+            .build();
+
+    OMResponse omResponse = OMResponse.newBuilder()
+            .setCmdType(OzoneManagerProtocolProtos.Type.CommitMultiPartUpload)
+            .setStatus(status).setSuccess(true)
+            .setCommitMultiPartUploadResponse(
+                    OzoneManagerProtocolProtos.MultipartCommitUploadPartResponse
+                            .newBuilder().setPartName(volumeName)).build();
+
+    return new S3MultipartUploadCommitPartResponseV1(omResponse, multipartKey,
+            openKey, multipartKeyInfo, oldPartKeyInfo,
+            openPartKeyInfoToBeDeleted, isRatisEnabled, omBucketInfo);
+  }
+
+  private String getMultipartKey(long parentID, String keyName,
+                                 String multipartUploadID) {
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    return omMetadataManager.getMultipartKey(parentID, fileName,
+            multipartUploadID);
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCommitPartResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCommitPartResponseV1.java
new file mode 100644
index 0000000..511ffef
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCommitPartResponseV1.java
@@ -0,0 +1,226 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.s3.multipart;
+
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo;
+import org.apache.hadoop.util.Time;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.UUID;
+
+/**
+ * Test multipart upload commit part response.
+ */
+public class TestS3MultipartUploadCommitPartResponseV1
+    extends TestS3MultipartResponse {
+
+  private String dirName = "a/b/c/";
+
+  private long parentID;
+
+  @Test
+  public void testAddDBToBatch() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
+    String multipartUploadID = UUID.randomUUID().toString();
+
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+
+    createParentPath(volumeName, bucketName);
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    String multipartKey = omMetadataManager.getMultipartKey(parentID, fileName,
+            multipartUploadID);
+    long clientId = Time.now();
+    String openKey = omMetadataManager.getOpenFileName(parentID, fileName,
+            clientId);
+
+    S3MultipartUploadCommitPartResponse s3MultipartUploadCommitPartResponse =
+        createS3CommitMPUResponseV1(volumeName, bucketName, parentID, keyName,
+            multipartUploadID, null, null,
+                OzoneManagerProtocolProtos.Status.OK, openKey);
+
+    s3MultipartUploadCommitPartResponse.addToDBBatch(omMetadataManager,
+        batchOperation);
+
+    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+
+    Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
+    Assert.assertNotNull(
+        omMetadataManager.getMultipartInfoTable().get(multipartKey));
+
+    // As no parts are created, no entries should be there in the delete table.
+    Assert.assertEquals(0, omMetadataManager.countRowsInTable(
+            omMetadataManager.getDeletedTable()));
+  }
+
+  @Test
+  public void testAddDBToBatchWithParts() throws Exception {
+
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
+
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+    createParentPath(volumeName, bucketName);
+
+    String multipartUploadID = UUID.randomUUID().toString();
+
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    String multipartKey = omMetadataManager.getMultipartKey(parentID, fileName,
+            multipartUploadID);
+
+    S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponseV1 =
+            createS3InitiateMPUResponseV1(volumeName, bucketName, parentID,
+                    keyName, multipartUploadID, new ArrayList<>());
+
+    s3InitiateMultipartUploadResponseV1.addToDBBatch(omMetadataManager,
+            batchOperation);
+
+    // Add some dummy parts for testing.
+    // No key locations are added, as this test only verifies whether entries
+    // are added to the delete table or not.
+    OmMultipartKeyInfo omMultipartKeyInfo =
+            s3InitiateMultipartUploadResponseV1.getOmMultipartKeyInfo();
+
+    PartKeyInfo part1 = createPartKeyInfoV1(volumeName, bucketName, parentID,
+        fileName, 1);
+
+    addPart(1, part1, omMultipartKeyInfo);
+
+    long clientId = Time.now();
+    String openKey = omMetadataManager.getOpenFileName(parentID, fileName,
+            clientId);
+
+    S3MultipartUploadCommitPartResponse s3MultipartUploadCommitPartResponse =
+            createS3CommitMPUResponseV1(volumeName, bucketName, parentID,
+                    keyName, multipartUploadID,
+                    omMultipartKeyInfo.getPartKeyInfo(1),
+                    omMultipartKeyInfo,
+                    OzoneManagerProtocolProtos.Status.OK,  openKey);
+
+    s3MultipartUploadCommitPartResponse.checkAndUpdateDB(omMetadataManager,
+            batchOperation);
+
+    Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
+    Assert.assertNull(
+        omMetadataManager.getMultipartInfoTable().get(multipartKey));
+
+    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+
+    // As 1 part is created, 1 entry should be there in the delete table.
+    Assert.assertEquals(1, omMetadataManager.countRowsInTable(
+        omMetadataManager.getDeletedTable()));
+
+    String part1DeletedKeyName =
+        omMultipartKeyInfo.getPartKeyInfo(1).getPartName();
+
+    Assert.assertNotNull(omMetadataManager.getDeletedTable().get(
+        part1DeletedKeyName));
+
+    RepeatedOmKeyInfo ro =
+        omMetadataManager.getDeletedTable().get(part1DeletedKeyName);
+    Assert.assertEquals(OmKeyInfo.getFromProtobuf(part1.getPartKeyInfo()),
+        ro.getOmKeyInfoList().get(0));
+  }
+
+  @Test
+  public void testWithMultipartUploadError() throws Exception {
+
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
+
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+    createParentPath(volumeName, bucketName);
+
+    String multipartUploadID = UUID.randomUUID().toString();
+
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    String multipartKey = omMetadataManager.getMultipartKey(parentID, fileName,
+            multipartUploadID);
+
+    S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponseV1 =
+            createS3InitiateMPUResponseV1(volumeName, bucketName, parentID,
+                    keyName, multipartUploadID, new ArrayList<>());
+
+    s3InitiateMultipartUploadResponseV1.addToDBBatch(omMetadataManager,
+            batchOperation);
+
+    // Add some dummy parts for testing.
+    // No key locations are added, as this test only verifies whether entries
+    // are added to the delete table or not.
+    OmMultipartKeyInfo omMultipartKeyInfo =
+            s3InitiateMultipartUploadResponseV1.getOmMultipartKeyInfo();
+
+    PartKeyInfo part1 = createPartKeyInfoV1(volumeName, bucketName, parentID,
+            fileName, 1);
+
+    addPart(1, part1, omMultipartKeyInfo);
+
+    long clientId = Time.now();
+    String openKey = omMetadataManager.getOpenFileName(parentID, fileName,
+            clientId);
+
+    S3MultipartUploadCommitPartResponse s3MultipartUploadCommitPartResponse =
+            createS3CommitMPUResponseV1(volumeName, bucketName, parentID,
+                    keyName + "invalid", multipartUploadID,
+                    omMultipartKeyInfo.getPartKeyInfo(1),
+                    omMultipartKeyInfo, OzoneManagerProtocolProtos.Status
+                            .NO_SUCH_MULTIPART_UPLOAD_ERROR, openKey);
+
+    s3MultipartUploadCommitPartResponse.checkAndUpdateDB(omMetadataManager,
+            batchOperation);
+
+    Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
+    Assert.assertNull(
+            omMetadataManager.getMultipartInfoTable().get(multipartKey));
+
+    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+
+    // The open key entry should be there in the delete table.
+    Assert.assertEquals(1, omMetadataManager.countRowsInTable(
+            omMetadataManager.getDeletedTable()));
+
+    Assert.assertNotNull(omMetadataManager.getDeletedTable().get(
+            openKey));
+  }
+
+  private String getKeyName() {
+    return dirName + UUID.randomUUID().toString();
+  }
+
+  private void createParentPath(String volumeName, String bucketName)
+      throws Exception {
+    // Create parent dirs for the path
+    parentID = TestOMRequestUtils.addParentsToDirTable(volumeName, bucketName,
+            dirName, omMetadataManager);
+  }
+}

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 15/29: HDDS-4805. [FSO]Fix findbugs issues after HDDS-2195 (#1906)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 68aa4fdf6923b472a4902537037c6da293570994
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Mon Feb 8 19:29:09 2021 +0530

    HDDS-4805. [FSO]Fix findbugs issues after HDDS-2195 (#1906)
---
 .../apache/hadoop/fs/ozone/TestOzoneFileOps.java   | 15 +++-
 .../hadoop/fs/ozone/TestOzoneFileSystem.java       | 60 ++++++++------
 .../hadoop/fs/ozone/TestOzoneFileSystemV1.java     | 94 +++++++++++-----------
 .../hadoop/ozone/client/rpc/TestReadRetries.java   | 10 +--
 .../apache/hadoop/ozone/om/TestObjectStoreV1.java  | 39 ++++++---
 .../file/TestOMDirectoryCreateRequestV1.java       | 20 ++---
 .../om/request/key/TestOMKeyCreateRequestV1.java   |  5 +-
 .../TestS3InitiateMultipartUploadRequestV1.java    |  4 -
 .../file/TestOMDirectoryCreateResponseV1.java      |  2 -
 .../response/file/TestOMFileCreateResponseV1.java  |  6 +-
 .../om/response/key/TestOMKeyCommitResponse.java   |  2 -
 .../om/response/key/TestOMKeyCreateResponseV1.java |  6 +-
 .../TestS3InitiateMultipartUploadResponseV1.java   |  5 --
 13 files changed, 147 insertions(+), 121 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java
index 12dd51e..176d0c4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.After;
 import org.junit.Before;
@@ -155,12 +156,20 @@ public class TestOzoneFileOps {
     // trigger CommitKeyRequest
     outputStream.close();
 
-    Assert.assertTrue("Failed to commit the open file:" + openFileKey,
-            omMgr.getOpenKeyTable().isEmpty());
-
     OmKeyInfo omKeyInfo = omMgr.getKeyTable().get(openFileKey);
     Assert.assertNotNull("Invalid Key!", omKeyInfo);
     verifyOMFileInfoFormat(omKeyInfo, file.getName(), d2ObjectID);
+
+    // wait for DB updates
+    GenericTestUtils.waitFor(() -> {
+      try {
+        return omMgr.getOpenKeyTable().isEmpty();
+      } catch (IOException e) {
+        LOG.error("DB failure!", e);
+        Assert.fail("DB failure!");
+        return false;
+      }
+    }, 1000, 120000);
   }
 
   private void verifyOMFileInfoFormat(OmKeyInfo omKeyInfo, String fileName,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
index 7e91576..a8e89e1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.ozone;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -125,26 +126,16 @@ public class TestOzoneFileSystem {
   private static final Logger LOG =
       LoggerFactory.getLogger(TestOzoneFileSystem.class);
 
-  @SuppressWarnings("checkstyle:VisibilityModifier")
-  protected static boolean isBucketFSOptimized = false;
-  @SuppressWarnings("checkstyle:VisibilityModifier")
-  protected static boolean enabledFileSystemPaths;
-  @SuppressWarnings("checkstyle:VisibilityModifier")
-  protected static boolean omRatisEnabled;
-  @SuppressWarnings("checkstyle:VisibilityModifier")
-  protected static MiniOzoneCluster cluster;
-  @SuppressWarnings("checkstyle:VisibilityModifier")
-  protected static FileSystem fs;
-  @SuppressWarnings("checkstyle:VisibilityModifier")
-  protected static OzoneFileSystem o3fs;
-  @SuppressWarnings("checkstyle:VisibilityModifier")
-  protected static String volumeName;
-  @SuppressWarnings("checkstyle:VisibilityModifier")
-  protected static String bucketName;
-  @SuppressWarnings("checkstyle:VisibilityModifier")
-  protected static int rootItemCount;
-  @SuppressWarnings("checkstyle:VisibilityModifier")
-  protected static Trash trash;
+  private static boolean isBucketFSOptimized = false;
+  private static boolean enabledFileSystemPaths;
+  private static boolean omRatisEnabled;
+
+  private static MiniOzoneCluster cluster;
+  private static FileSystem fs;
+  private static OzoneFileSystem o3fs;
+  private static String volumeName;
+  private static String bucketName;
+  private static Trash trash;
 
   private void init() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
@@ -203,6 +194,26 @@ public class TestOzoneFileSystem {
     }
   }
 
+  public static FileSystem getFs() {
+    return fs;
+  }
+
+  public static boolean isEnabledFileSystemPaths() {
+    return enabledFileSystemPaths;
+  }
+
+  public static void setIsBucketFSOptimized(boolean isBucketFSO) {
+    isBucketFSOptimized = isBucketFSO;
+  }
+
+  public static String getBucketName() {
+    return bucketName;
+  }
+
+  public static String getVolumeName() {
+    return volumeName;
+  }
+
   @Test
   public void testCreateFileShouldCheckExistenceOfDirWithSameName()
       throws Exception {
@@ -606,7 +617,7 @@ public class TestOzoneFileSystem {
     // Added logs for debugging failures, to check any sub-path mismatches.
     Set<String> actualPaths = new TreeSet<>();
     ArrayList<String> actualPathList = new ArrayList<>();
-    if (rootItemCount != fileStatuses.length) {
+    if (numDirs != fileStatuses.length) {
       for (int i = 0; i < fileStatuses.length; i++) {
         boolean duplicate =
                 actualPaths.add(fileStatuses[i].getPath().getName());
@@ -616,7 +627,7 @@ public class TestOzoneFileSystem {
         }
         actualPathList.add(fileStatuses[i].getPath().getName());
       }
-      if (rootItemCount != actualPathList.size()) {
+      if (numDirs != actualPathList.size()) {
         LOG.info("actualPathsSize: {}", actualPaths.size());
         LOG.info("actualPathListSize: {}", actualPathList.size());
         actualPaths.removeAll(paths);
@@ -643,8 +654,6 @@ public class TestOzoneFileSystem {
     Path root = new Path("/");
     FileStatus[] fileStatuses = fs.listStatus(root);
 
-    rootItemCount = 0; // reset to zero
-
     if (fileStatuses == null) {
       return;
     }
@@ -723,7 +732,7 @@ public class TestOzoneFileSystem {
   public void testAllocateMoreThanOneBlock() throws IOException {
     Path file = new Path("/file");
     String str = "TestOzoneFileSystemV1.testSeekOnFileLength";
-    byte[] strBytes = str.getBytes();
+    byte[] strBytes = str.getBytes(StandardCharsets.UTF_8);
     long numBlockAllocationsOrg =
             cluster.getOzoneManager().getMetrics().getNumBlockAllocates();
 
@@ -1052,7 +1061,6 @@ public class TestOzoneFileSystem {
   @Test
   public void testRenameDir() throws Exception {
     final String dir = "/root_dir/dir1";
-    Path rootDir = new Path(fs.getUri().toString() +  "/root_dir");
     final Path source = new Path(fs.getUri().toString() + dir);
     final Path dest = new Path(source.toString() + ".renamed");
     // Add a sub-dir to the directory to be moved.
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
index ffeb5a3..03846ae 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
@@ -29,9 +29,7 @@ import org.junit.Assert;
 import org.junit.After;
 import org.junit.BeforeClass;
 import org.junit.Ignore;
-import org.junit.Rule;
 import org.junit.Test;
-import org.junit.rules.Timeout;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.slf4j.Logger;
@@ -66,7 +64,7 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
 
   @BeforeClass
   public static void init() {
-    isBucketFSOptimized = true;
+    setIsBucketFSOptimized(true);
   }
 
   public TestOzoneFileSystemV1(boolean setDefaultFs, boolean enableOMRatis) {
@@ -85,12 +83,6 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
     }
   }
 
-  /**
-   * Set a timeout for each test.
-   */
-  @Rule
-  public Timeout timeout = new Timeout(300000);
-
   private static final Logger LOG =
       LoggerFactory.getLogger(TestOzoneFileSystemV1.class);
 
@@ -106,30 +98,32 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
      * Op 7. create file -> /d1/d2/key1
      */
     Path key1 = new Path("/key1");
-    try (FSDataOutputStream outputStream = fs.create(key1, false)) {
+    try (FSDataOutputStream outputStream = getFs().create(key1,
+            false)) {
       assertNotNull("Should be able to create file: key1",
               outputStream);
     }
     Path d1 = new Path("/d1");
     Path dir1Key1 = new Path(d1, "key1");
-    try (FSDataOutputStream outputStream = fs.create(dir1Key1, false)) {
+    try (FSDataOutputStream outputStream = getFs().create(dir1Key1, false)) {
       assertNotNull("Should be able to create file: " + dir1Key1,
               outputStream);
     }
     Path d2 = new Path("/d2");
     Path dir2Key1 = new Path(d2, "key1");
-    try (FSDataOutputStream outputStream = fs.create(dir2Key1, false)) {
+    try (FSDataOutputStream outputStream = getFs().create(dir2Key1, false)) {
       assertNotNull("Should be able to create file: " + dir2Key1,
               outputStream);
     }
     Path dir1Dir2 = new Path("/d1/d2/");
     Path dir1Dir2Key1 = new Path(dir1Dir2, "key1");
-    try (FSDataOutputStream outputStream = fs.create(dir1Dir2Key1, false)) {
+    try (FSDataOutputStream outputStream = getFs().create(dir1Dir2Key1,
+            false)) {
       assertNotNull("Should be able to create file: " + dir1Dir2Key1,
               outputStream);
     }
     Path d1Key2 = new Path(d1, "key2");
-    try (FSDataOutputStream outputStream = fs.create(d1Key2, false)) {
+    try (FSDataOutputStream outputStream = getFs().create(d1Key2, false)) {
       assertNotNull("Should be able to create file: " + d1Key2,
               outputStream);
     }
@@ -137,11 +131,14 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
     Path dir1Dir3 = new Path("/d1/d3/");
     Path dir1Dir4 = new Path("/d1/d4/");
 
-    fs.mkdirs(dir1Dir3);
-    fs.mkdirs(dir1Dir4);
+    getFs().mkdirs(dir1Dir3);
+    getFs().mkdirs(dir1Dir4);
+
+    String bucketName = getBucketName();
+    String volumeName = getVolumeName();
 
     // Root Directory
-    FileStatus[] fileStatusList = fs.listStatus(new Path("/"));
+    FileStatus[] fileStatusList = getFs().listStatus(new Path("/"));
     assertEquals("FileStatus should return files and directories",
             3, fileStatusList.length);
     ArrayList<String> expectedPaths = new ArrayList<>();
@@ -155,7 +152,7 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
             0, expectedPaths.size());
 
     // level-1 sub-dirs
-    fileStatusList = fs.listStatus(new Path("/d1"));
+    fileStatusList = getFs().listStatus(new Path("/d1"));
     assertEquals("FileStatus should return files and directories",
             5, fileStatusList.length);
     expectedPaths = new ArrayList<>();
@@ -171,7 +168,7 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
             0, expectedPaths.size());
 
     // level-2 sub-dirs
-    fileStatusList = fs.listStatus(new Path("/d1/d2"));
+    fileStatusList = getFs().listStatus(new Path("/d1/d2"));
     assertEquals("FileStatus should return files and directories",
             1, fileStatusList.length);
     expectedPaths = new ArrayList<>();
@@ -184,7 +181,7 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
             0, expectedPaths.size());
 
     // level-2 key2
-    fileStatusList = fs.listStatus(new Path("/d1/d2/key1"));
+    fileStatusList = getFs().listStatus(new Path("/d1/d2/key1"));
     assertEquals("FileStatus should return files and directories",
             1, fileStatusList.length);
     expectedPaths = new ArrayList<>();
@@ -198,13 +195,13 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
 
     // invalid root key
     try {
-      fileStatusList = fs.listStatus(new Path("/key2"));
+      fileStatusList = getFs().listStatus(new Path("/key2"));
       fail("Should throw FileNotFoundException");
     } catch (FileNotFoundException fnfe) {
       // ignore as its expected
     }
     try {
-      fileStatusList = fs.listStatus(new Path("/d1/d2/key2"));
+      fileStatusList = getFs().listStatus(new Path("/d1/d2/key2"));
       fail("Should throw FileNotFoundException");
     } catch (FileNotFoundException fnfe) {
       // ignore as its expected
@@ -221,30 +218,30 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
      * Op 4. create dir -> /d1/d2/d1/d2/key1
      */
     Path dir1Dir1Dir2Key1 = new Path("/d1/d1/d2/key1");
-    try (FSDataOutputStream outputStream = fs.create(dir1Dir1Dir2Key1,
+    try (FSDataOutputStream outputStream = getFs().create(dir1Dir1Dir2Key1,
             false)) {
       assertNotNull("Should be able to create file: " + dir1Dir1Dir2Key1,
               outputStream);
     }
     Path key1 = new Path("/key1");
-    try (FSDataOutputStream outputStream = fs.create(key1, false)) {
+    try (FSDataOutputStream outputStream = getFs().create(key1, false)) {
       assertNotNull("Should be able to create file: " + key1,
               outputStream);
     }
     Path key2 = new Path("/key2");
-    try (FSDataOutputStream outputStream = fs.create(key2, false)) {
+    try (FSDataOutputStream outputStream = getFs().create(key2, false)) {
       assertNotNull("Should be able to create file: key2",
               outputStream);
     }
     Path dir1Dir2Dir1Dir2Key1 = new Path("/d1/d2/d1/d2/key1");
-    try (FSDataOutputStream outputStream = fs.create(dir1Dir2Dir1Dir2Key1,
+    try (FSDataOutputStream outputStream = getFs().create(dir1Dir2Dir1Dir2Key1,
             false)) {
       assertNotNull("Should be able to create file: "
               + dir1Dir2Dir1Dir2Key1, outputStream);
     }
-    RemoteIterator<LocatedFileStatus> fileStatusItr = fs.listFiles(new Path(
-            "/"), true);
-    String uriPrefix = "o3fs://" + bucketName + "." + volumeName;
+    RemoteIterator<LocatedFileStatus> fileStatusItr = getFs().listFiles(
+            new Path("/"), true);
+    String uriPrefix = "o3fs://" + getBucketName() + "." + getVolumeName();
     ArrayList<String> expectedPaths = new ArrayList<>();
     expectedPaths.add(uriPrefix + dir1Dir1Dir2Key1.toString());
     expectedPaths.add(uriPrefix + key1.toString());
@@ -263,7 +260,7 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
             expectedPaths.size());
 
     // Recursive=false
-    fileStatusItr = fs.listFiles(new Path("/"), false);
+    fileStatusItr = getFs().listFiles(new Path("/"), false);
     expectedPaths.clear();
     expectedPaths.add(uriPrefix + "/key1");
     expectedPaths.add(uriPrefix + "/key2");
@@ -286,23 +283,23 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
   @Test
   public void testRenameWithNonExistentSource() throws Exception {
     // Skip as this will run only in new layout
-    if (!enabledFileSystemPaths) {
+    if (!isEnabledFileSystemPaths()) {
       return;
     }
 
     final String root = "/root";
     final String dir1 = root + "/dir1";
     final String dir2 = root + "/dir2";
-    final Path source = new Path(fs.getUri().toString() + dir1);
-    final Path destin = new Path(fs.getUri().toString() + dir2);
+    final Path source = new Path(getFs().getUri().toString() + dir1);
+    final Path destin = new Path(getFs().getUri().toString() + dir2);
 
     // creates destin
-    fs.mkdirs(destin);
+    getFs().mkdirs(destin);
     LOG.info("Created destin dir: {}", destin);
 
     LOG.info("Rename op-> source:{} to destin:{}}", source, destin);
     try {
-      fs.rename(source, destin);
+      getFs().rename(source, destin);
       Assert.fail("Should throw exception : Source doesn't exist!");
     } catch (OMException ome) {
       // expected
@@ -316,22 +313,22 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
   @Test
   public void testRenameDirToItsOwnSubDir() throws Exception {
     // Skip as this will run only in new layout
-    if (!enabledFileSystemPaths) {
+    if (!isEnabledFileSystemPaths()) {
       return;
     }
 
     final String root = "/root";
     final String dir1 = root + "/dir1";
-    final Path dir1Path = new Path(fs.getUri().toString() + dir1);
+    final Path dir1Path = new Path(getFs().getUri().toString() + dir1);
     // Add a sub-dir1 to the directory to be moved.
     final Path subDir1 = new Path(dir1Path, "sub_dir1");
-    fs.mkdirs(subDir1);
+    getFs().mkdirs(subDir1);
     LOG.info("Created dir1 {}", subDir1);
 
-    final Path sourceRoot = new Path(fs.getUri().toString() + root);
+    final Path sourceRoot = new Path(getFs().getUri().toString() + root);
     LOG.info("Rename op-> source:{} to destin:{}", sourceRoot, subDir1);
     try {
-      fs.rename(sourceRoot, subDir1);
+      getFs().rename(sourceRoot, subDir1);
       Assert.fail("Should throw exception : Cannot rename a directory to" +
               " its own subdirectory");
     } catch (OMException ome) {
@@ -348,20 +345,21 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
   @Test
   public void testRenameDestinationParentDoesntExist() throws Exception {
     // Skip as this will run only in new layout
-    if (!enabledFileSystemPaths) {
+    if (!isEnabledFileSystemPaths()) {
       return;
     }
 
     final String root = "/root_dir";
     final String dir1 = root + "/dir1";
     final String dir2 = dir1 + "/dir2";
-    final Path dir2SourcePath = new Path(fs.getUri().toString() + dir2);
-    fs.mkdirs(dir2SourcePath);
+    final Path dir2SourcePath = new Path(getFs().getUri().toString() + dir2);
+    getFs().mkdirs(dir2SourcePath);
 
     // (a) parent of dst does not exist.  /root_dir/b/c
-    final Path destinPath = new Path(fs.getUri().toString() + root + "/b/c");
+    final Path destinPath = new Path(getFs().getUri().toString()
+            + root + "/b/c");
     try {
-      fs.rename(dir2SourcePath, destinPath);
+      getFs().rename(dir2SourcePath, destinPath);
       Assert.fail("Should fail as parent of dst does not exist!");
     } catch (OMException ome) {
       // expected
@@ -369,12 +367,12 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
     }
 
     // (b) parent of dst is a file. /root_dir/file1/c
-    Path filePath = new Path(fs.getUri().toString() + root + "/file1");
-    ContractTestUtils.touch(fs, filePath);
+    Path filePath = new Path(getFs().getUri().toString() + root + "/file1");
+    ContractTestUtils.touch(getFs(), filePath);
 
     Path newDestinPath = new Path(filePath, "c");
     try {
-      fs.rename(dir2SourcePath, newDestinPath);
+      getFs().rename(dir2SourcePath, newDestinPath);
       Assert.fail("Should fail as parent of dst is a file!");
     } catch (OMException ome) {
       // expected
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
index d729ad3..a61c798 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
@@ -87,11 +87,11 @@ public class TestReadRetries {
   @Rule
   public ExpectedException thrown = ExpectedException.none();
 
-  private static MiniOzoneCluster cluster = null;
-  private static OzoneClient ozClient = null;
-  private static ObjectStore store = null;
-  private static OzoneManager ozoneManager;
-  private static StorageContainerLocationProtocolClientSideTranslatorPB
+  private MiniOzoneCluster cluster = null;
+  private OzoneClient ozClient = null;
+  private ObjectStore store = null;
+  private OzoneManager ozoneManager;
+  private StorageContainerLocationProtocolClientSideTranslatorPB
       storageContainerLocationClient;
 
   private static final String SCM_ID = UUID.randomUUID().toString();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
index d09020e..b877e29 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Assert;
 import org.junit.AfterClass;
@@ -50,8 +51,10 @@ import org.junit.Test;
 import org.junit.rules.Timeout;
 
 import java.io.IOException;
+import java.nio.charset.StandardCharsets;
 import java.util.HashMap;
 import java.util.UUID;
+import java.util.concurrent.TimeoutException;
 
 import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
 import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
@@ -175,7 +178,8 @@ public class TestObjectStoreV1 {
     verifyKeyInOpenFileTable(openFileTable, clientID, file,
             dirPathC.getObjectID(), false);
 
-    ozoneOutputStream.write(data.getBytes(), 0, data.length());
+    ozoneOutputStream.write(data.getBytes(StandardCharsets.UTF_8), 0,
+            data.length());
     ozoneOutputStream.close();
 
     Table<String, OmKeyInfo> fileTable =
@@ -227,7 +231,8 @@ public class TestObjectStoreV1 {
     verifyKeyInOpenFileTable(openFileTable, clientID, fileName,
             dirPathC.getObjectID(), false);
 
-    ozoneOutputStream.write(data.getBytes(), 0, data.length());
+    ozoneOutputStream.write(data.getBytes(StandardCharsets.UTF_8), 0,
+            data.length());
 
     // open key
     try {
@@ -356,22 +361,21 @@ public class TestObjectStoreV1 {
   }
 
   private void assertKeyRenamedEx(OzoneBucket bucket, String keyName)
-      throws Exception {
-    OMException oe = null;
+          throws Exception {
     try {
       bucket.getKey(keyName);
-    } catch (OMException e) {
-      oe = e;
+      fail("Should throw KeyNotFound as the key got renamed!");
+    } catch (OMException ome) {
+      Assert.assertEquals(KEY_NOT_FOUND, ome.getResult());
     }
-    Assert.assertEquals(KEY_NOT_FOUND, oe.getResult());
   }
 
   private void createTestKey(OzoneBucket bucket, String keyName,
       String keyValue) throws IOException {
     OzoneOutputStream out = bucket.createKey(keyName,
-            keyValue.getBytes().length, STAND_ALONE,
+            keyValue.getBytes(StandardCharsets.UTF_8).length, STAND_ALONE,
             ONE, new HashMap<>());
-    out.write(keyValue.getBytes());
+    out.write(keyValue.getBytes(StandardCharsets.UTF_8));
     out.close();
     OzoneKey key = bucket.getKey(keyName);
     Assert.assertEquals(keyName, key.getName());
@@ -416,13 +420,24 @@ public class TestObjectStoreV1 {
 
   private void verifyKeyInOpenFileTable(Table<String, OmKeyInfo> openFileTable,
       long clientID, String fileName, long parentID, boolean isEmpty)
-          throws IOException {
+          throws IOException, TimeoutException, InterruptedException {
     String dbOpenFileKey =
             parentID + OM_KEY_PREFIX + fileName + OM_KEY_PREFIX + clientID;
-    OmKeyInfo omKeyInfo = openFileTable.get(dbOpenFileKey);
+
     if (isEmpty) {
-      Assert.assertNull("Table is not empty!", omKeyInfo);
+      // wait for DB updates
+      GenericTestUtils.waitFor(() -> {
+        try {
+          OmKeyInfo omKeyInfo = openFileTable.get(dbOpenFileKey);
+          return omKeyInfo == null;
+        } catch (IOException e) {
+          Assert.fail("DB failure!");
+          return false;
+        }
+
+      }, 1000, 120000);
     } else {
+      OmKeyInfo omKeyInfo = openFileTable.get(dbOpenFileKey);
       Assert.assertNotNull("Table is empty!", omKeyInfo);
       // used startsWith because the key format is,
       // <parentID>/fileName/<clientID> and clientID is not visible.
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java
index 454cfbb..d8c8108 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.audit.AuditLogger;
 import org.apache.hadoop.ozone.audit.AuditMessage;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
@@ -369,8 +370,9 @@ public class TestOMDirectoryCreateRequestV1 {
             bucketName, keyName, HddsProtos.ReplicationType.RATIS,
             HddsProtos.ReplicationFactor.THREE, objID++);
     String ozoneFileName = parentID + "/" + dirs.get(dirs.size() - 1);
+    ++txnID;
     omMetadataManager.getKeyTable().addCacheEntry(new CacheKey<>(ozoneFileName),
-            new CacheValue<>(Optional.of(omKeyInfo), ++txnID));
+            new CacheValue<>(Optional.of(omKeyInfo), txnID));
     omMetadataManager.getKeyTable().put(ozoneFileName, omKeyInfo);
 
     OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
@@ -435,10 +437,12 @@ public class TestOMDirectoryCreateRequestV1 {
     // Add a key in second level.
     OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
             bucketName, keyName, HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE, objID++);
+            HddsProtos.ReplicationFactor.THREE, objID);
+
     String ozoneKey = parentID + "/" + dirs.get(1);
+    ++txnID;
     omMetadataManager.getKeyTable().addCacheEntry(new CacheKey<>(ozoneKey),
-            new CacheValue<>(Optional.of(omKeyInfo), ++txnID));
+            new CacheValue<>(Optional.of(omKeyInfo), txnID));
     omMetadataManager.getKeyTable().put(ozoneKey, omKeyInfo);
 
     OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
@@ -517,10 +521,6 @@ public class TestOMDirectoryCreateRequestV1 {
     // Add volume and bucket entries to DB.
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
             omMetadataManager);
-    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
-    OmBucketInfo omBucketInfo =
-            omMetadataManager.getBucketTable().get(bucketKey);
-    long bucketID = omBucketInfo.getObjectID();
 
     OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
             OzoneFSUtils.addTrailingSlashIfNeeded(keyName));
@@ -587,12 +587,14 @@ public class TestOMDirectoryCreateRequestV1 {
   private String createDirKey(List<String> dirs, int depth) {
     String keyName = RandomStringUtils.randomAlphabetic(5);
     dirs.add(keyName);
+    StringBuffer buf = new StringBuffer(keyName);
     for (int i = 0; i < depth; i++) {
       String dirName = RandomStringUtils.randomAlphabetic(5);
       dirs.add(dirName);
-      keyName += "/" + dirName;
+      buf.append(OzoneConsts.OM_KEY_PREFIX);
+      buf.append(dirName);
     }
-    return keyName;
+    return buf.toString();
   }
 
   private void verifyDirectoriesInDB(List<String> dirs, long bucketID)
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestV1.java
index b65443d..e545dc7 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestV1.java
@@ -75,7 +75,9 @@ public class TestOMKeyCreateRequestV1 extends TestOMKeyCreateRequest {
     long parentID = checkIntermediatePaths(keyPath);
 
     // Check open key entry
-    String fileName = keyPath.getFileName().toString();
+    Path keyPathFileName = keyPath.getFileName();
+    Assert.assertNotNull("Failed to find fileName", keyPathFileName);
+    String fileName = keyPathFileName.toString();
     String openKey = omMetadataManager.getOpenFileName(parentID, fileName,
             omRequest.getCreateKeyRequest().getClientID());
     OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
@@ -88,6 +90,7 @@ public class TestOMKeyCreateRequestV1 extends TestOMKeyCreateRequest {
     String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
     OmBucketInfo omBucketInfo =
             omMetadataManager.getBucketTable().get(bucketKey);
+    Assert.assertNotNull("Bucket not found!", omBucketInfo);
     long lastKnownParentId = omBucketInfo.getObjectID();
 
     Iterator<Path> elements = keyPath.iterator();
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestV1.java
index dac2efe..5fa75ba 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestV1.java
@@ -133,10 +133,6 @@ public class TestS3InitiateMultipartUploadRequestV1
     Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND,
         omClientResponse.getOMResponse().getStatus());
 
-    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
-        bucketName, keyName, modifiedRequest.getInitiateMultiPartUploadRequest()
-            .getKeyArgs().getMultipartUploadID());
-
     Assert.assertTrue(omMetadataManager.getOpenKeyTable().isEmpty());
     Assert.assertTrue(omMetadataManager.getMultipartInfoTable().isEmpty());
   }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponseV1.java
index 0a1114a..fb06581 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponseV1.java
@@ -59,9 +59,7 @@ public class TestOMDirectoryCreateResponseV1 {
   @Test
   public void testAddToDBBatch() throws Exception {
 
-    String volumeName = UUID.randomUUID().toString();
     String keyName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
 
     long parentID = 100;
     OmDirectoryInfo omDirInfo =
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java
index e1549e1..a8a0c99 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java
@@ -30,6 +30,8 @@ import org.apache.hadoop.util.Time;
 import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
 
+import java.util.ArrayList;
+
 /**
  * Tests MKeyCreateResponse layout version V1.
  */
@@ -59,8 +61,8 @@ public class TestOMFileCreateResponseV1 extends TestOMKeyCreateResponse {
   protected OMKeyCreateResponse getOmKeyCreateResponse(OmKeyInfo keyInfo,
       OmBucketInfo bucketInfo, OMResponse response) {
 
-    return new OMFileCreateResponseV1(response, keyInfo, null, clientID,
-            bucketInfo);
+    return new OMFileCreateResponseV1(response, keyInfo, new ArrayList<>(),
+            clientID, bucketInfo);
   }
 
   @NotNull
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
index 4d50337..e2a223c 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
@@ -34,8 +34,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 @SuppressWarnings("visibilitymodifier")
 public class TestOMKeyCommitResponse extends TestOMKeyResponse {
 
-  protected OmBucketInfo omBucketInfo;
-
   @Test
   public void testAddToDBBatch() throws Exception {
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseV1.java
index 6299639..834aafa 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseV1.java
@@ -28,6 +28,8 @@ import org.apache.hadoop.util.Time;
 import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
 
+import java.util.ArrayList;
+
 /**
  * Tests OMKeyCreateResponseV1.
  */
@@ -67,7 +69,7 @@ public class TestOMKeyCreateResponseV1 extends TestOMKeyCreateResponse {
   protected OMKeyCreateResponse getOmKeyCreateResponse(OmKeyInfo keyInfo,
       OmBucketInfo bucketInfo, OMResponse response) {
 
-    return new OMKeyCreateResponseV1(response, keyInfo, null, clientID,
-            bucketInfo);
+    return new OMKeyCreateResponseV1(response, keyInfo,  new ArrayList<>(),
+            clientID, bucketInfo);
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponseV1.java
index 31f9e5a..6dd6aaf 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponseV1.java
@@ -40,11 +40,6 @@ public class TestS3InitiateMultipartUploadResponseV1
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
     String prefix = "a/b/c/d/";
-    List<String> dirs = new ArrayList<String>();
-    dirs.add("a");
-    dirs.add("b");
-    dirs.add("c");
-    dirs.add("d");
     String fileName = UUID.randomUUID().toString();
     String keyName = prefix + fileName;
 

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 18/29: HDDS-4813. [FSO]S3Multipart: Implement UploadCompleteRequest (#1923)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 15aa0932c325a4db08a8607a627ae37d0deab0a8
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Wed Feb 17 20:50:05 2021 +0530

    HDDS-4813. [FSO]S3Multipart: Implement UploadCompleteRequest (#1923)
---
 .../rpc/TestOzoneClientMultipartUploadV1.java      | 274 ++++++++++++++--
 .../om/ratis/utils/OzoneManagerRatisUtils.java     |   4 +
 .../ozone/om/request/file/OMFileRequest.java       |   4 +-
 .../S3MultipartUploadCommitPartRequestV1.java      |  39 ++-
 .../S3MultipartUploadCompleteRequest.java          | 349 +++++++++++++--------
 .../S3MultipartUploadCompleteRequestV1.java        | 268 ++++++++++++++++
 .../S3MultipartUploadCompleteResponse.java         |  13 +-
 ...va => S3MultipartUploadCompleteResponseV1.java} |  60 ++--
 .../s3/multipart/TestS3MultipartRequest.java       |   9 +-
 .../TestS3MultipartUploadCompleteRequest.java      | 118 ++++++-
 .../TestS3MultipartUploadCompleteRequestV1.java    | 132 ++++++++
 .../s3/multipart/TestS3MultipartResponse.java      |  21 ++
 .../TestS3MultipartUploadCompleteResponseV1.java   | 257 +++++++++++++++
 13 files changed, 1330 insertions(+), 218 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java
index af241c5..1ab2cc3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java
@@ -17,24 +17,29 @@
 
 package org.apache.hadoop.ozone.client.rpc;
 
+import org.apache.commons.lang3.RandomUtils;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneTestUtils;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.hadoop.hdds.StringUtils.string2Bytes;
 import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNotNull;
 
+import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -44,10 +49,15 @@ import org.junit.Test;
 import org.junit.rules.Timeout;
 
 import java.io.IOException;
+import java.util.Arrays;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.TreeMap;
 import java.util.UUID;
 
 import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
 import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR;
 
 /**
  * This test verifies all the S3 multipart client apis - layout version V1.
@@ -133,24 +143,24 @@ public class TestOzoneClientMultipartUploadV1 {
     OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
             STAND_ALONE, ONE);
 
-    assertNotNull(multipartInfo);
+    Assert.assertNotNull(multipartInfo);
     String uploadID = multipartInfo.getUploadID();
     Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
     Assert.assertEquals(bucketName, multipartInfo.getBucketName());
     Assert.assertEquals(keyName, multipartInfo.getKeyName());
-    assertNotNull(multipartInfo.getUploadID());
+    Assert.assertNotNull(multipartInfo.getUploadID());
 
     // Call initiate multipart upload for the same key again, this should
     // generate a new uploadID.
     multipartInfo = bucket.initiateMultipartUpload(keyName,
             STAND_ALONE, ONE);
 
-    assertNotNull(multipartInfo);
+    Assert.assertNotNull(multipartInfo);
     Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
     Assert.assertEquals(bucketName, multipartInfo.getBucketName());
     Assert.assertEquals(keyName, multipartInfo.getKeyName());
-    assertNotEquals(multipartInfo.getUploadID(), uploadID);
-    assertNotNull(multipartInfo.getUploadID());
+    Assert.assertNotEquals(multipartInfo.getUploadID(), uploadID);
+    Assert.assertNotNull(multipartInfo.getUploadID());
   }
 
   @Test
@@ -166,23 +176,23 @@ public class TestOzoneClientMultipartUploadV1 {
     OzoneBucket bucket = volume.getBucket(bucketName);
     OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName);
 
-    assertNotNull(multipartInfo);
+    Assert.assertNotNull(multipartInfo);
     String uploadID = multipartInfo.getUploadID();
     Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
     Assert.assertEquals(bucketName, multipartInfo.getBucketName());
     Assert.assertEquals(keyName, multipartInfo.getKeyName());
-    assertNotNull(multipartInfo.getUploadID());
+    Assert.assertNotNull(multipartInfo.getUploadID());
 
     // Call initiate multipart upload for the same key again, this should
     // generate a new uploadID.
     multipartInfo = bucket.initiateMultipartUpload(keyName);
 
-    assertNotNull(multipartInfo);
+    Assert.assertNotNull(multipartInfo);
     Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
     Assert.assertEquals(bucketName, multipartInfo.getBucketName());
     Assert.assertEquals(keyName, multipartInfo.getKeyName());
-    assertNotEquals(multipartInfo.getUploadID(), uploadID);
-    assertNotNull(multipartInfo.getUploadID());
+    Assert.assertNotEquals(multipartInfo.getUploadID(), uploadID);
+    Assert.assertNotNull(multipartInfo.getUploadID());
   }
 
   @Test
@@ -199,12 +209,12 @@ public class TestOzoneClientMultipartUploadV1 {
     OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
             STAND_ALONE, ONE);
 
-    assertNotNull(multipartInfo);
+    Assert.assertNotNull(multipartInfo);
     String uploadID = multipartInfo.getUploadID();
     Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
     Assert.assertEquals(bucketName, multipartInfo.getBucketName());
     Assert.assertEquals(keyName, multipartInfo.getKeyName());
-    assertNotNull(multipartInfo.getUploadID());
+    Assert.assertNotNull(multipartInfo.getUploadID());
 
     OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
             sampleData.length(), 1, uploadID);
@@ -214,8 +224,8 @@ public class TestOzoneClientMultipartUploadV1 {
     OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream
             .getCommitUploadPartInfo();
 
-    assertNotNull(commitUploadPartInfo);
-    assertNotNull(commitUploadPartInfo.getPartName());
+    Assert.assertNotNull(commitUploadPartInfo);
+    Assert.assertNotNull(commitUploadPartInfo.getPartName());
   }
 
   @Test
@@ -233,12 +243,12 @@ public class TestOzoneClientMultipartUploadV1 {
     OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
             ReplicationType.RATIS, THREE);
 
-    assertNotNull(multipartInfo);
+    Assert.assertNotNull(multipartInfo);
     String uploadID = multipartInfo.getUploadID();
     Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
     Assert.assertEquals(bucketName, multipartInfo.getBucketName());
     Assert.assertEquals(keyName, multipartInfo.getKeyName());
-    assertNotNull(multipartInfo.getUploadID());
+    Assert.assertNotNull(multipartInfo.getUploadID());
 
     int partNumber = 1;
 
@@ -250,9 +260,9 @@ public class TestOzoneClientMultipartUploadV1 {
     OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream
             .getCommitUploadPartInfo();
 
-    assertNotNull(commitUploadPartInfo);
+    Assert.assertNotNull(commitUploadPartInfo);
     String partName = commitUploadPartInfo.getPartName();
-    assertNotNull(commitUploadPartInfo.getPartName());
+    Assert.assertNotNull(commitUploadPartInfo.getPartName());
 
     //Overwrite the part by creating part key with same part number.
     sampleData = "sample Data Changed";
@@ -264,12 +274,230 @@ public class TestOzoneClientMultipartUploadV1 {
     commitUploadPartInfo = ozoneOutputStream
             .getCommitUploadPartInfo();
 
-    assertNotNull(commitUploadPartInfo);
-    assertNotNull(commitUploadPartInfo.getPartName());
+    Assert.assertNotNull(commitUploadPartInfo);
+    Assert.assertNotNull(commitUploadPartInfo.getPartName());
 
     // PartName should be different from old part Name.
-    assertNotEquals("Part names should be different", partName,
+    Assert.assertNotEquals("Part names should be different", partName,
             commitUploadPartInfo.getPartName());
   }
 
+  @Test
+  public void testMultipartUploadWithPartsLessThanMinSize() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    // Initiate multipart upload
+    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+            ONE);
+
+    // Upload Parts
+    Map<Integer, String> partsMap = new TreeMap<>();
+    // Uploading part 1 with less than min size
+    String partName = uploadPart(bucket, keyName, uploadID, 1,
+            "data".getBytes(UTF_8));
+    partsMap.put(1, partName);
+
+    partName = uploadPart(bucket, keyName, uploadID, 2,
+            "data".getBytes(UTF_8));
+    partsMap.put(2, partName);
+
+    // Complete multipart upload
+    OzoneTestUtils.expectOmException(OMException.ResultCodes.ENTITY_TOO_SMALL,
+        () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap));
+  }
+
+  @Test
+  public void testMultipartUploadWithPartsMisMatchWithListSizeDifferent()
+          throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+            ONE);
+
+    // We have not uploaded any parts, but passing a non-empty part list
+    // should throw an error.
+    TreeMap<Integer, String> partsMap = new TreeMap<>();
+    partsMap.put(1, UUID.randomUUID().toString());
+
+    OzoneTestUtils.expectOmException(OMException.ResultCodes.INVALID_PART,
+        () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap));
+  }
+
+  @Test
+  public void testMultipartUploadWithPartsMisMatchWithIncorrectPartName()
+          throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+            ONE);
+
+    uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
+
+    // Passing an incorrect part name should throw an INVALID_PART error.
+    TreeMap<Integer, String> partsMap = new TreeMap<>();
+    partsMap.put(1, UUID.randomUUID().toString());
+
+    OzoneTestUtils.expectOmException(OMException.ResultCodes.INVALID_PART,
+        () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap));
+  }
+
+  @Test
+  public void testMultipartUploadWithMissingParts() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+            ONE);
+
+    uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
+
+    // Passing an incorrect part number should throw an INVALID_PART error.
+    TreeMap<Integer, String> partsMap = new TreeMap<>();
+    partsMap.put(3, "random");
+
+    OzoneTestUtils.expectOmException(OMException.ResultCodes.INVALID_PART,
+        () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap));
+  }
+
+  @Test
+  public void testCommitPartAfterCompleteUpload() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String parentDir = "a/b/c/d/";
+    String keyName = parentDir + UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName,
+            STAND_ALONE, ONE);
+
+    Assert.assertNotNull(omMultipartInfo.getUploadID());
+
+    String uploadID = omMultipartInfo.getUploadID();
+
+    // upload part 1.
+    byte[] data = generateData(5 * 1024 * 1024,
+            (byte) RandomUtils.nextLong());
+    OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
+            data.length, 1, uploadID);
+    ozoneOutputStream.write(data, 0, data.length);
+    ozoneOutputStream.close();
+
+    OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo =
+            ozoneOutputStream.getCommitUploadPartInfo();
+
+    // Do not close output stream for part 2.
+    ozoneOutputStream = bucket.createMultipartKey(keyName,
+            data.length, 2, omMultipartInfo.getUploadID());
+    ozoneOutputStream.write(data, 0, data.length);
+
+    Map<Integer, String> partsMap = new LinkedHashMap<>();
+    partsMap.put(1, omMultipartCommitUploadPartInfo.getPartName());
+    OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo =
+            bucket.completeMultipartUpload(keyName,
+                    uploadID, partsMap);
+    Assert.assertNotNull(omMultipartUploadCompleteInfo);
+
+    Assert.assertNotNull(omMultipartCommitUploadPartInfo);
+
+    byte[] fileContent = new byte[data.length];
+    OzoneInputStream inputStream = bucket.readKey(keyName);
+    inputStream.read(fileContent);
+    StringBuilder sb = new StringBuilder(data.length);
+
+    // Combine all parts' data and check that it matches the key data read back.
+    String part1 = new String(data, UTF_8);
+    sb.append(part1);
+    Assert.assertEquals(sb.toString(), new String(fileContent, UTF_8));
+
+    try {
+      ozoneOutputStream.close();
+      Assert.fail("testCommitPartAfterCompleteUpload failed");
+    } catch (IOException ex) {
+      Assert.assertTrue(ex instanceof OMException);
+      Assert.assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR,
+              ((OMException) ex).getResult());
+    }
+  }
+
+  private String initiateMultipartUpload(OzoneBucket bucket, String keyName,
+      ReplicationType replicationType, ReplicationFactor replicationFactor)
+          throws Exception {
+    OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
+            replicationType, replicationFactor);
+
+    String uploadID = multipartInfo.getUploadID();
+    Assert.assertNotNull(uploadID);
+
+    return uploadID;
+  }
+
+  private String uploadPart(OzoneBucket bucket, String keyName, String
+      uploadID, int partNumber, byte[] data) throws Exception {
+
+    OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
+            data.length, partNumber, uploadID);
+    ozoneOutputStream.write(data, 0,
+            data.length);
+    ozoneOutputStream.close();
+
+    OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo =
+            ozoneOutputStream.getCommitUploadPartInfo();
+
+    Assert.assertNotNull(omMultipartCommitUploadPartInfo);
+    Assert.assertNotNull(omMultipartCommitUploadPartInfo.getPartName());
+
+    return omMultipartCommitUploadPartInfo.getPartName();
+  }
+
+  private void completeMultipartUpload(OzoneBucket bucket, String keyName,
+      String uploadID, Map<Integer, String> partsMap) throws Exception {
+    OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = bucket
+            .completeMultipartUpload(keyName, uploadID, partsMap);
+
+    Assert.assertNotNull(omMultipartUploadCompleteInfo);
+    Assert.assertEquals(omMultipartUploadCompleteInfo.getBucket(), bucket
+            .getName());
+    Assert.assertEquals(omMultipartUploadCompleteInfo.getVolume(), bucket
+            .getVolumeName());
+    Assert.assertEquals(omMultipartUploadCompleteInfo.getKey(), keyName);
+    Assert.assertNotNull(omMultipartUploadCompleteInfo.getHash());
+  }
+
+  private byte[] generateData(int size, byte val) {
+    byte[] chars = new byte[size];
+    Arrays.fill(chars, val);
+    return chars;
+  }
+
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index 6f6278a..db9f32e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -61,6 +61,7 @@ import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadAbortReq
 import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCommitPartRequest;
 import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCommitPartRequestV1;
 import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCompleteRequest;
+import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCompleteRequestV1;
 import org.apache.hadoop.ozone.om.request.s3.security.S3GetSecretRequest;
 import org.apache.hadoop.ozone.om.request.security.OMCancelDelegationTokenRequest;
 import org.apache.hadoop.ozone.om.request.security.OMGetDelegationTokenRequest;
@@ -192,6 +193,9 @@ public final class OzoneManagerRatisUtils {
     case AbortMultiPartUpload:
       return new S3MultipartUploadAbortRequest(omRequest);
     case CompleteMultiPartUpload:
+      if (isBucketFSOptimized()) {
+        return new S3MultipartUploadCompleteRequestV1(omRequest);
+      }
       return new S3MultipartUploadCompleteRequest(omRequest);
     case AddAcl:
     case RemoveAcl:
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index 7f2d2c5..ebf86ce 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -542,9 +542,10 @@ public final class OMFileRequest {
    * @param omMetadataMgr
    * @param batchOp
    * @param omFileInfo
+   * @return db file key
    * @throws IOException
    */
-  public static void addToFileTable(OMMetadataManager omMetadataMgr,
+  public static String addToFileTable(OMMetadataManager omMetadataMgr,
                                     BatchOperation batchOp,
                                     OmKeyInfo omFileInfo)
           throws IOException {
@@ -554,6 +555,7 @@ public final class OMFileRequest {
 
     omMetadataMgr.getKeyTable().putWithBatch(batchOp,
             dbFileKey, omFileInfo);
+    return dbFileKey;
   }
 
   /**
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequestV1.java
index 5546010..7aa21cf 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequestV1.java
@@ -86,7 +86,8 @@ public class S3MultipartUploadCommitPartRequestV1
     boolean acquiredLock = false;
 
     IOException exception = null;
-    String partName = null;
+    String dbPartName;
+    String fullKeyPartName = null;
     OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
         getOmRequest());
     OMClientResponse omClientResponse = null;
@@ -95,8 +96,8 @@ public class S3MultipartUploadCommitPartRequestV1
     OmKeyInfo omKeyInfo = null;
     String multipartKey = null;
     OmMultipartKeyInfo multipartKeyInfo = null;
-    Result result = null;
-    OmBucketInfo omBucketInfo = null;
+    Result result;
+    OmBucketInfo omBucketInfo;
     OmBucketInfo copyBucketInfo = null;
     try {
       keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
@@ -141,14 +142,24 @@ public class S3MultipartUploadCommitPartRequestV1
       omKeyInfo.setDataSize(keyArgs.getDataSize());
       omKeyInfo.updateLocationInfoList(keyArgs.getKeyLocationsList().stream()
           .map(OmKeyLocationInfo::getFromProtobuf)
-          .collect(Collectors.toList()));
+          .collect(Collectors.toList()), true);
       // Set Modification time
       omKeyInfo.setModificationTime(keyArgs.getModificationTime());
       // Set the UpdateID to current transactionLogIndex
       omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
 
-      String ozoneKey = omMetadataManager.getOzonePathKey(parentID, fileName);
-      partName = ozoneKey + clientID;
+      /**
+       * The format of the partName stored in the MultipartInfoTable is
+       * "fileName + clientID".
+       *
+       * The contract is that all part names present in a multipart info
+       * will have the same key prefix path.
+       *
+       * For example:
+       *        /vol1/buck1/a/b/c/part-1, /vol1/buck1/a/b/c/part-2,
+       *        /vol1/buck1/a/b/c/part-n
+       */
+      dbPartName = fileName + clientID;
 
       if (multipartKeyInfo == null) {
         // This can occur when user started uploading part by the time commit
@@ -168,9 +179,9 @@ public class S3MultipartUploadCommitPartRequestV1
       // Build this multipart upload part info.
       OzoneManagerProtocolProtos.PartKeyInfo.Builder partKeyInfo =
           OzoneManagerProtocolProtos.PartKeyInfo.newBuilder();
-      partKeyInfo.setPartName(partName);
+      partKeyInfo.setPartName(dbPartName);
       partKeyInfo.setPartNumber(partNumber);
-      partKeyInfo.setPartKeyInfo(omKeyInfo.getProtobuf(
+      partKeyInfo.setPartKeyInfo(omKeyInfo.getProtobuf(fileName,
           getOmRequest().getVersion()));
 
       // Add this part information in to multipartKeyInfo.
@@ -207,9 +218,15 @@ public class S3MultipartUploadCommitPartRequestV1
           keyArgs.getKeyLocationsList().size() * scmBlockSize * factor;
       omBucketInfo.incrUsedBytes(correctedSpace);
 
+      // Prepare response. Sets user given full key part name in 'partName'
+      // attribute in response object.
+      String fullOzoneKeyName = omMetadataManager.getOzoneKey(
+              volumeName, bucketName, keyName);
+      fullKeyPartName = fullOzoneKeyName + clientID;
       omResponse.setCommitMultiPartUploadResponse(
           MultipartCommitUploadPartResponse.newBuilder()
-              .setPartName(partName));
+              .setPartName(fullKeyPartName));
+
       omClientResponse = new S3MultipartUploadCommitPartResponseV1(
           omResponse.build(), multipartKey, openKey,
           multipartKeyInfo, oldPartKeyInfo, omKeyInfo,
@@ -234,8 +251,8 @@ public class S3MultipartUploadCommitPartRequestV1
     }
 
     logResult(ozoneManager, multipartCommitUploadPartRequest, keyArgs,
-            auditMap, volumeName, bucketName, keyName, exception, partName,
-            result);
+            auditMap, volumeName, bucketName, keyName, exception,
+            fullKeyPartName, result);
 
     return omClientResponse;
   }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
index 7bba5bd..f008ac2 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
@@ -18,14 +18,12 @@
 
 package org.apache.hadoop.ozone.om.request.s3.multipart;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-
+import com.google.common.base.Optional;
+import org.apache.commons.codec.digest.DigestUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.audit.OMAction;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
@@ -35,7 +33,10 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
@@ -50,15 +51,19 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKey
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
 import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import com.google.common.base.Optional;
-import org.apache.commons.codec.digest.DigestUtils;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+import static org.apache.hadoop.ozone.OzoneConsts.OM_MULTIPART_MIN_SIZE;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * Handle Multipart upload complete request.
@@ -172,129 +177,25 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
         }
 
         // First Check for Invalid Part Order.
-        int prevPartNumber = partsList.get(0).getPartNumber();
         List< Integer > partNumbers = new ArrayList<>();
-        int partsListSize = partsList.size();
-        partNumbers.add(prevPartNumber);
-        for (int i = 1; i < partsListSize; i++) {
-          int currentPartNumber = partsList.get(i).getPartNumber();
-          if (prevPartNumber >= currentPartNumber) {
-            LOG.error("PartNumber at index {} is {}, and its previous " +
-                    "partNumber at index {} is {} for ozonekey is " +
-                    "{}", i, currentPartNumber, i - 1, prevPartNumber,
-                ozoneKey);
-            throw new OMException(
-                failureMessage(requestedVolume, requestedBucket, keyName) +
-                " because parts are in Invalid order.",
-                OMException.ResultCodes.INVALID_PART_ORDER);
-          }
-          prevPartNumber = currentPartNumber;
-          partNumbers.add(prevPartNumber);
-        }
-
+        int partsListSize = getPartsListSize(requestedVolume,
+                requestedBucket, keyName, ozoneKey, partNumbers, partsList);
 
         List<OmKeyLocationInfo> partLocationInfos = new ArrayList<>();
-        long dataSize = 0;
-        int currentPartCount = 0;
-        // Now do actual logic, and check for any Invalid part during this.
-        for (OzoneManagerProtocolProtos.Part part : partsList) {
-          currentPartCount++;
-          int partNumber = part.getPartNumber();
-          String partName = part.getPartName();
-
-          PartKeyInfo partKeyInfo = partKeyInfoMap.get(partNumber);
-
-          if (partKeyInfo == null ||
-              !partName.equals(partKeyInfo.getPartName())) {
-            String omPartName = partKeyInfo == null ? null :
-                partKeyInfo.getPartName();
-            throw new OMException(
-                failureMessage(requestedVolume, requestedBucket, keyName) +
-                ". Provided Part info is { " + partName + ", " + partNumber +
-                "}, whereas OM has partName " + omPartName,
-                OMException.ResultCodes.INVALID_PART);
-          }
-
-          OmKeyInfo currentPartKeyInfo = OmKeyInfo
-              .getFromProtobuf(partKeyInfo.getPartKeyInfo());
-
-          // Except for last part all parts should have minimum size.
-          if (currentPartCount != partsListSize) {
-            if (currentPartKeyInfo.getDataSize() <
-                ozoneManager.getMinMultipartUploadPartSize()) {
-              LOG.error("MultipartUpload: {} Part number: {} size {}  is less" +
-                      " than minimum part size {}", ozoneKey,
-                  partKeyInfo.getPartNumber(), currentPartKeyInfo.getDataSize(),
-                  ozoneManager.getMinMultipartUploadPartSize());
-              throw new OMException(
-                  failureMessage(requestedVolume, requestedBucket, keyName) +
-                  ". Entity too small.",
-                  OMException.ResultCodes.ENTITY_TOO_SMALL);
-            }
-          }
-
-          // As all part keys will have only one version.
-          OmKeyLocationInfoGroup currentKeyInfoGroup = currentPartKeyInfo
-              .getKeyLocationVersions().get(0);
-
-          // Set partNumber in each block.
-          currentKeyInfoGroup.getLocationList().forEach(
-              omKeyLocationInfo -> omKeyLocationInfo.setPartNumber(partNumber));
-
-          partLocationInfos.addAll(currentKeyInfoGroup.getLocationList());
-          dataSize += currentPartKeyInfo.getDataSize();
-        }
+        long dataSize = getMultipartDataSize(requestedVolume, requestedBucket,
+                keyName, ozoneKey, partKeyInfoMap, partsListSize,
+                partLocationInfos, partsList, ozoneManager);
 
         // All parts have same replication information. Here getting from last
         // part.
-        HddsProtos.ReplicationType type = partKeyInfoMap.lastEntry().getValue()
-            .getPartKeyInfo().getType();
-        HddsProtos.ReplicationFactor factor =
-            partKeyInfoMap.lastEntry().getValue().getPartKeyInfo().getFactor();
-
-        OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
-        if (omKeyInfo == null) {
-          // This is a newly added key, it does not have any versions.
-          OmKeyLocationInfoGroup keyLocationInfoGroup = new
-              OmKeyLocationInfoGroup(0, partLocationInfos, true);
-
-          // Get the objectID of the key from OpenKeyTable
-          OmKeyInfo dbOpenKeyInfo = omMetadataManager.getOpenKeyTable()
-              .get(multipartKey);
-
-          // A newly created key, this is the first version.
-          OmKeyInfo.Builder builder =
-              new OmKeyInfo.Builder().setVolumeName(volumeName)
-              .setBucketName(bucketName).setKeyName(keyName)
-              .setReplicationFactor(factor).setReplicationType(type)
-              .setCreationTime(keyArgs.getModificationTime())
-              .setModificationTime(keyArgs.getModificationTime())
-              .setDataSize(dataSize)
-              .setFileEncryptionInfo(dbOpenKeyInfo.getFileEncryptionInfo())
-              .setOmKeyLocationInfos(
-                  Collections.singletonList(keyLocationInfoGroup))
-              .setAcls(dbOpenKeyInfo.getAcls());
-          // Check if db entry has ObjectID. This check is required because
-          // it is possible that between multipart key uploads and complete,
-          // we had an upgrade.
-          if (dbOpenKeyInfo.getObjectID() != 0) {
-            builder.setObjectID(dbOpenKeyInfo.getObjectID());
-          }
-          omKeyInfo = builder.build();
-        } else {
-          // Already a version exists, so we should add it as a new version.
-          // But now as versioning is not supported, just following the commit
-          // key approach. When versioning support comes, then we can uncomment
-          // below code keyInfo.addNewVersion(locations);
-          omKeyInfo.updateLocationInfoList(partLocationInfos, true);
-          omKeyInfo.setModificationTime(keyArgs.getModificationTime());
-          omKeyInfo.setDataSize(dataSize);
-        }
-        omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
+        OmKeyInfo omKeyInfo = getOmKeyInfo(ozoneManager, trxnLogIndex, keyArgs,
+                volumeName, bucketName, keyName, multipartKey,
+                omMetadataManager, ozoneKey, partKeyInfoMap, partLocationInfos,
+                dataSize);
 
         //Find all unused parts.
-        List< OmKeyInfo > unUsedParts = new ArrayList<>();
-        for (Map.Entry< Integer, PartKeyInfo > partKeyInfo :
+        List<OmKeyInfo> unUsedParts = new ArrayList<>();
+        for (Map.Entry< Integer, PartKeyInfo> partKeyInfo :
             partKeyInfoMap.entrySet()) {
           if (!partNumbers.contains(partKeyInfo.getKey())) {
             unUsedParts.add(OmKeyInfo
@@ -337,6 +238,19 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
       }
     }
 
+    logResult(ozoneManager, multipartUploadCompleteRequest, partsList,
+            auditMap, volumeName, bucketName, keyName, exception, result);
+
+    return omClientResponse;
+  }
+
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  protected void logResult(OzoneManager ozoneManager,
+      MultipartUploadCompleteRequest multipartUploadCompleteRequest,
+      List<OzoneManagerProtocolProtos.Part> partsList,
+      Map<String, String> auditMap, String volumeName,
+      String bucketName, String keyName, IOException exception,
+      Result result) {
     auditMap.put(OzoneConsts.MULTIPART_LIST, partsList.toString());
 
     // audit log
@@ -359,8 +273,183 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
       LOG.error("Unrecognized Result for S3MultipartUploadCommitRequest: {}",
           multipartUploadCompleteRequest);
     }
+  }
 
-    return omClientResponse;
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  protected OmKeyInfo getOmKeyInfo(OzoneManager ozoneManager, long trxnLogIndex,
+      KeyArgs keyArgs, String volumeName, String bucketName, String keyName,
+      String multipartKey, OMMetadataManager omMetadataManager,
+      String ozoneKey, TreeMap<Integer, PartKeyInfo> partKeyInfoMap,
+      List<OmKeyLocationInfo> partLocationInfos, long dataSize)
+          throws IOException {
+    HddsProtos.ReplicationType type = partKeyInfoMap.lastEntry().getValue()
+        .getPartKeyInfo().getType();
+    HddsProtos.ReplicationFactor factor =
+        partKeyInfoMap.lastEntry().getValue().getPartKeyInfo().getFactor();
+
+    OmKeyInfo omKeyInfo = getOmKeyInfoFromKeyTable(ozoneKey, keyName,
+            omMetadataManager);
+    if (omKeyInfo == null) {
+      // This is a newly added key, it does not have any versions.
+      OmKeyLocationInfoGroup keyLocationInfoGroup = new
+          OmKeyLocationInfoGroup(0, partLocationInfos, true);
+
+      // Get the objectID of the key from OpenKeyTable
+      OmKeyInfo dbOpenKeyInfo = getOmKeyInfoFromOpenKeyTable(multipartKey,
+              keyName, omMetadataManager);
+
+      // A newly created key, this is the first version.
+      OmKeyInfo.Builder builder =
+          new OmKeyInfo.Builder().setVolumeName(volumeName)
+          .setBucketName(bucketName).setKeyName(dbOpenKeyInfo.getKeyName())
+          .setReplicationFactor(factor).setReplicationType(type)
+          .setCreationTime(keyArgs.getModificationTime())
+          .setModificationTime(keyArgs.getModificationTime())
+          .setDataSize(dataSize)
+          .setFileEncryptionInfo(dbOpenKeyInfo.getFileEncryptionInfo())
+          .setOmKeyLocationInfos(
+              Collections.singletonList(keyLocationInfoGroup))
+          .setAcls(dbOpenKeyInfo.getAcls());
+      // Check if db entry has ObjectID. This check is required because
+      // it is possible that between multipart key uploads and complete,
+      // we had an upgrade.
+      if (dbOpenKeyInfo.getObjectID() != 0) {
+        builder.setObjectID(dbOpenKeyInfo.getObjectID());
+      }
+      updatePrefixFSOInfo(dbOpenKeyInfo, builder);
+      omKeyInfo = builder.build();
+    } else {
+      // Already a version exists, so we should add it as a new version.
+      // But now as versioning is not supported, just following the commit
+      // key approach. When versioning support comes, then we can uncomment
+      // below code keyInfo.addNewVersion(locations);
+      omKeyInfo.updateLocationInfoList(partLocationInfos, true);
+      omKeyInfo.setModificationTime(keyArgs.getModificationTime());
+      omKeyInfo.setDataSize(dataSize);
+    }
+    omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
+    return omKeyInfo;
+  }
+
+  protected void updatePrefixFSOInfo(OmKeyInfo dbOpenKeyInfo,
+      OmKeyInfo.Builder builder) {
+    // FSOBucket is disabled. Do nothing.
+  }
+
+  protected OmKeyInfo getOmKeyInfoFromKeyTable(String dbOzoneKey,
+      String keyName, OMMetadataManager omMetadataManager) throws IOException {
+    return omMetadataManager.getKeyTable().get(dbOzoneKey);
+  }
+
+  protected OmKeyInfo getOmKeyInfoFromOpenKeyTable(String dbMultipartKey,
+      String keyName, OMMetadataManager omMetadataManager) throws IOException {
+    return omMetadataManager.getOpenKeyTable().get(dbMultipartKey);
+  }
+
+  protected int getPartsListSize(String requestedVolume,
+      String requestedBucket, String keyName, String ozoneKey,
+      List<Integer> partNumbers,
+      List<OzoneManagerProtocolProtos.Part> partsList) throws OMException {
+    int prevPartNumber = partsList.get(0).getPartNumber();
+    int partsListSize = partsList.size();
+    partNumbers.add(prevPartNumber);
+    for (int i = 1; i < partsListSize; i++) {
+      int currentPartNumber = partsList.get(i).getPartNumber();
+      if (prevPartNumber >= currentPartNumber) {
+        LOG.error("PartNumber at index {} is {}, and its previous " +
+                "partNumber at index {} is {} for ozonekey is " +
+                "{}", i, currentPartNumber, i - 1, prevPartNumber,
+            ozoneKey);
+        throw new OMException(
+            failureMessage(requestedVolume, requestedBucket, keyName) +
+            " because parts are in Invalid order.",
+            OMException.ResultCodes.INVALID_PART_ORDER);
+      }
+      prevPartNumber = currentPartNumber;
+      partNumbers.add(prevPartNumber);
+    }
+    return partsListSize;
+  }
+
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  protected long getMultipartDataSize(String requestedVolume,
+      String requestedBucket, String keyName, String ozoneKey,
+      TreeMap<Integer, PartKeyInfo> partKeyInfoMap,
+      int partsListSize, List<OmKeyLocationInfo> partLocationInfos,
+      List<OzoneManagerProtocolProtos.Part> partsList,
+      OzoneManager ozoneManager) throws OMException {
+    long dataSize = 0;
+    int currentPartCount = 0;
+    // Now do actual logic, and check for any Invalid part during this.
+    for (OzoneManagerProtocolProtos.Part part : partsList) {
+      currentPartCount++;
+      int partNumber = part.getPartNumber();
+      String partName = part.getPartName();
+
+      PartKeyInfo partKeyInfo = partKeyInfoMap.get(partNumber);
+
+      String dbPartName = null;
+      if (partKeyInfo != null) {
+        dbPartName = preparePartName(requestedVolume, requestedBucket, keyName,
+                partKeyInfo, ozoneManager.getMetadataManager());
+      }
+      if (!StringUtils.equals(partName, dbPartName)) {
+        String omPartName = partKeyInfo == null ? null : dbPartName;
+        throw new OMException(
+            failureMessage(requestedVolume, requestedBucket, keyName) +
+            ". Provided Part info is { " + partName + ", " + partNumber +
+            "}, whereas OM has partName " + omPartName,
+            OMException.ResultCodes.INVALID_PART);
+      }
+
+      OmKeyInfo currentPartKeyInfo = OmKeyInfo
+          .getFromProtobuf(partKeyInfo.getPartKeyInfo());
+
+      // Except for last part all parts should have minimum size.
+      if (currentPartCount != partsListSize) {
+        if (currentPartKeyInfo.getDataSize() <
+            ozoneManager.getMinMultipartUploadPartSize()) {
+          LOG.error("MultipartUpload: {} Part number: {} size {}  is less" +
+                  " than minimum part size {}", ozoneKey,
+              partKeyInfo.getPartNumber(), currentPartKeyInfo.getDataSize(),
+              ozoneManager.getMinMultipartUploadPartSize());
+          throw new OMException(
+              failureMessage(requestedVolume, requestedBucket, keyName) +
+                  ". Entity too small.",
+              OMException.ResultCodes.ENTITY_TOO_SMALL);
+        }
+      }
+
+      // As all part keys will have only one version.
+      OmKeyLocationInfoGroup currentKeyInfoGroup = currentPartKeyInfo
+          .getKeyLocationVersions().get(0);
+
+      // Set partNumber in each block.
+      currentKeyInfoGroup.getLocationList().forEach(
+          omKeyLocationInfo -> omKeyLocationInfo.setPartNumber(partNumber));
+
+      partLocationInfos.addAll(currentKeyInfoGroup.getLocationList());
+      dataSize += currentPartKeyInfo.getDataSize();
+    }
+    return dataSize;
+  }
+
+  private String preparePartName(String requestedVolume,
+      String requestedBucket, String keyName, PartKeyInfo partKeyInfo,
+      OMMetadataManager omMetadataManager) {
+
+    String partName;
+    if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
+      String parentPath = OzoneFSUtils.getParent(keyName);
+      StringBuffer keyPath = new StringBuffer(parentPath);
+      keyPath.append(partKeyInfo.getPartName());
+
+      partName = omMetadataManager.getOzoneKey(requestedVolume,
+              requestedBucket, keyPath.toString());
+    } else {
+      partName = partKeyInfo.getPartName();
+    }
+    return partName;
   }
 
   private static String failureMessage(String volume, String bucket,
@@ -369,7 +458,7 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
         volume + " bucket: " + bucket + " key: " + keyName;
   }
 
-  private void updateCache(OMMetadataManager omMetadataManager,
+  protected void updateCache(OMMetadataManager omMetadataManager,
       String ozoneKey, String multipartKey, OmKeyInfo omKeyInfo,
       long transactionLogIndex) {
     // Update cache.
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestV1.java
new file mode 100644
index 0000000..4ab9ee7
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestV1.java
@@ -0,0 +1,268 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.s3.multipart;
+
+import com.google.common.base.Optional;
+import org.apache.commons.codec.digest.DigestUtils;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.s3.multipart.S3MultipartUploadCompleteResponseV1;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadCompleteRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadCompleteResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS;
+
+/**
+ * Handle Multipart upload complete request.
+ */
+public class S3MultipartUploadCompleteRequestV1
+        extends S3MultipartUploadCompleteRequest {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(S3MultipartUploadCompleteRequestV1.class);
+
+  public S3MultipartUploadCompleteRequestV1(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override
+  @SuppressWarnings("methodlength")
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+    MultipartUploadCompleteRequest multipartUploadCompleteRequest =
+        getOmRequest().getCompleteMultiPartUploadRequest();
+
+    KeyArgs keyArgs = multipartUploadCompleteRequest.getKeyArgs();
+
+    List<OzoneManagerProtocolProtos.Part> partsList =
+        multipartUploadCompleteRequest.getPartsListList();
+    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
+
+    String volumeName = keyArgs.getVolumeName();
+    String bucketName = keyArgs.getBucketName();
+    final String requestedVolume = volumeName;
+    final String requestedBucket = bucketName;
+    String keyName = keyArgs.getKeyName();
+    String uploadID = keyArgs.getMultipartUploadID();
+    String dbMultipartKey;
+
+    ozoneManager.getMetrics().incNumCompleteMultipartUploads();
+
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+
+    boolean acquiredLock = false;
+    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
+        getOmRequest());
+    OMClientResponse omClientResponse = null;
+    IOException exception = null;
+    Result result;
+    try {
+      keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
+      volumeName = keyArgs.getVolumeName();
+      bucketName = keyArgs.getBucketName();
+
+      acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
+          volumeName, bucketName);
+
+      validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
+
+      String fileName = OzoneFSUtils.getFileName(keyName);
+      Path keyPath = Paths.get(keyName);
+      OMFileRequest.OMPathInfoV1 pathInfoV1 =
+              OMFileRequest.verifyDirectoryKeysInPath(omMetadataManager,
+                      volumeName, bucketName, keyName, keyPath);
+      long parentID = pathInfoV1.getLastKnownParentId();
+
+      dbMultipartKey = omMetadataManager.getMultipartKey(parentID,
+              fileName, uploadID);
+
+      String dbOzoneKey = omMetadataManager.getOzonePathKey(parentID, fileName);
+
+      String ozoneKey = omMetadataManager.getOzoneKey(
+              volumeName, bucketName, keyName);
+
+      OmMultipartKeyInfo multipartKeyInfo =
+              omMetadataManager.getMultipartInfoTable().get(dbMultipartKey);
+
+      // Check for directory exists with same name, if it exists throw error.
+      if (pathInfoV1.getDirectoryResult() == DIRECTORY_EXISTS) {
+        throw new OMException("Can not Complete MPU for file: " + keyName +
+                " as there is already directory in the given path",
+                NOT_A_FILE);
+      }
+
+      if (multipartKeyInfo == null) {
+        throw new OMException(
+            failureMessage(requestedVolume, requestedBucket, keyName),
+            OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
+      }
+      TreeMap<Integer, PartKeyInfo> partKeyInfoMap =
+          multipartKeyInfo.getPartKeyInfoMap();
+
+      if (partsList.size() > 0) {
+        if (partKeyInfoMap.size() == 0) {
+          LOG.error("Complete MultipartUpload failed for key {} , MPU Key has" +
+                  " no parts in OM, parts given to upload are {}", ozoneKey,
+              partsList);
+          throw new OMException(
+              failureMessage(requestedVolume, requestedBucket, keyName),
+              OMException.ResultCodes.INVALID_PART);
+        }
+
+        // First Check for Invalid Part Order.
+        List< Integer > partNumbers = new ArrayList<>();
+        int partsListSize = getPartsListSize(requestedVolume,
+                requestedBucket, keyName, ozoneKey, partNumbers, partsList);
+
+        List<OmKeyLocationInfo> partLocationInfos = new ArrayList<>();
+        long dataSize = getMultipartDataSize(requestedVolume, requestedBucket,
+                keyName, ozoneKey, partKeyInfoMap, partsListSize,
+                partLocationInfos, partsList, ozoneManager);
+
+        // All parts have same replication information. Here getting from last
+        // part.
+        OmKeyInfo omKeyInfo = getOmKeyInfo(ozoneManager, trxnLogIndex, keyArgs,
+                volumeName, bucketName, keyName, dbMultipartKey,
+                omMetadataManager, dbOzoneKey, partKeyInfoMap,
+                partLocationInfos, dataSize);
+
+        //Find all unused parts.
+        List< OmKeyInfo > unUsedParts = new ArrayList<>();
+        for (Map.Entry< Integer, PartKeyInfo > partKeyInfo :
+            partKeyInfoMap.entrySet()) {
+          if (!partNumbers.contains(partKeyInfo.getKey())) {
+            unUsedParts.add(OmKeyInfo
+                .getFromProtobuf(partKeyInfo.getValue().getPartKeyInfo()));
+          }
+        }
+
+        updateCache(omMetadataManager, dbOzoneKey, dbMultipartKey, omKeyInfo,
+            trxnLogIndex);
+
+        omResponse.setCompleteMultiPartUploadResponse(
+            MultipartUploadCompleteResponse.newBuilder()
+                .setVolume(requestedVolume)
+                .setBucket(requestedBucket)
+                .setKey(keyName)
+                .setHash(DigestUtils.sha256Hex(keyName)));
+
+        omClientResponse = new S3MultipartUploadCompleteResponseV1(
+            omResponse.build(), dbMultipartKey, omKeyInfo, unUsedParts);
+
+        result = Result.SUCCESS;
+      } else {
+        throw new OMException(
+            failureMessage(requestedVolume, requestedBucket, keyName) +
+            " because of empty part list",
+            OMException.ResultCodes.INVALID_REQUEST);
+      }
+
+    } catch (IOException ex) {
+      result = Result.FAILURE;
+      exception = ex;
+      omClientResponse = new S3MultipartUploadCompleteResponseV1(
+          createErrorOMResponse(omResponse, exception));
+    } finally {
+      addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
+          omDoubleBufferHelper);
+      if (acquiredLock) {
+        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK,
+            volumeName, bucketName);
+      }
+    }
+
+    logResult(ozoneManager, multipartUploadCompleteRequest, partsList,
+            auditMap, volumeName, bucketName, keyName, exception, result);
+
+    return omClientResponse;
+  }
+
+  protected OmKeyInfo getOmKeyInfoFromKeyTable(String dbOzoneFileKey,
+      String keyName, OMMetadataManager omMetadataManager) throws IOException {
+    return OMFileRequest.getOmKeyInfoFromFileTable(true,
+            omMetadataManager, dbOzoneFileKey, keyName);
+  }
+
+  @Override
+  protected OmKeyInfo getOmKeyInfoFromOpenKeyTable(String dbMultipartKey,
+      String keyName, OMMetadataManager omMetadataManager) throws IOException {
+    return OMFileRequest.getOmKeyInfoFromFileTable(true,
+            omMetadataManager, dbMultipartKey, keyName);
+  }
+
+  @Override
+  protected void updateCache(OMMetadataManager omMetadataManager,
+      String ozoneKey, String multipartKey, OmKeyInfo omKeyInfo,
+      long transactionLogIndex) {
+    // Update cache.
+    // 1. Add key entry to key table.
+    // 2. Delete multipartKey entry from openKeyTable and multipartInfo table.
+    OMFileRequest.addFileTableCacheEntry(omMetadataManager, ozoneKey,
+            omKeyInfo, omKeyInfo.getFileName(), transactionLogIndex);
+
+    omMetadataManager.getOpenKeyTable().addCacheEntry(
+            new CacheKey<>(multipartKey),
+            new CacheValue<>(Optional.absent(), transactionLogIndex));
+    omMetadataManager.getMultipartInfoTable().addCacheEntry(
+            new CacheKey<>(multipartKey),
+            new CacheValue<>(Optional.absent(), transactionLogIndex));
+  }
+
+  protected void updatePrefixFSOInfo(OmKeyInfo dbOpenKeyInfo,
+                                     OmKeyInfo.Builder builder) {
+    // updates parentID and fileName
+    builder.setParentObjectID(dbOpenKeyInfo.getParentObjectID());
+    builder.setFileName(dbOpenKeyInfo.getFileName());
+  }
+
+  private static String failureMessage(String volume, String bucket,
+                                       String keyName) {
+    return "Complete Multipart Upload Failed: volume: " +
+        volume + " bucket: " + bucket + " key: " + keyName;
+  }
+}
+
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java
index 20e398e..f593885 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java
@@ -96,4 +96,15 @@ public class S3MultipartUploadCompleteResponse extends OMClientResponse {
     }
   }
 
-}
\ No newline at end of file
+  protected String getMultipartKey() {
+    return multipartKey;
+  }
+
+  protected OmKeyInfo getOmKeyInfo() {
+    return omKeyInfo;
+  }
+
+  protected List<OmKeyInfo> getPartsUnusedList() {
+    return partsUnusedList;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseV1.java
similarity index 66%
copy from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java
copy to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseV1.java
index 20e398e..bb31dce 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseV1.java
@@ -18,82 +18,74 @@
 
 package org.apache.hadoop.ozone.om.response.s3.multipart;
 
-import java.io.IOException;
-import java.util.List;
-
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 
 import javax.annotation.Nonnull;
+import java.io.IOException;
+import java.util.List;
 
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE;
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE;
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.MULTIPARTINFO_TABLE;
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.MULTIPARTFILEINFO_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
 
 /**
  * Response for Multipart Upload Complete request.
  */
-@CleanupTableInfo(cleanupTables = {OPEN_KEY_TABLE, KEY_TABLE, DELETED_TABLE,
-    MULTIPARTINFO_TABLE})
-public class S3MultipartUploadCompleteResponse extends OMClientResponse {
-  private String multipartKey;
-  private OmKeyInfo omKeyInfo;
-  private List<OmKeyInfo> partsUnusedList;
+@CleanupTableInfo(cleanupTables = {OPEN_FILE_TABLE, FILE_TABLE, DELETED_TABLE,
+    MULTIPARTFILEINFO_TABLE})
+public class S3MultipartUploadCompleteResponseV1
+        extends S3MultipartUploadCompleteResponse {
 
-  public S3MultipartUploadCompleteResponse(
+  public S3MultipartUploadCompleteResponseV1(
       @Nonnull OMResponse omResponse,
       @Nonnull String multipartKey,
       @Nonnull OmKeyInfo omKeyInfo,
       @Nonnull List<OmKeyInfo> unUsedParts) {
-    super(omResponse);
-    this.partsUnusedList = unUsedParts;
-    this.multipartKey = multipartKey;
-    this.omKeyInfo = omKeyInfo;
+    super(omResponse, multipartKey, omKeyInfo, unUsedParts);
   }
 
   /**
    * For when the request is not successful.
    * For a successful request, the other constructor should be used.
    */
-  public S3MultipartUploadCompleteResponse(@Nonnull OMResponse omResponse) {
+  public S3MultipartUploadCompleteResponseV1(@Nonnull OMResponse omResponse) {
     super(omResponse);
     checkStatusNotOK();
   }
 
+
   @Override
   public void addToDBBatch(OMMetadataManager omMetadataManager,
       BatchOperation batchOperation) throws IOException {
 
     omMetadataManager.getOpenKeyTable().deleteWithBatch(batchOperation,
-        multipartKey);
+            getMultipartKey());
     omMetadataManager.getMultipartInfoTable().deleteWithBatch(batchOperation,
-        multipartKey);
+            getMultipartKey());
 
-    String ozoneKey = omMetadataManager.getOzoneKey(omKeyInfo.getVolumeName(),
-        omKeyInfo.getBucketName(), omKeyInfo.getKeyName());
-    omMetadataManager.getKeyTable().putWithBatch(batchOperation, ozoneKey,
-        omKeyInfo);
+    String dbFileKey = OMFileRequest.addToFileTable(omMetadataManager,
+            batchOperation, getOmKeyInfo());
 
-    if (!partsUnusedList.isEmpty()) {
+    if (!getPartsUnusedList().isEmpty()) {
       // Add unused parts to deleted key table.
       RepeatedOmKeyInfo repeatedOmKeyInfo = omMetadataManager.getDeletedTable()
-          .get(ozoneKey);
+              .get(dbFileKey);
       if (repeatedOmKeyInfo == null) {
-        repeatedOmKeyInfo = new RepeatedOmKeyInfo(partsUnusedList);
+        repeatedOmKeyInfo = new RepeatedOmKeyInfo(getPartsUnusedList());
       } else {
-        repeatedOmKeyInfo.addOmKeyInfo(omKeyInfo);
+        repeatedOmKeyInfo.addOmKeyInfo(getOmKeyInfo());
       }
 
       omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
-          ozoneKey, repeatedOmKeyInfo);
+              dbFileKey, repeatedOmKeyInfo);
     }
   }
+}
 
-}
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
index 9f6cff8..16cb4ae 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
@@ -115,7 +115,7 @@ public class TestS3MultipartRequest {
             keyName);
 
     S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
-        new S3InitiateMultipartUploadRequest(omRequest);
+        getS3InitiateMultipartUploadReq(omRequest);
 
     OMRequest modifiedRequest =
         s3InitiateMultipartUploadRequest.preExecute(ozoneManager);
@@ -204,7 +204,7 @@ public class TestS3MultipartRequest {
             keyName, multipartUploadID, partList);
 
     S3MultipartUploadCompleteRequest s3MultipartUploadCompleteRequest =
-        new S3MultipartUploadCompleteRequest(omRequest);
+            getS3MultipartUploadCompleteReq(omRequest);
 
     OMRequest modifiedRequest =
         s3MultipartUploadCompleteRequest.preExecute(ozoneManager);
@@ -247,6 +247,11 @@ public class TestS3MultipartRequest {
     return modifiedRequest;
   }
 
+  protected S3MultipartUploadCompleteRequest getS3MultipartUploadCompleteReq(
+          OMRequest omRequest) {
+    return new S3MultipartUploadCompleteRequest(omRequest);
+  }
+
   protected S3MultipartUploadCommitPartRequest getS3MultipartUploadCommitReq(
           OMRequest omRequest) {
     return new S3MultipartUploadCommitPartRequest(omRequest);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java
index a04f51f..3d399b1 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.ozone.om.request.s3.multipart;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.UUID;
@@ -55,7 +56,7 @@ public class TestS3MultipartUploadCompleteRequest
   public void testValidateAndUpdateCacheSuccess() throws Exception {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
 
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
@@ -64,7 +65,7 @@ public class TestS3MultipartUploadCompleteRequest
         bucketName, keyName);
 
     S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
-        new S3InitiateMultipartUploadRequest(initiateMPURequest);
+        getS3InitiateMultipartUploadReq(initiateMPURequest);
 
     OMClientResponse omClientResponse =
         s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager,
@@ -78,27 +79,25 @@ public class TestS3MultipartUploadCompleteRequest
         bucketName, keyName, clientID, multipartUploadID, 1);
 
     S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest =
-        new S3MultipartUploadCommitPartRequest(commitMultipartRequest);
+        getS3MultipartUploadCommitReq(commitMultipartRequest);
 
     // Add key to open key table.
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName,
-        keyName, clientID, HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
+    addKeyToTable(volumeName, bucketName, keyName, clientID);
 
     s3MultipartUploadCommitPartRequest.validateAndUpdateCache(ozoneManager,
         2L, ozoneManagerDoubleBufferHelper);
 
     List<Part> partList = new ArrayList<>();
 
-    partList.add(Part.newBuilder().setPartName(
-        omMetadataManager.getOzoneKey(volumeName, bucketName, keyName) +
-            clientID).setPartNumber(1).build());
+    String partName = getPartName(volumeName, bucketName, keyName, clientID);
+    partList.add(Part.newBuilder().setPartName(partName).setPartNumber(1)
+            .build());
 
     OMRequest completeMultipartRequest = doPreExecuteCompleteMPU(volumeName,
         bucketName, keyName, multipartUploadID, partList);
 
     S3MultipartUploadCompleteRequest s3MultipartUploadCompleteRequest =
-        new S3MultipartUploadCompleteRequest(completeMultipartRequest);
+        getS3MultipartUploadCompleteReq(completeMultipartRequest);
 
     omClientResponse =
         s3MultipartUploadCompleteRequest.validateAndUpdateCache(ozoneManager,
@@ -107,14 +106,71 @@ public class TestS3MultipartUploadCompleteRequest
     Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
         omClientResponse.getOMResponse().getStatus());
 
-    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
-        bucketName, keyName, multipartUploadID);
+    String multipartKey = getMultipartKey(volumeName, bucketName, keyName,
+            multipartUploadID);
 
     Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
     Assert.assertNull(
         omMetadataManager.getMultipartInfoTable().get(multipartKey));
     Assert.assertNotNull(omMetadataManager.getKeyTable().get(
-        omMetadataManager.getOzoneKey(volumeName, bucketName, keyName)));
+            getOzoneDBKey(volumeName, bucketName, keyName)));
+  }
+
+  @Test
+  public void testInvalidPartOrderError() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
+
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+
+    OMRequest initiateMPURequest = doPreExecuteInitiateMPU(volumeName,
+            bucketName, keyName);
+
+    S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
+            getS3InitiateMultipartUploadReq(initiateMPURequest);
+
+    OMClientResponse omClientResponse =
+            s3InitiateMultipartUploadRequest.validateAndUpdateCache(
+                    ozoneManager, 1L, ozoneManagerDoubleBufferHelper);
+
+    long clientID = Time.now();
+    String multipartUploadID = omClientResponse.getOMResponse()
+            .getInitiateMultiPartUploadResponse().getMultipartUploadID();
+
+    OMRequest commitMultipartRequest = doPreExecuteCommitMPU(volumeName,
+            bucketName, keyName, clientID, multipartUploadID, 1);
+
+    S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest =
+            getS3MultipartUploadCommitReq(commitMultipartRequest);
+
+    // Add key to open key table.
+    addKeyToTable(volumeName, bucketName, keyName, clientID);
+
+    s3MultipartUploadCommitPartRequest.validateAndUpdateCache(ozoneManager,
+            2L, ozoneManagerDoubleBufferHelper);
+
+    List<Part> partList = new ArrayList<>();
+
+    String partName = getPartName(volumeName, bucketName, keyName, clientID);
+    partList.add(Part.newBuilder().setPartName(partName).setPartNumber(23)
+            .build());
+    partList.add(Part.newBuilder().setPartName(partName).setPartNumber(1)
+            .build());
+
+    OMRequest completeMultipartRequest = doPreExecuteCompleteMPU(volumeName,
+            bucketName, keyName, multipartUploadID, partList);
+
+    S3MultipartUploadCompleteRequest s3MultipartUploadCompleteRequest =
+            getS3MultipartUploadCompleteReq(completeMultipartRequest);
+
+    omClientResponse =
+            s3MultipartUploadCompleteRequest.validateAndUpdateCache(
+                    ozoneManager, 3L, ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.INVALID_PART_ORDER,
+            omClientResponse.getOMResponse().getStatus());
   }
 
   @Test
@@ -129,7 +185,7 @@ public class TestS3MultipartUploadCompleteRequest
         bucketName, keyName, UUID.randomUUID().toString(), partList);
 
     S3MultipartUploadCompleteRequest s3MultipartUploadCompleteRequest =
-        new S3MultipartUploadCompleteRequest(completeMultipartRequest);
+        getS3MultipartUploadCompleteReq(completeMultipartRequest);
 
     OMClientResponse omClientResponse =
         s3MultipartUploadCompleteRequest.validateAndUpdateCache(ozoneManager,
@@ -153,7 +209,7 @@ public class TestS3MultipartUploadCompleteRequest
         bucketName, keyName, UUID.randomUUID().toString(), partList);
 
     S3MultipartUploadCompleteRequest s3MultipartUploadCompleteRequest =
-        new S3MultipartUploadCompleteRequest(completeMultipartRequest);
+            getS3MultipartUploadCompleteReq(completeMultipartRequest);
 
     OMClientResponse omClientResponse =
         s3MultipartUploadCompleteRequest.validateAndUpdateCache(ozoneManager,
@@ -180,7 +236,7 @@ public class TestS3MultipartUploadCompleteRequest
 
     // Doing  complete multipart upload request with out initiate.
     S3MultipartUploadCompleteRequest s3MultipartUploadCompleteRequest =
-        new S3MultipartUploadCompleteRequest(completeMultipartRequest);
+            getS3MultipartUploadCompleteReq(completeMultipartRequest);
 
     OMClientResponse omClientResponse =
         s3MultipartUploadCompleteRequest.validateAndUpdateCache(ozoneManager,
@@ -191,5 +247,35 @@ public class TestS3MultipartUploadCompleteRequest
         omClientResponse.getOMResponse().getStatus());
 
   }
+
+  protected void addKeyToTable(String volumeName, String bucketName,
+                             String keyName, long clientID) throws Exception {
+    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName,
+            keyName, clientID, HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE, omMetadataManager);
+  }
+
+  protected String getMultipartKey(String volumeName, String bucketName,
+      String keyName, String multipartUploadID) throws IOException {
+    return omMetadataManager.getMultipartKey(volumeName,
+            bucketName, keyName, multipartUploadID);
+  }
+
+  private String getPartName(String volumeName, String bucketName,
+      String keyName, long clientID) throws IOException {
+
+    String dbOzoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
+            keyName);
+    return dbOzoneKey + clientID;
+  }
+
+  protected String getOzoneDBKey(String volumeName, String bucketName,
+                                 String keyName) throws IOException {
+    return omMetadataManager.getOzoneKey(volumeName, bucketName, keyName);
+  }
+
+  protected String getKeyName() {
+    return UUID.randomUUID().toString();
+  }
 }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestV1.java
new file mode 100644
index 0000000..cd5051f
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestV1.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.s3.multipart;
+
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.Time;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Iterator;
+import java.util.UUID;
+
+/**
+ * Tests S3 Multipart Upload Complete request with FS-Optimized (V1) layout.
+ */
+public class TestS3MultipartUploadCompleteRequestV1
+    extends TestS3MultipartUploadCompleteRequest {
+
+  @BeforeClass
+  public static void init() {
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
+  }
+
+  protected String getKeyName() {
+    String parentDir = UUID.randomUUID().toString() + "/a/b/c";
+    String fileName = "file1";
+    String keyName = parentDir + OzoneConsts.OM_KEY_PREFIX + fileName;
+    return keyName;
+  }
+
+  protected void addKeyToTable(String volumeName, String bucketName,
+      String keyName, long clientID) throws Exception {
+    // need to initialize parentID
+    String parentDir = OzoneFSUtils.getParentDir(keyName);
+    Assert.assertNotEquals("Parent doesn't exists!", parentDir, keyName);
+
+    // add parentDir to dirTable
+    long parentID = getParentID(volumeName, bucketName, keyName);
+    long txnId = 50;
+    long objectId = parentID + 1;
+
+    OmKeyInfo omKeyInfoV1 =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId,
+                    Time.now());
+
+    // add key to openFileTable
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    omKeyInfoV1.setKeyName(fileName);
+    TestOMRequestUtils.addFileToKeyTable(true, false,
+            fileName, omKeyInfoV1, clientID, omKeyInfoV1.getObjectID(),
+            omMetadataManager);
+  }
+
+  protected String getMultipartKey(String volumeName, String bucketName,
+      String keyName, String multipartUploadID) throws IOException {
+    OzoneFileStatus keyStatus = OMFileRequest.getOMKeyInfoIfExists(
+            omMetadataManager, volumeName,
+            bucketName, keyName, 0);
+
+    Assert.assertNotNull("key not found in DB!", keyStatus);
+
+    return omMetadataManager.getMultipartKey(keyStatus.getKeyInfo()
+                    .getParentObjectID(), keyStatus.getTrimmedName(),
+            multipartUploadID);
+  }
+
+  private long getParentID(String volumeName, String bucketName,
+                           String keyName) throws IOException {
+    Path keyPath = Paths.get(keyName);
+    Iterator<Path> elements = keyPath.iterator();
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+
+    return OMFileRequest.getParentID(omBucketInfo.getObjectID(),
+            elements, keyName, omMetadataManager);
+  }
+
+  protected String getOzoneDBKey(String volumeName, String bucketName,
+                                 String keyName) throws IOException {
+    long parentID = getParentID(volumeName, bucketName, keyName);
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    return omMetadataManager.getOzonePathKey(parentID, fileName);
+  }
+
+  protected S3MultipartUploadCompleteRequest getS3MultipartUploadCompleteReq(
+          OMRequest omRequest) {
+    return new S3MultipartUploadCompleteRequestV1(omRequest);
+  }
+
+  protected S3MultipartUploadCommitPartRequest getS3MultipartUploadCommitReq(
+          OMRequest omRequest) {
+    return new S3MultipartUploadCommitPartRequestV1(omRequest);
+  }
+
+  protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq(
+          OMRequest initiateMPURequest) {
+    return new S3InitiateMultipartUploadRequestV1(initiateMPURequest);
+  }
+
+}
+
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
index 106ae61..6f4d6fa 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
@@ -268,6 +268,27 @@ public class TestS3MultipartResponse {
             openPartKeyInfoToBeDeleted, isRatisEnabled, omBucketInfo);
   }
 
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  public S3MultipartUploadCompleteResponse createS3CompleteMPUResponseV1(
+          String volumeName, String bucketName, long parentID, String keyName,
+          String multipartUploadID, OmKeyInfo omKeyInfo,
+          OzoneManagerProtocolProtos.Status status,
+          List<OmKeyInfo> unUsedParts) {
+
+    String multipartKey = getMultipartKey(parentID, keyName, multipartUploadID);
+
+    OMResponse omResponse = OMResponse.newBuilder()
+            .setCmdType(OzoneManagerProtocolProtos.Type.CompleteMultiPartUpload)
+            .setStatus(status).setSuccess(true)
+            .setCompleteMultiPartUploadResponse(
+                    OzoneManagerProtocolProtos.MultipartUploadCompleteResponse
+                            .newBuilder().setBucket(bucketName)
+                            .setVolume(volumeName).setKey(keyName)).build();
+
+    return new S3MultipartUploadCompleteResponseV1(omResponse, multipartKey,
+            omKeyInfo, unUsedParts);
+  }
+
   private String getMultipartKey(long parentID, String keyName,
                                  String multipartUploadID) {
     String fileName = OzoneFSUtils.getFileName(keyName);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseV1.java
new file mode 100644
index 0000000..2683273
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseV1.java
@@ -0,0 +1,257 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.s3.multipart;
+
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo;
+import org.apache.hadoop.util.Time;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.UUID;
+
+/**
+ * Tests S3 multipart upload complete response with FS-Optimized (V1) layout.
+ */
+public class TestS3MultipartUploadCompleteResponseV1
+    extends TestS3MultipartResponse {
+
+  private String dirName = "a/b/c/";
+
+  private long parentID;
+
+  @Test
+  public void testAddDBToBatch() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
+    String multipartUploadID = UUID.randomUUID().toString();
+
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+
+    long txnId = 50;
+    long objectId = parentID + 1;
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    String dbMultipartKey = omMetadataManager.getMultipartKey(parentID,
+            fileName, multipartUploadID);
+    long clientId = Time.now();
+    String dbOpenKey = omMetadataManager.getOpenFileName(parentID, fileName,
+            clientId);
+    String dbKey = omMetadataManager.getOzonePathKey(parentID, fileName);
+    OmKeyInfo omKeyInfoV1 =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId,
+                    Time.now());
+
+    // add key to openFileTable
+    omKeyInfoV1.setKeyName(fileName);
+    TestOMRequestUtils.addFileToKeyTable(true, false,
+            fileName, omKeyInfoV1, clientId, omKeyInfoV1.getObjectID(),
+            omMetadataManager);
+
+    addS3MultipartUploadCommitPartResponseV1(volumeName, bucketName, keyName,
+            multipartUploadID, dbOpenKey);
+
+    List<OmKeyInfo> unUsedParts = new ArrayList<>();
+    S3MultipartUploadCompleteResponse s3MultipartUploadCompleteResponse =
+            createS3CompleteMPUResponseV1(volumeName, bucketName, parentID,
+                    keyName, multipartUploadID, omKeyInfoV1,
+                OzoneManagerProtocolProtos.Status.OK, unUsedParts);
+
+    s3MultipartUploadCompleteResponse.addToDBBatch(omMetadataManager,
+        batchOperation);
+
+    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+
+    Assert.assertNotNull(omMetadataManager.getKeyTable().get(dbKey));
+    Assert.assertNull(
+        omMetadataManager.getMultipartInfoTable().get(dbMultipartKey));
+    Assert.assertNull(
+            omMetadataManager.getOpenKeyTable().get(dbMultipartKey));
+
+    // Since no parts were created, there should be no entries in delete table.
+    Assert.assertEquals(0, omMetadataManager.countRowsInTable(
+            omMetadataManager.getDeletedTable()));
+  }
+
+  @Test
+  public void testAddDBToBatchWithParts() throws Exception {
+
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
+
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+    createParentPath(volumeName, bucketName);
+
+    String multipartUploadID = UUID.randomUUID().toString();
+
+    int deleteEntryCount = 0;
+
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    String dbMultipartKey = omMetadataManager.getMultipartKey(parentID,
+            fileName, multipartUploadID);
+
+    S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponseV1 =
+            addS3InitiateMultipartUpload(volumeName, bucketName, keyName,
+                    multipartUploadID);
+
+    // Add some dummy parts for testing.
+    // No key locations are added, as this test only verifies whether entries
+    // are added to the delete table or not.
+    OmMultipartKeyInfo omMultipartKeyInfo =
+            s3InitiateMultipartUploadResponseV1.getOmMultipartKeyInfo();
+
+    OmKeyInfo omKeyInfoV1 = commitS3MultipartUpload(volumeName, bucketName,
+            keyName, multipartUploadID, fileName, dbMultipartKey,
+            omMultipartKeyInfo);
+    // After the commit, an entry is added to the deleted table.
+    deleteEntryCount++;
+
+    OmKeyInfo omKeyInfo =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE,
+                    parentID + 10,
+                    parentID, 100, Time.now());
+    List<OmKeyInfo> unUsedParts = new ArrayList<>();
+    unUsedParts.add(omKeyInfo);
+    S3MultipartUploadCompleteResponse s3MultipartUploadCompleteResponse =
+            createS3CompleteMPUResponseV1(volumeName, bucketName, parentID,
+                    keyName, multipartUploadID, omKeyInfoV1,
+                    OzoneManagerProtocolProtos.Status.OK, unUsedParts);
+
+    s3MultipartUploadCompleteResponse.addToDBBatch(omMetadataManager,
+            batchOperation);
+
+    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+    String dbKey = omMetadataManager.getOzonePathKey(parentID,
+          omKeyInfoV1.getFileName());
+    Assert.assertNotNull(omMetadataManager.getKeyTable().get(dbKey));
+    Assert.assertNull(
+            omMetadataManager.getMultipartInfoTable().get(dbMultipartKey));
+    Assert.assertNull(
+            omMetadataManager.getOpenKeyTable().get(dbMultipartKey));
+
+    // As 1 unused part exists, 1 corresponding entry should be present in the
+    // delete table.
+    deleteEntryCount++;
+    Assert.assertEquals(deleteEntryCount, omMetadataManager.countRowsInTable(
+            omMetadataManager.getDeletedTable()));
+  }
+
+  private OmKeyInfo commitS3MultipartUpload(String volumeName,
+      String bucketName, String keyName, String multipartUploadID,
+      String fileName, String multipartKey,
+      OmMultipartKeyInfo omMultipartKeyInfo) throws IOException {
+
+    PartKeyInfo part1 = createPartKeyInfoV1(volumeName, bucketName, parentID,
+        fileName, 1);
+
+    addPart(1, part1, omMultipartKeyInfo);
+
+    long clientId = Time.now();
+    String openKey = omMetadataManager.getOpenFileName(parentID, fileName,
+            clientId);
+
+    S3MultipartUploadCommitPartResponse s3MultipartUploadCommitPartResponse =
+            createS3CommitMPUResponseV1(volumeName, bucketName, parentID,
+                    keyName, multipartUploadID,
+                    omMultipartKeyInfo.getPartKeyInfo(1),
+                    omMultipartKeyInfo,
+                    OzoneManagerProtocolProtos.Status.OK,  openKey);
+
+    s3MultipartUploadCommitPartResponse.checkAndUpdateDB(omMetadataManager,
+            batchOperation);
+
+    Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
+    Assert.assertNull(
+        omMetadataManager.getMultipartInfoTable().get(multipartKey));
+
+    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+
+    // As 1 part was created, 1 entry should be present in the delete table.
+    Assert.assertEquals(1, omMetadataManager.countRowsInTable(
+        omMetadataManager.getDeletedTable()));
+
+    String part1DeletedKeyName =
+        omMultipartKeyInfo.getPartKeyInfo(1).getPartName();
+
+    Assert.assertNotNull(omMetadataManager.getDeletedTable().get(
+        part1DeletedKeyName));
+
+    RepeatedOmKeyInfo ro =
+        omMetadataManager.getDeletedTable().get(part1DeletedKeyName);
+    OmKeyInfo omPartKeyInfo = OmKeyInfo.getFromProtobuf(part1.getPartKeyInfo());
+    Assert.assertEquals(omPartKeyInfo, ro.getOmKeyInfoList().get(0));
+
+    return omPartKeyInfo;
+  }
+
+  private S3InitiateMultipartUploadResponse addS3InitiateMultipartUpload(
+          String volumeName, String bucketName, String keyName,
+          String multipartUploadID) throws IOException {
+
+    S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponseV1 =
+            createS3InitiateMPUResponseV1(volumeName, bucketName, parentID,
+                    keyName, multipartUploadID, new ArrayList<>());
+
+    s3InitiateMultipartUploadResponseV1.addToDBBatch(omMetadataManager,
+            batchOperation);
+
+    return s3InitiateMultipartUploadResponseV1;
+  }
+
+  private String getKeyName() {
+    return dirName + UUID.randomUUID().toString();
+  }
+
+  private void createParentPath(String volumeName, String bucketName)
+      throws Exception {
+    // Create parent dirs for the path
+    parentID = TestOMRequestUtils.addParentsToDirTable(volumeName, bucketName,
+            dirName, omMetadataManager);
+  }
+
+  private void addS3MultipartUploadCommitPartResponseV1(String volumeName,
+      String bucketName, String keyName, String multipartUploadID,
+      String openKey) throws IOException {
+    S3MultipartUploadCommitPartResponse s3MultipartUploadCommitPartResponse =
+            createS3CommitMPUResponseV1(volumeName, bucketName, parentID,
+                    keyName, multipartUploadID, null, null,
+                    OzoneManagerProtocolProtos.Status.OK, openKey);
+
+    s3MultipartUploadCommitPartResponse.addToDBBatch(omMetadataManager,
+            batchOperation);
+
+    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+  }
+}

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 07/29: HDDS-4596. Directory table, fileTable and openFile Table is missing from the OM DB Definition. (#1724)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 6febe52a469d4a02fb5330032eef910468c02cf4
Author: Mukul Kumar Singh <ms...@apache.org>
AuthorDate: Sat Dec 19 23:36:59 2020 +0530

    HDDS-4596. Directory table, fileTable and openFile Table is missing from the OM DB Definition. (#1724)
---
 .../ozone/om/codec/OmDirectoryInfoCodec.java       |  0
 .../hadoop/ozone/om/codec/OMDBDefinition.java      | 31 +++++++++++++++++++++-
 2 files changed, 30 insertions(+), 1 deletion(-)

diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OmDirectoryInfoCodec.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmDirectoryInfoCodec.java
similarity index 100%
rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OmDirectoryInfoCodec.java
rename to hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmDirectoryInfoCodec.java
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
index 6e30ca4..a968047 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
 import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 
 import org.apache.hadoop.hdds.utils.TransactionInfo;
 import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
@@ -143,6 +144,33 @@ public class OMDBDefinition implements DBDefinition {
                     TransactionInfo.class,
                     new TransactionInfoCodec());
 
+  public static final DBColumnFamilyDefinition<String, OmDirectoryInfo>
+            DIRECTORY_TABLE =
+            new DBColumnFamilyDefinition<>(
+                    OmMetadataManagerImpl.DIRECTORY_TABLE,
+                    String.class,
+                    new StringCodec(),
+                    OmDirectoryInfo.class,
+                    new OmDirectoryInfoCodec());
+
+  public static final DBColumnFamilyDefinition<String, OmKeyInfo>
+            FILE_TABLE =
+            new DBColumnFamilyDefinition<>(
+                    OmMetadataManagerImpl.FILE_TABLE,
+                    String.class,
+                    new StringCodec(),
+                    OmKeyInfo.class,
+                    new OmKeyInfoCodec(true));
+
+  public static final DBColumnFamilyDefinition<String, OmKeyInfo>
+            OPEN_FILE_TABLE =
+            new DBColumnFamilyDefinition<>(
+                  OmMetadataManagerImpl.OPEN_FILE_TABLE,
+                  String.class,
+                  new StringCodec(),
+                  OmKeyInfo.class,
+                  new OmKeyInfoCodec(true));
+
   @Override
   public String getName() {
     return OzoneConsts.OM_DB_NAME;
@@ -158,7 +186,8 @@ public class OMDBDefinition implements DBDefinition {
     return new DBColumnFamilyDefinition[] {DELETED_TABLE, USER_TABLE,
         VOLUME_TABLE, OPEN_KEY_TABLE, KEY_TABLE,
         BUCKET_TABLE, MULTIPART_INFO_TABLE, PREFIX_TABLE, DTOKEN_TABLE,
-        S3_SECRET_TABLE, TRANSACTION_INFO_TABLE};
+        S3_SECRET_TABLE, TRANSACTION_INFO_TABLE, DIRECTORY_TABLE,
+        FILE_TABLE, OPEN_FILE_TABLE};
   }
 }
 

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 06/29: HDDS-4321. Fix compilation errors : merge HDDS-4308 and HDDS-4473 changes into the branch (#1668)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit aebaa812b9ed462d16ec5a1f5df3740b55d0b706
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Wed Dec 9 13:45:35 2020 +0530

    HDDS-4321. Fix compilation errors : merge HDDS-4308 and HDDS-4473 changes into the branch (#1668)
---
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java    | 19 +++++++++----------
 .../ozone/om/request/file/OMFileCreateRequestV1.java  |  2 +-
 .../ozone/om/request/key/OMKeyCommitRequestV1.java    |  3 ++-
 .../ozone/om/request/key/OMKeyDeleteRequestV1.java    |  7 ++++---
 4 files changed, 16 insertions(+), 15 deletions(-)

diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 46ffce8..dc70369 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -2427,20 +2427,19 @@ public class KeyManagerImpl implements KeyManager {
       metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName,
               bucketName);
     }
+    List<OmKeyInfo> keyInfoList = new ArrayList<>(fileStatusList.size());
     for (OzoneFileStatus fileStatus : fileStatusList) {
       if (fileStatus.isFile()) {
-        // refreshPipeline flag check has been removed as part of
-        // https://issues.apache.org/jira/browse/HDDS-3658.
-        // Please refer this jira for more details.
-        refresh(fileStatus.getKeyInfo());
-
-        // No need to check if a key is deleted or not here, this is handled
-        // when adding entries to cacheKeyMap from DB.
-        if (args.getSortDatanodes()) {
-          sortDatanodes(clientAddress, fileStatus.getKeyInfo());
-        }
+        keyInfoList.add(fileStatus.getKeyInfo());
       }
     }
+    // refreshPipeline flag check has been removed as part of
+    // https://issues.apache.org/jira/browse/HDDS-3658.
+    // Please refer this jira for more details.
+    refreshPipeline(keyInfoList);
+    if (args.getSortDatanodes()) {
+      sortDatanodes(clientAddress, keyInfoList.toArray(new OmKeyInfo[0]));
+    }
     fileStatusFinalList.addAll(fileStatusList);
     return fileStatusFinalList;
   }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
index cabd407..606e15b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
@@ -214,7 +214,7 @@ public class OMFileCreateRequestV1 extends OMFileCreateRequest {
           .setOpenVersion(openVersion).build())
           .setCmdType(Type.CreateFile);
       omClientResponse = new OMFileCreateResponseV1(omResponse.build(),
-              omFileInfo, missingParentInfos, clientID, omBucketInfo);
+              omFileInfo, missingParentInfos, clientID, omBucketInfo.copyObject());
 
       result = Result.SUCCESS;
     } catch (IOException ex) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java
index 3a7fd6d..8c47f7e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java
@@ -170,7 +170,8 @@ public class OMKeyCommitRequestV1 extends OMKeyCommitRequest {
       omBucketInfo.incrUsedBytes(correctedSpace);
 
       omClientResponse = new OMKeyCommitResponseV1(omResponse.build(),
-              omKeyInfo, dbFileKey, dbOpenFileKey, omVolumeArgs, omBucketInfo);
+              omKeyInfo, dbFileKey, dbOpenFileKey, omVolumeArgs,
+              omBucketInfo.copyObject());
 
       result = Result.SUCCESS;
     } catch (IOException ex) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java
index 93531bc..af3bc82 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java
@@ -147,6 +147,7 @@ public class OMKeyDeleteRequestV1 extends OMKeyDeleteRequest {
 
       omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
 
+      // TODO: HDDS-4565: consider all the sub-paths if the path is a dir.
       long quotaReleased = sumBlockLengths(omKeyInfo);
       omBucketInfo.incrUsedBytes(-quotaReleased);
       omBucketInfo.incrUsedNamespace(-1L);
@@ -157,9 +158,9 @@ public class OMKeyDeleteRequestV1 extends OMKeyDeleteRequest {
       // TODO: Revisit if we need it later.
 
       omClientResponse = new OMKeyDeleteResponseV1(omResponse
-          .setDeleteKeyResponse(DeleteKeyResponse.newBuilder()).build(),
-          omKeyInfo, ozoneManager.isRatisEnabled(),
-          omBucketInfo, keyStatus.isDirectory());
+              .setDeleteKeyResponse(DeleteKeyResponse.newBuilder()).build(),
+              omKeyInfo, ozoneManager.isRatisEnabled(),
+              omBucketInfo.copyObject(), keyStatus.isDirectory());
 
       result = Result.SUCCESS;
     } catch (IOException ex) {

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 29/29: HDDS-5018. [FSO] Add robot tests for new Filesystem layout (#2071)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit bf340bc261404f3dc9d796de75cb47d640095233
Author: Sadanand Shenoy <sa...@gmail.com>
AuthorDate: Tue Apr 6 18:42:34 2021 +0530

    HDDS-5018. [FSO] Add robot tests for new Filesystem layout (#2071)
---
 .../src/main/compose/ozone/docker-compose.yaml     | 10 +++++++++
 hadoop-ozone/dist/src/main/compose/ozone/test.sh   | 25 ++++++++++++++++------
 2 files changed, 29 insertions(+), 6 deletions(-)

diff --git a/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml
index 524d2e5..b7b450d 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml
@@ -25,6 +25,11 @@ x-common-config:
   env_file:
     - docker-config
 
+x-layout_version:
+  &layout_version
+  OZONE-SITE.XML_ozone.om.layout.version: ${OZONE_OM_LAYOUT_VERSION:-V0}
+  OZONE-SITE.XML_ozone.om.enable.filesystem.paths: ${OZONE_OM_ENABLE_FILESYSTEM_PATHS:-false}
+
 x-replication:
   &replication
   OZONE-SITE.XML_ozone.replication: ${OZONE_REPLICATION_FACTOR:-1}
@@ -37,6 +42,7 @@ services:
       - 9882
     environment:
       <<: *replication
+      <<: *layout_version
       OZONE_OPTS:
     command: ["ozone","datanode"]
   om:
@@ -45,6 +51,7 @@ services:
       ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
       OZONE_OPTS:
       <<: *replication
+      <<: *layout_version
     ports:
       - 9874:9874
       - 9862:9862
@@ -59,12 +66,14 @@ services:
       OZONE-SITE.XML_hdds.scm.safemode.min.datanode: ${OZONE_SAFEMODE_MIN_DATANODES:-1}
       OZONE_OPTS:
       <<: *replication
+      <<: *layout_version
     command: ["ozone","scm"]
   s3g:
     <<: *common-config
     environment:
       OZONE_OPTS:
       <<: *replication
+      <<: *layout_version
     ports:
       - 9878:9878
     command: ["ozone","s3g"]
@@ -75,4 +84,5 @@ services:
     environment:
       OZONE_OPTS:
       <<: *replication
+      <<: *layout_version
     command: ["ozone","recon"]
diff --git a/hadoop-ozone/dist/src/main/compose/ozone/test.sh b/hadoop-ozone/dist/src/main/compose/ozone/test.sh
index 16f55ca..6de26ab 100755
--- a/hadoop-ozone/dist/src/main/compose/ozone/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozone/test.sh
@@ -35,12 +35,6 @@ execute_robot_test scm basic
 
 execute_robot_test scm gdpr
 
-for scheme in ofs o3fs; do
-  for bucket in link bucket; do
-    execute_robot_test scm -v SCHEME:${scheme} -v BUCKET_TYPE:${bucket} -N ozonefs-${scheme}-${bucket} ozonefs/ozonefs.robot
-  done
-done
-
 execute_robot_test scm security/ozone-secure-token.robot
 
 for bucket in link generated; do
@@ -57,4 +51,23 @@ execute_robot_test scm cli
 
 stop_docker_env
 
+# running FS tests with different config requires restart of the cluster
+export OZONE_KEEP_RESULTS=true
+export OZONE_OM_LAYOUT_VERSION OZONE_OM_ENABLE_FILESYSTEM_PATHS
+for OZONE_OM_LAYOUT_VERSION in V0 V1; do
+  if [[ $OZONE_OM_LAYOUT_VERSION == "V1" ]]; then
+    OZONE_OM_ENABLE_FILESYSTEM_PATHS=true
+  else
+    OZONE_OM_ENABLE_FILESYSTEM_PATHS=false
+  fi
+
+  start_docker_env
+  for scheme in ofs o3fs; do
+    for bucket in link bucket; do
+      execute_robot_test scm -v SCHEME:${scheme} -v BUCKET_TYPE:${bucket} -N ozonefs-${OZONE_OM_LAYOUT_VERSION}-${scheme}-${bucket} ozonefs/ozonefs.robot
+    done
+  done
+  stop_docker_env
+done
+
 generate_report

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 02/29: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable (#1473)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit f67dde1ea64f3be10441ff520baab7abf84a6c2a
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Tue Oct 13 22:48:35 2020 +0530

    HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable (#1473)
---
 .../apache/hadoop/ozone/om/helpers/OmKeyInfo.java  | 109 +++++++-
 .../apache/hadoop/fs/ozone/TestOzoneFileOps.java   | 231 +++++++++++++++++
 .../src/main/proto/OmClientProtocol.proto          |   1 +
 .../apache/hadoop/ozone/om/OMMetadataManager.java  |  15 +-
 .../hadoop/ozone/om/codec/OmKeyInfoCodec.java      |   8 +
 .../hadoop/ozone/om/OmMetadataManagerImpl.java     |  37 ++-
 .../om/ratis/utils/OzoneManagerRatisUtils.java     |  19 +-
 .../request/file/OMDirectoryCreateRequestV1.java   |  21 +-
 .../ozone/om/request/file/OMFileCreateRequest.java |  53 ++--
 .../om/request/file/OMFileCreateRequestV1.java     | 258 +++++++++++++++++++
 .../ozone/om/request/file/OMFileRequest.java       | 132 +++++++++-
 .../ozone/om/request/key/OMKeyCommitRequest.java   |  34 ++-
 ...ommitRequest.java => OMKeyCommitRequestV1.java} | 252 ++++++++++---------
 .../hadoop/ozone/om/request/key/OMKeyRequest.java  | 278 ++++++++++++---------
 .../om/response/file/OMFileCreateResponseV1.java   |  85 +++++++
 .../ozone/om/response/key/OMKeyCommitResponse.java |  15 ++
 ...mitResponse.java => OMKeyCommitResponseV1.java} |  53 ++--
 .../ozone/om/response/key/OMKeyCreateResponse.java |  14 +-
 .../ozone/om/request/TestOMRequestUtils.java       | 110 ++++++++
 .../file/TestOMDirectoryCreateRequestV1.java       |   1 +
 .../om/request/file/TestOMFileCreateRequest.java   |  91 ++++---
 .../om/request/file/TestOMFileCreateRequestV1.java | 192 ++++++++++++++
 .../om/request/key/TestOMKeyCommitRequest.java     |  74 ++++--
 .../om/request/key/TestOMKeyCommitRequestV1.java   | 106 ++++++++
 .../ozone/om/request/key/TestOMKeyRequest.java     |   9 +-
 .../response/file/TestOMFileCreateResponseV1.java  |  73 ++++++
 .../om/response/key/TestOMKeyCommitResponse.java   |  66 +++--
 .../om/response/key/TestOMKeyCommitResponseV1.java | 101 ++++++++
 .../om/response/key/TestOMKeyCreateResponse.java   |  38 +--
 .../ozone/om/response/key/TestOMKeyResponse.java   |  25 +-
 30 files changed, 2088 insertions(+), 413 deletions(-)

diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
index 43213a9..b6fb404 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
@@ -25,9 +25,11 @@ import java.util.List;
 import java.util.Map;
 import java.util.Objects;
 
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocationList;
 import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
@@ -52,6 +54,8 @@ public final class OmKeyInfo extends WithObjectID {
   private HddsProtos.ReplicationType type;
   private HddsProtos.ReplicationFactor factor;
   private FileEncryptionInfo encInfo;
+  private String fileName; // leaf node name
+  private long parentObjectID; // pointer to parent directory
 
   /**
    * ACL Information.
@@ -94,6 +98,22 @@ public final class OmKeyInfo extends WithObjectID {
     this.updateID = updateID;
   }
 
+  @SuppressWarnings("parameternumber")
+  OmKeyInfo(String volumeName, String bucketName, String keyName,
+            String fileName, List<OmKeyLocationInfoGroup> versions,
+            long dataSize, long creationTime, long modificationTime,
+            HddsProtos.ReplicationType type,
+            HddsProtos.ReplicationFactor factor,
+            Map<String, String> metadata,
+            FileEncryptionInfo encInfo, List<OzoneAcl> acls,
+            long parentObjectID, long objectID, long updateID) {
+    this(volumeName, bucketName, keyName, versions, dataSize,
+            creationTime, modificationTime, type, factor, metadata, encInfo,
+            acls, objectID, updateID);
+    this.fileName = fileName;
+    this.parentObjectID = parentObjectID;
+  }
+
   public String getVolumeName() {
     return volumeName;
   }
@@ -126,6 +146,19 @@ public final class OmKeyInfo extends WithObjectID {
     this.dataSize = size;
   }
 
+  public void setFileName(String fileName) {
+    this.fileName = fileName;
+  }
+
+  public String getFileName() {
+    return fileName;
+  }
+
+  public long getParentObjectID() {
+    return parentObjectID;
+  }
+
+
   public synchronized OmKeyLocationInfoGroup getLatestVersionLocations() {
     return keyLocationVersions.size() == 0? null :
         keyLocationVersions.get(keyLocationVersions.size() - 1);
@@ -272,6 +305,9 @@ public final class OmKeyInfo extends WithObjectID {
     private List<OzoneAcl> acls;
     private long objectID;
     private long updateID;
+    // not persisted to DB. FileName will be the last element in path keyName.
+    private String fileName;
+    private long parentObjectID;
 
     public Builder() {
       this.metadata = new HashMap<>();
@@ -374,11 +410,22 @@ public final class OmKeyInfo extends WithObjectID {
       return this;
     }
 
+    public Builder setFileName(String keyFileName) {
+      this.fileName = keyFileName;
+      return this;
+    }
+
+    public Builder setParentObjectID(long parentID) {
+      this.parentObjectID = parentID;
+      return this;
+    }
+
     public OmKeyInfo build() {
       return new OmKeyInfo(
-          volumeName, bucketName, keyName, omKeyLocationInfoGroups,
-          dataSize, creationTime, modificationTime, type, factor, metadata,
-          encInfo, acls, objectID, updateID);
+              volumeName, bucketName, keyName, fileName,
+              omKeyLocationInfoGroups, dataSize, creationTime,
+              modificationTime, type, factor, metadata, encInfo, acls,
+              parentObjectID, objectID, updateID);
     }
   }
 
@@ -391,11 +438,33 @@ public final class OmKeyInfo extends WithObjectID {
   }
 
   /**
+   * For network transmit.
+   *
+   * @param fullKeyName the user given full key name
+   * @return key info with the user given full key name
+   */
+  public KeyInfo getProtobuf(String fullKeyName, int clientVersion) {
+    return getProtobuf(false, fullKeyName, clientVersion);
+  }
+
+  /**
    *
    * @param ignorePipeline true for persist to DB, false for network transmit.
    * @return
    */
   public KeyInfo getProtobuf(boolean ignorePipeline, int clientVersion) {
+    return getProtobuf(ignorePipeline, null, clientVersion);
+  }
+
+  /**
+   * Gets KeyInfo with the user given key name.
+   *
+   * @param ignorePipeline   ignore pipeline flag
+   * @param fullKeyName user given key name
+   * @return key info object
+   */
+  private KeyInfo getProtobuf(boolean ignorePipeline, String fullKeyName,
+                              int clientVersion) {
     long latestVersion = keyLocationVersions.size() == 0 ? -1 :
         keyLocationVersions.get(keyLocationVersions.size() - 1).getVersion();
 
@@ -408,7 +477,6 @@ public final class OmKeyInfo extends WithObjectID {
     KeyInfo.Builder kb = KeyInfo.newBuilder()
         .setVolumeName(volumeName)
         .setBucketName(bucketName)
-        .setKeyName(keyName)
         .setDataSize(dataSize)
         .setFactor(factor)
         .setType(type)
@@ -419,7 +487,13 @@ public final class OmKeyInfo extends WithObjectID {
         .addAllMetadata(KeyValueUtil.toProtobuf(metadata))
         .addAllAcls(OzoneAclUtil.toProtobuf(acls))
         .setObjectID(objectID)
-        .setUpdateID(updateID);
+        .setUpdateID(updateID)
+        .setParentID(parentObjectID);
+    if (StringUtils.isNotBlank(fullKeyName)) {
+      kb.setKeyName(fullKeyName);
+    } else {
+      kb.setKeyName(keyName);
+    }
     if (encInfo != null) {
       kb.setFileEncryptionInfo(OMPBHelper.convert(encInfo));
     }
@@ -457,6 +531,11 @@ public final class OmKeyInfo extends WithObjectID {
     if (keyInfo.hasUpdateID()) {
       builder.setUpdateID(keyInfo.getUpdateID());
     }
+    if (keyInfo.hasParentID()) {
+      builder.setParentObjectID(keyInfo.getParentID());
+    }
+    // not persisted to DB. FileName will be filtered out from keyName
+    builder.setFileName(OzoneFSUtils.getFileName(keyInfo.getKeyName()));
     return builder.build();
   }
 
@@ -470,6 +549,8 @@ public final class OmKeyInfo extends WithObjectID {
         ", creationTime='" + creationTime + '\'' +
         ", type='" + type + '\'' +
         ", factor='" + factor + '\'' +
+        ", objectID='" + objectID + '\'' +
+        ", parentID='" + parentObjectID + '\'' +
         '}';
   }
 
@@ -495,12 +576,13 @@ public final class OmKeyInfo extends WithObjectID {
         Objects.equals(metadata, omKeyInfo.metadata) &&
         Objects.equals(acls, omKeyInfo.acls) &&
         objectID == omKeyInfo.objectID &&
-        updateID == omKeyInfo.updateID;
+        updateID == omKeyInfo.updateID &&
+        parentObjectID == omKeyInfo.parentObjectID;
   }
 
   @Override
   public int hashCode() {
-    return Objects.hash(volumeName, bucketName, keyName);
+    return Objects.hash(volumeName, bucketName, keyName, parentObjectID);
   }
 
   /**
@@ -517,8 +599,10 @@ public final class OmKeyInfo extends WithObjectID {
         .setReplicationType(type)
         .setReplicationFactor(factor)
         .setFileEncryptionInfo(encInfo)
-        .setObjectID(objectID).setUpdateID(updateID);
-
+        .setObjectID(objectID)
+        .setUpdateID(updateID)
+        .setParentObjectID(parentObjectID)
+        .setFileName(fileName);
 
     keyLocationVersions.forEach(keyLocationVersion ->
         builder.addOmKeyLocationInfoGroup(
@@ -546,4 +630,11 @@ public final class OmKeyInfo extends WithObjectID {
   public void clearFileEncryptionInfo() {
     this.encInfo = null;
   }
+
+  public String getPath() {
+    if (StringUtils.isBlank(getFileName())) {
+      return getKeyName();
+    }
+    return getParentObjectID() + OzoneConsts.OM_KEY_PREFIX + getFileName();
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java
new file mode 100644
index 0000000..d097268
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java
@@ -0,0 +1,231 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.TestDataUtil;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.util.StringUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.concurrent.TimeoutException;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
+
+/**
+ * Test verifies the entries and operations in file table, open file table etc.
+ */
+public class TestOzoneFileOps {
+
+  @Rule
+  public Timeout timeout = new Timeout(300000);
+
+  private static final Logger LOG =
+          LoggerFactory.getLogger(TestOzoneFileOps.class);
+
+  private MiniOzoneCluster cluster;
+  private FileSystem fs;
+  private String volumeName;
+  private String bucketName;
+
+  @Before
+  public void setupOzoneFileSystem()
+          throws IOException, TimeoutException, InterruptedException {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setInt(FS_TRASH_INTERVAL_KEY, 1);
+    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, false);
+    cluster = MiniOzoneCluster.newBuilder(conf)
+            .setNumDatanodes(3)
+            .build();
+    cluster.waitForClusterToBeReady();
+    // create a volume and a bucket to be used by OzoneFileSystem
+    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster);
+    volumeName = bucket.getVolumeName();
+    bucketName = bucket.getName();
+
+    String rootPath = String.format("%s://%s.%s/",
+            OzoneConsts.OZONE_URI_SCHEME, bucket.getName(),
+            bucket.getVolumeName());
+
+    // Set the fs.defaultFS and start the filesystem
+    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
+    // Set the number of keys to be processed during batch operate.
+    conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5);
+    fs = FileSystem.get(conf);
+  }
+
+  @After
+  public void tearDown() {
+    IOUtils.closeQuietly(fs);
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test(timeout = 300_000)
+  public void testCreateFile() throws Exception {
+    // Op 1. create dir -> /d1/d2/d3/d4/
+    Path parent = new Path("/d1/d2/");
+    Path file = new Path(parent, "file1");
+    FSDataOutputStream outputStream = fs.create(file);
+    String openFileKey = "";
+
+    OMMetadataManager omMgr = cluster.getOzoneManager().getMetadataManager();
+    OmBucketInfo omBucketInfo = omMgr.getBucketTable().get(
+            omMgr.getBucketKey(volumeName, bucketName));
+    Assert.assertNotNull("Failed to find bucketInfo", omBucketInfo);
+
+    ArrayList<String> dirKeys = new ArrayList<>();
+    long d1ObjectID = verifyDirKey(omBucketInfo.getObjectID(), "d1", "/d1",
+            dirKeys, omMgr);
+    long d2ObjectID = verifyDirKey(d1ObjectID, "d2", "/d1/d2", dirKeys,
+            omMgr);
+    openFileKey = d2ObjectID + OzoneConsts.OM_KEY_PREFIX + file.getName();
+
+    // verify entries in directory table
+    TableIterator<String, ? extends
+            Table.KeyValue<String, OmDirectoryInfo>> iterator =
+            omMgr.getDirectoryTable().iterator();
+    iterator.seekToFirst();
+    int count = dirKeys.size();
+    Assert.assertEquals("Unexpected directory table entries!", 2, count);
+    while (iterator.hasNext()) {
+      count--;
+      Table.KeyValue<String, OmDirectoryInfo> value = iterator.next();
+      verifyKeyFormat(value.getKey(), dirKeys);
+    }
+    Assert.assertEquals("Unexpected directory table entries!", 0, count);
+
+    // verify entries in open key table
+    TableIterator<String, ? extends
+            Table.KeyValue<String, OmKeyInfo>> keysItr =
+            omMgr.getOpenKeyTable().iterator();
+    keysItr.seekToFirst();
+
+    while (keysItr.hasNext()) {
+      count++;
+      Table.KeyValue<String, OmKeyInfo> value = keysItr.next();
+      verifyOpenKeyFormat(value.getKey(), openFileKey);
+      verifyOMFileInfoFormat(value.getValue(), file.getName(), d2ObjectID);
+    }
+    Assert.assertEquals("Unexpected file table entries!", 1, count);
+
+    // trigger CommitKeyRequest
+    outputStream.close();
+
+    Assert.assertTrue("Failed to commit the open file:" + openFileKey,
+            omMgr.getOpenKeyTable().isEmpty());
+
+    OmKeyInfo omKeyInfo = omMgr.getKeyTable().get(openFileKey);
+    Assert.assertNotNull("Invalid Key!", omKeyInfo);
+    verifyOMFileInfoFormat(omKeyInfo, file.getName(), d2ObjectID);
+  }
+
+  private void verifyOMFileInfoFormat(OmKeyInfo omKeyInfo, String fileName,
+                                      long parentID) {
+    Assert.assertEquals("Wrong keyName", fileName,
+            omKeyInfo.getKeyName());
+    Assert.assertEquals("Wrong parentID", parentID,
+            omKeyInfo.getParentObjectID());
+    String dbKey = parentID + OzoneConsts.OM_KEY_PREFIX + fileName;
+    Assert.assertEquals("Wrong path format", dbKey,
+            omKeyInfo.getPath());
+  }
+
+  /**
+   * Verify key name format and the DB key existence in the expected dirKeys
+   * list.
+   *
+   * @param key     table keyName
+   * @param dirKeys expected keyName
+   */
+  private void verifyKeyFormat(String key, ArrayList<String> dirKeys) {
+    String[] keyParts = StringUtils.split(key,
+            OzoneConsts.OM_KEY_PREFIX.charAt(0));
+    Assert.assertEquals("Invalid KeyName", 2, keyParts.length);
+    boolean removed = dirKeys.remove(key);
+    Assert.assertTrue("Key:" + key + " doesn't exists in directory table!",
+            removed);
+  }
+
+  /**
+   * Verify key name format and the DB key existence in the expected
+   * openFileKeys list.
+   *
+   * @param key          table keyName
+   * @param openFileKey expected keyName
+   */
+  private void verifyOpenKeyFormat(String key, String openFileKey) {
+    String[] keyParts = StringUtils.split(key,
+            OzoneConsts.OM_KEY_PREFIX.charAt(0));
+    Assert.assertEquals("Invalid KeyName:" + key, 3, keyParts.length);
+    String[] expectedOpenFileParts = StringUtils.split(openFileKey,
+            OzoneConsts.OM_KEY_PREFIX.charAt(0));
+    Assert.assertEquals("ParentId/Key:" + expectedOpenFileParts[0]
+                    + " doesn't exists in openFileTable!",
+            expectedOpenFileParts[0] + OzoneConsts.OM_KEY_PREFIX
+                    + expectedOpenFileParts[1],
+            keyParts[0] + OzoneConsts.OM_KEY_PREFIX + keyParts[1]);
+  }
+
+  long verifyDirKey(long parentId, String dirKey, String absolutePath,
+                    ArrayList<String> dirKeys, OMMetadataManager omMgr)
+          throws Exception {
+    String dbKey = parentId + OzoneConsts.OM_KEY_PREFIX + dirKey;
+    dirKeys.add(dbKey);
+    OmDirectoryInfo dirInfo = omMgr.getDirectoryTable().get(dbKey);
+    Assert.assertNotNull("Failed to find " + absolutePath +
+            " using dbKey: " + dbKey, dirInfo);
+    Assert.assertEquals("Parent Id mismatches", parentId,
+            dirInfo.getParentObjectID());
+    Assert.assertEquals("Mismatches directory name", dirKey,
+            dirInfo.getName());
+    Assert.assertTrue("Mismatches directory creation time param",
+            dirInfo.getCreationTime() > 0);
+    Assert.assertEquals("Mismatches directory modification time param",
+            dirInfo.getCreationTime(), dirInfo.getModificationTime());
+    return dirInfo.getObjectID();
+  }
+
+}
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index 68dd184..c10a79d 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -774,6 +774,7 @@ message KeyInfo {
     repeated OzoneAclInfo acls = 13;
     optional uint64 objectID = 14;
     optional uint64 updateID = 15;
+    optional uint64 parentID = 16;
 }
 
 message DirectoryInfo {
diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
index 7efe0a3..7ff684b 100644
--- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
+++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
@@ -407,12 +407,23 @@ public interface OMMetadataManager extends DBStoreHAManager {
       getKeyIterator();
 
   /**
-   * Given a volume, bucket and a key, return the corresponding DB prefixKey
-   * key.
+   * Given parent object id and path component name, return the corresponding
+   * DB path key.
    *
    * @param parentObjectId - parent object Id
    * @param pathComponentName   - path component name
    * @return DB directory key as String.
    */
   String getOzonePathKey(long parentObjectId, String pathComponentName);
+
+  /**
+   * Returns DB key name of an open file in OM metadata store. Should be
+   * the parent object id, followed by the leaf node name and the client id.
+   *
+   * @param parentObjectId - parent object Id
+   * @param fileName       - file name
+   * @param id             - client id for this open request
+   * @return DB directory key as String.
+   */
+  String getOpenFileName(long parentObjectId, String fileName, long id);
 }
diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java
index 3d8ace3..5855afc 100644
--- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java
+++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java
@@ -32,6 +32,14 @@ import static org.apache.hadoop.ozone.ClientVersions.CURRENT_VERSION;
 
 /**
  * Codec to encode OmKeyInfo as byte array.
+ *
+ * <p>
+ * If the layout version "ozone.om.layout.version" is V1 and
+ * "ozone.om.enable.filesystem.paths" is TRUE. Then, DB stores only the leaf
+ * node name into the 'keyName' field.
+ * <p>
+ * For example, the user given key path is '/a/b/c/d/e/file1', then in DB
+ * 'keyName' field stores only the leaf node name, which is 'file1'.
  */
 public class OmKeyInfoCodec implements Codec<OmKeyInfo> {
   private static final Logger LOG =
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index 28add3b..7042d67 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -75,6 +75,7 @@ import org.apache.hadoop.ozone.om.lock.OzoneManagerLock;
 import org.apache.hadoop.hdds.utils.TransactionInfo;
 import org.apache.hadoop.ozone.storage.proto
     .OzoneManagerStorageProtos.PersistedUserVolumeInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -130,6 +131,10 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
    * |----------------------------------------------------------------------|
    * |  directoryTable    | parentId/directoryName -> DirectoryInfo         |
    * |----------------------------------------------------------------------|
+   * |  fileTable         | parentId/fileName -> KeyInfo                    |
+   * |----------------------------------------------------------------------|
+   * |  openFileTable     | parentId/fileName/id -> KeyInfo                 |
+   * |----------------------------------------------------------------------|
    * |  transactionInfoTable | #TRANSACTIONINFO -> OMTransactionInfo        |
    * |----------------------------------------------------------------------|
    */
@@ -145,6 +150,8 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
   public static final String DELEGATION_TOKEN_TABLE = "dTokenTable";
   public static final String PREFIX_TABLE = "prefixTable";
   public static final String DIRECTORY_TABLE = "directoryTable";
+  public static final String FILE_TABLE = "fileTable";
+  public static final String OPEN_FILE_TABLE = "openFileTable";
   public static final String TRANSACTION_INFO_TABLE =
       "transactionInfoTable";
 
@@ -164,6 +171,8 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
   private Table dTokenTable;
   private Table prefixTable;
   private Table dirTable;
+  private Table fileTable;
+  private Table openFileTable;
   private Table transactionInfoTable;
   private boolean isRatisEnabled;
   private boolean ignorePipelineinKey;
@@ -202,7 +211,8 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
    * For subclass overriding.
    */
   protected OmMetadataManagerImpl() {
-    this.lock = new OzoneManagerLock(new OzoneConfiguration());
+    OzoneConfiguration conf = new OzoneConfiguration();
+    this.lock = new OzoneManagerLock(conf);
     this.openKeyExpireThresholdMS =
         OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT;
     this.omEpoch = 0;
@@ -230,6 +240,9 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
 
   @Override
   public Table<String, OmKeyInfo> getKeyTable() {
+    if (OzoneManagerRatisUtils.isOmLayoutVersionV1()) {
+      return fileTable;
+    }
     return keyTable;
   }
 
@@ -240,6 +253,9 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
 
   @Override
   public Table<String, OmKeyInfo> getOpenKeyTable() {
+    if (OzoneManagerRatisUtils.isOmLayoutVersionV1()) {
+      return openFileTable;
+    }
     return openKeyTable;
   }
 
@@ -346,6 +362,8 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
         .addTable(S3_SECRET_TABLE)
         .addTable(PREFIX_TABLE)
         .addTable(DIRECTORY_TABLE)
+        .addTable(FILE_TABLE)
+        .addTable(OPEN_FILE_TABLE)
         .addTable(TRANSACTION_INFO_TABLE)
         .addCodec(OzoneTokenIdentifier.class, new TokenIdentifierCodec())
         .addCodec(OmKeyInfo.class, new OmKeyInfoCodec(true))
@@ -416,6 +434,14 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
             OmDirectoryInfo.class);
     checkTableStatus(dirTable, DIRECTORY_TABLE);
 
+    fileTable = this.store.getTable(FILE_TABLE, String.class,
+            OmKeyInfo.class);
+    checkTableStatus(fileTable, FILE_TABLE);
+
+    openFileTable = this.store.getTable(OPEN_FILE_TABLE, String.class,
+            OmKeyInfo.class);
+    checkTableStatus(openFileTable, OPEN_FILE_TABLE);
+
     transactionInfoTable = this.store.getTable(TRANSACTION_INFO_TABLE,
         String.class, TransactionInfo.class);
     checkTableStatus(transactionInfoTable, TRANSACTION_INFO_TABLE);
@@ -1189,4 +1215,13 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
     return builder.toString();
   }
 
+  @Override
+  public String getOpenFileName(long parentID, String fileName,
+                                long id) {
+    StringBuilder openKey = new StringBuilder();
+    openKey.append(parentID);
+    openKey.append(OM_KEY_PREFIX).append(fileName);
+    openKey.append(OM_KEY_PREFIX).append(id);
+    return openKey.toString();
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index c3fc994..2d98362 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -34,9 +34,11 @@ import org.apache.hadoop.ozone.om.request.bucket.acl.OMBucketSetAclRequest;
 import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest;
 import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestV1;
 import org.apache.hadoop.ozone.om.request.file.OMFileCreateRequest;
+import org.apache.hadoop.ozone.om.request.file.OMFileCreateRequestV1;
 import org.apache.hadoop.ozone.om.request.key.OMKeysDeleteRequest;
 import org.apache.hadoop.ozone.om.request.key.OMAllocateBlockRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequest;
+import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequestV1;
 import org.apache.hadoop.ozone.om.request.key.OMKeyCreateRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyDeleteRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyPurgeRequest;
@@ -82,7 +84,7 @@ public final class OzoneManagerRatisUtils {
 
   // TODO: Temporary workaround for OM upgrade path and will be replaced once
   //  upgrade HDDS-3698 story reaches consensus.
-  private static boolean omLayoutVersionV1 = true;
+  private static boolean omLayoutVersionV1 = false;
 
   private OzoneManagerRatisUtils() {
   }
@@ -134,6 +136,9 @@ public final class OzoneManagerRatisUtils {
     case CreateKey:
       return new OMKeyCreateRequest(omRequest);
     case CommitKey:
+      if (omLayoutVersionV1) {
+        return new OMKeyCommitRequestV1(omRequest);
+      }
       return new OMKeyCommitRequest(omRequest);
     case DeleteKey:
       return new OMKeyDeleteRequest(omRequest);
@@ -149,6 +154,9 @@ public final class OzoneManagerRatisUtils {
       }
       return new OMDirectoryCreateRequest(omRequest);
     case CreateFile:
+      if (omLayoutVersionV1) {
+        return new OMFileCreateRequestV1(omRequest);
+      }
       return new OMFileCreateRequest(omRequest);
     case PurgeKeys:
       return new OMKeyPurgeRequest(omRequest);
@@ -270,4 +278,13 @@ public final class OzoneManagerRatisUtils {
         .verifyTransactionInfo(transactionInfo, lastAppliedIndex, leaderId,
             newDBlocation, OzoneManager.LOG);
   }
+
+  /**
+   * Returns true if the OM metadata layout version is V1.
+   * @return true if the layout version is V1, otherwise false
+   */
+  public static boolean isOmLayoutVersionV1() {
+    return omLayoutVersionV1;
+  }
+
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java
index 8b0727a..c48ff78 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java
@@ -147,8 +147,10 @@ public class OMDirectoryCreateRequestV1 extends OMDirectoryCreateRequest {
           omDirectoryResult == NONE) {
 
         // prepare all missing parents
-        missingParentInfos = OMDirectoryCreateRequestV1.getAllParentDirInfo(
-                ozoneManager, keyArgs, omPathInfo, trxnLogIndex);
+        missingParentInfos =
+                OMDirectoryCreateRequestV1.getAllMissingParentDirInfo(
+                        ozoneManager, keyArgs, omPathInfo, trxnLogIndex);
+
         // prepare leafNode dir
         OmDirectoryInfo dirInfo = createDirectoryInfoWithACL(
                 omPathInfo.getLeafNodeName(),
@@ -229,14 +231,15 @@ public class OMDirectoryCreateRequestV1 extends OMDirectoryCreateRequest {
 
   /**
    * Construct OmDirectoryInfo for every parent directory in missing list.
-   * @param ozoneManager
-   * @param keyArgs
-   * @param pathInfo list of parent directories to be created and its ACLs
-   * @param trxnLogIndex
-   * @return
-   * @throws IOException
+   *
+   * @param ozoneManager Ozone Manager
+   * @param keyArgs      key arguments
+   * @param pathInfo     list of parent directories to be created and its ACLs
+   * @param trxnLogIndex transaction log index id
+   * @return list of missing parent directories
+   * @throws IOException DB failure
    */
-  public static List<OmDirectoryInfo> getAllParentDirInfo(
+  public static List<OmDirectoryInfo> getAllMissingParentDirInfo(
           OzoneManager ozoneManager, KeyArgs keyArgs,
           OMFileRequest.OMPathInfoV1 pathInfo, long trxnLogIndex)
           throws IOException {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
index 0619062..82fd4a7 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
@@ -234,23 +234,10 @@ public class OMFileCreateRequest extends OMKeyRequest {
       List<OzoneAcl> inheritAcls = pathInfo.getAcls();
 
       // Check if a file or directory exists with same key name.
-      if (omDirectoryResult == FILE_EXISTS) {
-        if (!isOverWrite) {
-          throw new OMException("File " + keyName + " already exists",
-              OMException.ResultCodes.FILE_ALREADY_EXISTS);
-        }
-      } else if (omDirectoryResult == DIRECTORY_EXISTS) {
-        throw new OMException("Can not write to directory: " + keyName,
-            OMException.ResultCodes.NOT_A_FILE);
-      } else if (omDirectoryResult == FILE_EXISTS_IN_GIVENPATH) {
-        throw new OMException(
-            "Can not create file: " + keyName + " as there " +
-                "is already file in the given path",
-            OMException.ResultCodes.NOT_A_FILE);
-      }
+      checkDirectoryResult(keyName, isOverWrite, omDirectoryResult);
 
       if (!isRecursive) {
-        checkAllParentsExist(ozoneManager, keyArgs, pathInfo);
+        checkAllParentsExist(keyArgs, pathInfo);
       }
 
       // do open key
@@ -355,8 +342,40 @@ public class OMFileCreateRequest extends OMKeyRequest {
     return omClientResponse;
   }
 
-  private void checkAllParentsExist(OzoneManager ozoneManager,
-      KeyArgs keyArgs,
+  /**
+   * Verify om directory result.
+   *
+   * @param keyName           key name
+   * @param isOverWrite       flag represents whether file can be overwritten
+   * @param omDirectoryResult directory result
+   * @throws OMException if a file or directory already exists in the path
+   */
+  protected void checkDirectoryResult(String keyName, boolean isOverWrite,
+      OMFileRequest.OMDirectoryResult omDirectoryResult) throws OMException {
+    if (omDirectoryResult == FILE_EXISTS) {
+      if (!isOverWrite) {
+        throw new OMException("File " + keyName + " already exists",
+            OMException.ResultCodes.FILE_ALREADY_EXISTS);
+      }
+    } else if (omDirectoryResult == DIRECTORY_EXISTS) {
+      throw new OMException("Can not write to directory: " + keyName,
+          OMException.ResultCodes.NOT_A_FILE);
+    } else if (omDirectoryResult == FILE_EXISTS_IN_GIVENPATH) {
+      throw new OMException(
+          "Can not create file: " + keyName + " as there " +
+              "is already file in the given path",
+          OMException.ResultCodes.NOT_A_FILE);
+    }
+  }
+
+  /**
+   * Verify the existence of parent directory.
+   *
+   * @param keyArgs  key arguments
+   * @param pathInfo om path info
+   * @throws IOException directory not found
+   */
+  protected void checkAllParentsExist(KeyArgs keyArgs,
       OMFileRequest.OMPathInfo pathInfo) throws IOException {
     String keyName = keyArgs.getKeyName();
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
new file mode 100644
index 0000000..cabd407
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
@@ -0,0 +1,258 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.file;
+
+import com.google.common.base.Optional;
+import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OMMetrics;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponse;
+import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponseV1;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+
+/**
+ * Handles create file request for layout version V1.
+ */
+public class OMFileCreateRequestV1 extends OMFileCreateRequest {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OMFileCreateRequestV1.class);
+  public OMFileCreateRequestV1(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override
+  @SuppressWarnings("methodlength")
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+
+    CreateFileRequest createFileRequest = getOmRequest().getCreateFileRequest();
+    KeyArgs keyArgs = createFileRequest.getKeyArgs();
+    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
+
+    String volumeName = keyArgs.getVolumeName();
+    String bucketName = keyArgs.getBucketName();
+    String keyName = keyArgs.getKeyName();
+
+    // if isRecursive is true, file would be created even if parent
+    // directories do not exist.
+    boolean isRecursive = createFileRequest.getIsRecursive();
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("File create for : " + volumeName + "/" + bucketName + "/"
+          + keyName + ":" + isRecursive);
+    }
+
+    // if isOverWrite is true, file would be over written.
+    boolean isOverWrite = createFileRequest.getIsOverwrite();
+
+    OMMetrics omMetrics = ozoneManager.getMetrics();
+    omMetrics.incNumCreateFile();
+
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+
+    boolean acquiredLock = false;
+
+    OmBucketInfo omBucketInfo = null;
+    final List<OmKeyLocationInfo> locations = new ArrayList<>();
+    List<OmDirectoryInfo> missingParentInfos;
+    int numKeysCreated = 0;
+
+    OMClientResponse omClientResponse = null;
+    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
+        getOmRequest());
+    IOException exception = null;
+    Result result = null;
+    try {
+      keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
+      volumeName = keyArgs.getVolumeName();
+      bucketName = keyArgs.getBucketName();
+
+      if (keyName.length() == 0) {
+        // Check if this is the root of the filesystem.
+        throw new OMException("Can not write to directory: " + keyName,
+                OMException.ResultCodes.NOT_A_FILE);
+      }
+
+      // check Acl
+      checkKeyAcls(ozoneManager, volumeName, bucketName, keyName,
+          IAccessAuthorizer.ACLType.CREATE, OzoneObj.ResourceType.KEY);
+
+      // acquire lock
+      acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
+          volumeName, bucketName);
+
+      validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
+
+      OmKeyInfo dbFileInfo = null;
+
+      OMFileRequest.OMPathInfoV1 pathInfoV1 =
+              OMFileRequest.verifyDirectoryKeysInPath(omMetadataManager,
+                      volumeName, bucketName, keyName, Paths.get(keyName));
+
+      if (pathInfoV1.getDirectoryResult()
+              == OMFileRequest.OMDirectoryResult.FILE_EXISTS) {
+        String dbFileKey = omMetadataManager.getOzonePathKey(
+                pathInfoV1.getLastKnownParentId(),
+                pathInfoV1.getLeafNodeName());
+        dbFileInfo = OMFileRequest.getOmKeyInfoFromFileTable(false,
+                omMetadataManager, dbFileKey, keyName);
+      }
+
+      // check if the file or directory already existed in OM
+      checkDirectoryResult(keyName, isOverWrite,
+              pathInfoV1.getDirectoryResult());
+
+      if (!isRecursive) {
+        checkAllParentsExist(keyArgs, pathInfoV1);
+      }
+
+      // add all missing parents to dir table
+      missingParentInfos =
+              OMDirectoryCreateRequestV1.getAllMissingParentDirInfo(
+                      ozoneManager, keyArgs, pathInfoV1, trxnLogIndex);
+
+      // total number of keys created.
+      numKeysCreated = missingParentInfos.size();
+
+      // do open key
+      OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().get(
+          omMetadataManager.getBucketKey(volumeName, bucketName));
+
+      OmKeyInfo omFileInfo = prepareFileInfo(omMetadataManager, keyArgs,
+              dbFileInfo, keyArgs.getDataSize(), locations,
+              getFileEncryptionInfo(keyArgs), ozoneManager.getPrefixManager(),
+              bucketInfo, pathInfoV1, trxnLogIndex,
+              pathInfoV1.getLeafNodeObjectId(),
+              ozoneManager.isRatisEnabled());
+
+      long openVersion = omFileInfo.getLatestVersionLocations().getVersion();
+      long clientID = createFileRequest.getClientID();
+      String dbOpenFileName = omMetadataManager.getOpenFileName(
+              pathInfoV1.getLastKnownParentId(), pathInfoV1.getLeafNodeName(),
+              clientID);
+
+      // Append new blocks
+      List<OmKeyLocationInfo> newLocationList = keyArgs.getKeyLocationsList()
+          .stream().map(OmKeyLocationInfo::getFromProtobuf)
+          .collect(Collectors.toList());
+      omFileInfo.appendNewBlocks(newLocationList, false);
+
+      omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
+      // check bucket and volume quota
+      long preAllocatedSpace = newLocationList.size()
+              * ozoneManager.getScmBlockSize()
+              * omFileInfo.getFactor().getNumber();
+      checkBucketQuotaInBytes(omBucketInfo, preAllocatedSpace);
+      checkBucketQuotaInNamespace(omBucketInfo, 1L);
+
+      // Add to cache entry can be done outside of lock for this openKey.
+      // Even if bucket gets deleted, when commitKey we shall identify if
+      // bucket gets deleted.
+      OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager,
+              dbOpenFileName, omFileInfo, pathInfoV1.getLeafNodeName(),
+              trxnLogIndex);
+
+      // Add cache entries for the prefix directories.
+      // Skip adding for the file key itself, until Key Commit.
+      OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager,
+              Optional.absent(), Optional.of(missingParentInfos),
+              trxnLogIndex);
+
+      omBucketInfo.incrUsedBytes(preAllocatedSpace);
+      // Update namespace quota
+      omBucketInfo.incrUsedNamespace(1L);
+
+      // Prepare response. Sets user given full key name in the 'keyName'
+      // attribute in response object.
+      int clientVersion = getOmRequest().getVersion();
+      omResponse.setCreateFileResponse(CreateFileResponse.newBuilder()
+          .setKeyInfo(omFileInfo.getProtobuf(keyName, clientVersion))
+          .setID(clientID)
+          .setOpenVersion(openVersion).build())
+          .setCmdType(Type.CreateFile);
+      omClientResponse = new OMFileCreateResponseV1(omResponse.build(),
+              omFileInfo, missingParentInfos, clientID, omBucketInfo);
+
+      result = Result.SUCCESS;
+    } catch (IOException ex) {
+      result = Result.FAILURE;
+      exception = ex;
+      omMetrics.incNumCreateFileFails();
+      omResponse.setCmdType(Type.CreateFile);
+      omClientResponse = new OMFileCreateResponse(createErrorOMResponse(
+            omResponse, exception));
+    } finally {
+      addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
+          omDoubleBufferHelper);
+      if (acquiredLock) {
+        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
+            bucketName);
+      }
+    }
+
+    // Audit Log outside the lock
+    auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(
+        OMAction.CREATE_FILE, auditMap, exception,
+        getOmRequest().getUserInfo()));
+
+    switch (result) {
+    case SUCCESS:
+      omMetrics.incNumKeys(numKeysCreated);
+      LOG.debug("File created. Volume:{}, Bucket:{}, Key:{}", volumeName,
+          bucketName, keyName);
+      break;
+    case FAILURE:
+      LOG.error("File create failed. Volume:{}, Bucket:{}, Key{}.",
+          volumeName, bucketName, keyName, exception);
+      break;
+    default:
+      LOG.error("Unrecognized Result for OMFileCreateRequest: {}",
+          createFileRequest);
+    }
+
+    return omClientResponse;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index d5543ba..ce8d49b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -25,6 +25,7 @@ import java.util.Iterator;
 import java.util.List;
 
 import com.google.common.base.Optional;
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.OzoneAcl;
@@ -34,6 +35,7 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.jetbrains.annotations.Nullable;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -396,7 +398,7 @@ public final class OMFileRequest {
   /**
    * Adding directory info to the Table cache.
    *
-   * @param omMetadataManager  OM Metdata Manager
+   * @param omMetadataManager  OM Metadata Manager
    * @param dirInfo            directory info
    * @param missingParentInfos list of the parents to be added to DB
    * @param trxnLogIndex       transaction log index
@@ -422,4 +424,132 @@ public final class OMFileRequest {
     }
   }
 
+  /**
+   * Adding Key info to the openFile Table cache.
+   *
+   * @param omMetadataManager OM Metadata Manager
+   * @param dbOpenFileName    open file name key
+   * @param omFileInfo        key info
+   * @param fileName          file name
+   * @param trxnLogIndex      transaction log index
+   * Note: the cached OmKeyInfo keeps only the leaf node name in keyName.
+   */
+  public static void addOpenFileTableCacheEntry(
+          OMMetadataManager omMetadataManager, String dbOpenFileName,
+          @Nullable OmKeyInfo omFileInfo, String fileName, long trxnLogIndex) {
+
+    Optional<OmKeyInfo> keyInfoOptional = Optional.absent();
+    if (omFileInfo != null) {
+      // New key format for the openFileTable.
+      // For example, the user given key path is '/a/b/c/d/e/file1', then in DB
+      // keyName field stores only the leaf node name, which is 'file1'.
+      omFileInfo.setKeyName(fileName);
+      keyInfoOptional = Optional.of(omFileInfo);
+    }
+
+    omMetadataManager.getOpenKeyTable().addCacheEntry(
+            new CacheKey<>(dbOpenFileName),
+            new CacheValue<>(keyInfoOptional, trxnLogIndex));
+  }
+
+  /**
+   * Adding Key info to the file table cache.
+   *
+   * @param omMetadataManager OM Metadata Manager
+   * @param dbFileKey         file name key
+   * @param omFileInfo        key info
+   * @param fileName          file name
+   * @param trxnLogIndex      transaction log index
+   * Note: the cached OmKeyInfo keeps only the leaf node name in keyName.
+   */
+  public static void addFileTableCacheEntry(
+          OMMetadataManager omMetadataManager, String dbFileKey,
+          OmKeyInfo omFileInfo, String fileName, long trxnLogIndex) {
+
+    // New key format for the fileTable.
+    // For example, the user given key path is '/a/b/c/d/e/file1', then in DB
+    // keyName field stores only the leaf node name, which is 'file1'.
+    omFileInfo.setKeyName(fileName);
+
+    omMetadataManager.getKeyTable().addCacheEntry(
+            new CacheKey<>(dbFileKey),
+            new CacheValue<>(Optional.of(omFileInfo), trxnLogIndex));
+  }
+
+  /**
+   * Adding omKeyInfo to open file table.
+   *
+   * @param omMetadataMgr    OM Metadata Manager
+   * @param batchOp          batch of db operations
+   * @param omFileInfo       omKeyInfo
+   * @param openKeySessionID clientID
+   * @throws IOException DB failure
+   */
+  public static void addToOpenFileTable(OMMetadataManager omMetadataMgr,
+                                        BatchOperation batchOp,
+                                        OmKeyInfo omFileInfo,
+                                        long openKeySessionID)
+          throws IOException {
+
+    String dbOpenFileKey = omMetadataMgr.getOpenFileName(
+            omFileInfo.getParentObjectID(), omFileInfo.getFileName(),
+            openKeySessionID);
+
+    omMetadataMgr.getOpenKeyTable().putWithBatch(batchOp, dbOpenFileKey,
+            omFileInfo);
+  }
+
+  /**
+   * Adding omKeyInfo to file table.
+   *
+   * @param omMetadataMgr OM Metadata Manager
+   * @param batchOp       batch of db operations
+   * @param omFileInfo    omKeyInfo
+   * @throws IOException  DB failure
+   */
+  public static void addToFileTable(OMMetadataManager omMetadataMgr,
+                                    BatchOperation batchOp,
+                                    OmKeyInfo omFileInfo)
+          throws IOException {
+
+    String dbFileKey = omMetadataMgr.getOzonePathKey(
+            omFileInfo.getParentObjectID(), omFileInfo.getFileName());
+
+    omMetadataMgr.getKeyTable().putWithBatch(batchOp,
+            dbFileKey, omFileInfo);
+  }
+
+  /**
+   * Gets the om key info from the open key table if the openFileTable flag
+   * is true, otherwise gets it from the key table.
+   *
+   * @param openFileTable if true, read the key info from openFileTable,
+   *                      otherwise from fileTable
+   * @param omMetadataMgr OM Metadata Manager
+   * @param dbOpenFileKey open file key name in DB
+   * @param keyName       key name
+   * @return om key info
+   * @throws IOException DB failure
+   */
+  public static OmKeyInfo getOmKeyInfoFromFileTable(boolean openFileTable,
+      OMMetadataManager omMetadataMgr, String dbOpenFileKey, String keyName)
+          throws IOException {
+
+    OmKeyInfo dbOmKeyInfo;
+    if (openFileTable) {
+      dbOmKeyInfo = omMetadataMgr.getOpenKeyTable().get(dbOpenFileKey);
+    } else {
+      dbOmKeyInfo = omMetadataMgr.getKeyTable().get(dbOpenFileKey);
+    }
+
+    // DB OMKeyInfo will store only fileName into keyName field. This
+    // function is to set user given keyName into the OmKeyInfo object.
+    // For example, the user given key path is '/a/b/c/d/e/file1', then in DB
+    // keyName field stores only the leaf node name, which is 'file1'.
+    if (dbOmKeyInfo != null) {
+      dbOmKeyInfo.setKeyName(keyName);
+    }
+    return dbOmKeyInfo;
+  }
+
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
index fe72ea2..c0bc773 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
@@ -233,6 +233,30 @@ public class OMKeyCommitRequest extends OMKeyRequest {
     auditLog(auditLogger, buildAuditMessage(OMAction.COMMIT_KEY, auditMap,
           exception, getOmRequest().getUserInfo()));
 
+    processResult(commitKeyRequest, volumeName, bucketName, keyName, omMetrics,
+            exception, omKeyInfo, result);
+
+    return omClientResponse;
+  }
+
+  /**
+   * Process result of om request execution.
+   *
+   * @param commitKeyRequest commit key request
+   * @param volumeName       volume name
+   * @param bucketName       bucket name
+   * @param keyName          key name
+   * @param omMetrics        om metrics
+   * @param exception        exception trace
+   * @param omKeyInfo        omKeyInfo
+   * @param result           stores the result of the execution
+   */
+  @SuppressWarnings("parameternumber")
+  protected void processResult(CommitKeyRequest commitKeyRequest,
+                               String volumeName, String bucketName,
+                               String keyName, OMMetrics omMetrics,
+                               IOException exception, OmKeyInfo omKeyInfo,
+                               Result result) {
     switch (result) {
     case SUCCESS:
       // As when we commit the key, then it is visible in ozone, so we should
@@ -244,18 +268,16 @@ public class OMKeyCommitRequest extends OMKeyRequest {
         omMetrics.incNumKeys();
       }
       LOG.debug("Key committed. Volume:{}, Bucket:{}, Key:{}", volumeName,
-          bucketName, keyName);
+              bucketName, keyName);
       break;
     case FAILURE:
-      LOG.error("Key commit failed. Volume:{}, Bucket:{}, Key:{}.",
-          volumeName, bucketName, keyName, exception);
+      LOG.error("Key commit failed. Volume:{}, Bucket:{}, Key:{}. Exception:{}",
+              volumeName, bucketName, keyName, exception);
       omMetrics.incNumKeyCommitFails();
       break;
     default:
       LOG.error("Unrecognized Result for OMKeyCommitRequest: {}",
-          commitKeyRequest);
+              commitKeyRequest);
     }
-
-    return omClientResponse;
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java
similarity index 53%
copy from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
copy to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java
index fe72ea2..3a7fd6d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java
@@ -18,87 +18,57 @@
 
 package org.apache.hadoop.ozone.om.request.key;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import org.apache.hadoop.ozone.audit.AuditLogger;
 import org.apache.hadoop.ozone.audit.OMAction;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OMMetrics;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.key.OMKeyCommitResponse;
+import org.apache.hadoop.ozone.om.response.key.OMKeyCommitResponseV1;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CommitKeyRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocation;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
 
 /**
- * Handles CommitKey request.
+ * Handles CommitKey request layout version V1.
  */
-public class OMKeyCommitRequest extends OMKeyRequest {
+public class OMKeyCommitRequestV1 extends OMKeyCommitRequest {
 
   private static final Logger LOG =
-      LoggerFactory.getLogger(OMKeyCommitRequest.class);
+          LoggerFactory.getLogger(OMKeyCommitRequestV1.class);
 
-  public OMKeyCommitRequest(OMRequest omRequest) {
+  public OMKeyCommitRequestV1(OMRequest omRequest) {
     super(omRequest);
   }
 
   @Override
-  public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
-    CommitKeyRequest commitKeyRequest = getOmRequest().getCommitKeyRequest();
-    Preconditions.checkNotNull(commitKeyRequest);
-
-    KeyArgs keyArgs = commitKeyRequest.getKeyArgs();
-
-    // Verify key name
-    final boolean checkKeyNameEnabled = ozoneManager.getConfiguration()
-         .getBoolean(OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_KEY,
-                 OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_DEFAULT);
-    if(checkKeyNameEnabled){
-      OmUtils.validateKeyName(StringUtils.removeEnd(keyArgs.getKeyName(),
-              OzoneConsts.FS_FILE_COPYING_TEMP_SUFFIX));
-    }
-
-    KeyArgs.Builder newKeyArgs =
-        keyArgs.toBuilder().setModificationTime(Time.now())
-            .setKeyName(validateAndNormalizeKey(
-                ozoneManager.getEnableFileSystemPaths(), keyArgs.getKeyName()));
-
-    return getOmRequest().toBuilder()
-        .setCommitKeyRequest(commitKeyRequest.toBuilder()
-            .setKeyArgs(newKeyArgs)).setUserInfo(getUserInfo()).build();
-  }
-
-  @Override
   @SuppressWarnings("methodlength")
   public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
       long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
@@ -119,10 +89,11 @@ public class OMKeyCommitRequest extends OMKeyRequest {
     Map<String, String> auditMap = buildKeyArgsAuditMap(commitKeyArgs);
 
     OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
-        getOmRequest());
+            getOmRequest());
 
     IOException exception = null;
     OmKeyInfo omKeyInfo = null;
+    OmVolumeArgs omVolumeArgs = null;
     OmBucketInfo omBucketInfo = null;
     OMClientResponse omClientResponse = null;
     boolean bucketLockAcquired = false;
@@ -137,14 +108,13 @@ public class OMKeyCommitRequest extends OMKeyRequest {
 
       // check Acl
       checkKeyAclsInOpenKeyTable(ozoneManager, volumeName, bucketName,
-          keyName, IAccessAuthorizer.ACLType.WRITE,
-          commitKeyRequest.getClientID());
+              keyName, IAccessAuthorizer.ACLType.WRITE,
+              commitKeyRequest.getClientID());
 
-      String dbOzoneKey =
-          omMetadataManager.getOzoneKey(volumeName, bucketName,
-              keyName);
-      String dbOpenKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-          keyName, commitKeyRequest.getClientID());
+
+      String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+      Iterator<Path> pathComponents = Paths.get(keyName).iterator();
+      String dbOpenFileKey = null;
 
       List<OmKeyLocationInfo> locationInfoList = new ArrayList<>();
       for (KeyLocation keyLocation : commitKeyArgs.getKeyLocationsList()) {
@@ -152,110 +122,148 @@ public class OMKeyCommitRequest extends OMKeyRequest {
       }
 
       bucketLockAcquired =
-          omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
-              volumeName, bucketName);
+              omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
+                      volumeName, bucketName);
 
       validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
 
-      // Check for directory exists with same name, if it exists throw error.
-      if (ozoneManager.getEnableFileSystemPaths()) {
-        if (checkDirectoryAlreadyExists(volumeName, bucketName, keyName,
-            omMetadataManager)) {
-          throw new OMException("Can not create file: " + keyName +
-              " as there is already directory in the given path", NOT_A_FILE);
-        }
-        // Ensure the parent exist.
-        if (!"".equals(OzoneFSUtils.getParent(keyName))
-            && !checkDirectoryAlreadyExists(volumeName, bucketName,
-            OzoneFSUtils.getParent(keyName), omMetadataManager)) {
-          throw new OMException("Cannot create file : " + keyName
-              + " as parent directory doesn't exist",
-              OMException.ResultCodes.DIRECTORY_NOT_FOUND);
-        }
-      }
-
-      omKeyInfo = omMetadataManager.getOpenKeyTable().get(dbOpenKey);
+      String fileName = OzoneFSUtils.getFileName(keyName);
+      omBucketInfo = omMetadataManager.getBucketTable().get(bucketKey);
+      long bucketId = omBucketInfo.getObjectID();
+      long parentID = getParentID(bucketId, pathComponents, keyName,
+              omMetadataManager, ozoneManager);
+      String dbFileKey = omMetadataManager.getOzonePathKey(parentID, fileName);
+      dbOpenFileKey = omMetadataManager.getOpenFileName(parentID, fileName,
+              commitKeyRequest.getClientID());
+
+      omKeyInfo = OMFileRequest.getOmKeyInfoFromFileTable(true,
+              omMetadataManager, dbOpenFileKey, keyName);
       if (omKeyInfo == null) {
-        throw new OMException("Failed to commit key, as " + dbOpenKey +
-            "entry is not found in the OpenKey table", KEY_NOT_FOUND);
+        throw new OMException("Failed to commit key, as " + dbOpenFileKey +
+                "entry is not found in the OpenKey table", KEY_NOT_FOUND);
       }
       omKeyInfo.setDataSize(commitKeyArgs.getDataSize());
 
       omKeyInfo.setModificationTime(commitKeyArgs.getModificationTime());
 
       // Update the block length for each block
-      List<OmKeyLocationInfo> allocatedLocationInfoList =
-          omKeyInfo.getLatestVersionLocations().getLocationList();
       omKeyInfo.updateLocationInfoList(locationInfoList, false);
 
       // Set the UpdateID to current transactionLogIndex
       omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
 
       // Add to cache of open key table and key table.
-      omMetadataManager.getOpenKeyTable().addCacheEntry(
-          new CacheKey<>(dbOpenKey),
-          new CacheValue<>(Optional.absent(), trxnLogIndex));
+      OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager, dbFileKey,
+              null, fileName, trxnLogIndex);
 
-      omMetadataManager.getKeyTable().addCacheEntry(
-          new CacheKey<>(dbOzoneKey),
-          new CacheValue<>(Optional.of(omKeyInfo), trxnLogIndex));
+      OMFileRequest.addFileTableCacheEntry(omMetadataManager, dbFileKey,
+              omKeyInfo, fileName, trxnLogIndex);
 
       long scmBlockSize = ozoneManager.getScmBlockSize();
       int factor = omKeyInfo.getFactor().getNumber();
-      omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
       // Block was pre-requested and UsedBytes updated when createKey and
       // AllocatedBlock. The space occupied by the Key shall be based on
       // the actual Key size, and the total Block size applied before should
       // be subtracted.
       long correctedSpace = omKeyInfo.getDataSize() * factor -
-          allocatedLocationInfoList.size() * scmBlockSize * factor;
+              locationInfoList.size() * scmBlockSize * factor;
       omBucketInfo.incrUsedBytes(correctedSpace);
 
-      omClientResponse = new OMKeyCommitResponse(omResponse.build(),
-          omKeyInfo, dbOzoneKey, dbOpenKey, omBucketInfo.copyObject());
+      omClientResponse = new OMKeyCommitResponseV1(omResponse.build(),
+              omKeyInfo, dbFileKey, dbOpenFileKey, omVolumeArgs, omBucketInfo);
 
       result = Result.SUCCESS;
     } catch (IOException ex) {
       result = Result.FAILURE;
       exception = ex;
-      omClientResponse = new OMKeyCommitResponse(createErrorOMResponse(
-          omResponse, exception));
+      omClientResponse = new OMKeyCommitResponseV1(createErrorOMResponse(
+              omResponse, exception));
     } finally {
       addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
-          omDoubleBufferHelper);
+              omDoubleBufferHelper);
 
       if(bucketLockAcquired) {
         omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
-            bucketName);
+                bucketName);
       }
     }
 
     auditLog(auditLogger, buildAuditMessage(OMAction.COMMIT_KEY, auditMap,
-          exception, getOmRequest().getUserInfo()));
-
-    switch (result) {
-    case SUCCESS:
-      // As when we commit the key, then it is visible in ozone, so we should
-      // increment here.
-      // As key also can have multiple versions, we need to increment keys
-      // only if version is 0. Currently we have not complete support of
-      // versioning of keys. So, this can be revisited later.
-      if (omKeyInfo.getKeyLocationVersions().size() == 1) {
-        omMetrics.incNumKeys();
+            exception, getOmRequest().getUserInfo()));
+
+    processResult(commitKeyRequest, volumeName, bucketName, keyName, omMetrics,
+            exception, omKeyInfo, result);
+
+    return omClientResponse;
+  }
+
+
+  /**
+   * Check for directory exists with same name, if it exists throw error.
+   *
+   * @param keyName                  key name
+   * @param ozoneManager             Ozone Manager
+   * @param reachedLastPathComponent true if the path component is a fileName
+   * @throws IOException if directory exists with same name
+   */
+  private void checkDirectoryAlreadyExists(String keyName,
+                                           OzoneManager ozoneManager,
+                                           boolean reachedLastPathComponent)
+          throws IOException {
+    // Reached the last path component, which is expected to be a file name.
+    if (reachedLastPathComponent && ozoneManager.getEnableFileSystemPaths()) {
+      throw new OMException("Can not create file: " + keyName +
+              " as there is already directory in the given path", NOT_A_FILE);
+    }
+  }
+
+  /**
+   * Get parent id for the user given path.
+   *
+   * @param bucketId          bucket id
+   * @param pathComponents    file path elements
+   * @param keyName           user given key name
+   * @param omMetadataManager metadata manager
+   * @return lastKnownParentID
+   * @throws IOException DB failure or parent not exists in DirectoryTable
+   */
+  private long getParentID(long bucketId, Iterator<Path> pathComponents,
+                           String keyName, OMMetadataManager omMetadataManager,
+                           OzoneManager ozoneManager)
+          throws IOException {
+
+    long lastKnownParentId = bucketId;
+
+    // If no sub-dirs then bucketID is the root/parent.
+    if(!pathComponents.hasNext()){
+      return bucketId;
+    }
+
+    OmDirectoryInfo omDirectoryInfo;
+    while (pathComponents.hasNext()) {
+      String nodeName = pathComponents.next().toString();
+      boolean reachedLastPathComponent = !pathComponents.hasNext();
+      String dbNodeName =
+              omMetadataManager.getOzonePathKey(lastKnownParentId, nodeName);
+
+      omDirectoryInfo = omMetadataManager.
+              getDirectoryTable().get(dbNodeName);
+      if (omDirectoryInfo != null) {
+        checkDirectoryAlreadyExists(keyName, ozoneManager,
+                reachedLastPathComponent);
+        lastKnownParentId = omDirectoryInfo.getObjectID();
+      } else {
+        // One of the sub-dirs doesn't exist in the DB. The immediate parent
+        // must exist to commit the key; otherwise the operation fails.
+        if (!reachedLastPathComponent) {
+          throw new OMException("Failed to commit key, as parent directory of "
+                  + keyName + " entry is not found in DirectoryTable",
+                  KEY_NOT_FOUND);
+        }
+        break;
       }
-      LOG.debug("Key committed. Volume:{}, Bucket:{}, Key:{}", volumeName,
-          bucketName, keyName);
-      break;
-    case FAILURE:
-      LOG.error("Key commit failed. Volume:{}, Bucket:{}, Key:{}.",
-          volumeName, bucketName, keyName, exception);
-      omMetrics.incNumKeyCommitFails();
-      break;
-    default:
-      LOG.error("Unrecognized Result for OMKeyCommitRequest: {}",
-          commitKeyRequest);
     }
 
-    return omClientResponse;
+    return lastKnownParentId;
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
index 14d8bac..65fbb4b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
@@ -240,38 +241,6 @@ public abstract class OMKeyRequest extends OMClientRequest {
     return edek;
   }
 
-  /**
-   * Create OmKeyInfo object.
-   * @return OmKeyInfo
-   */
-  @SuppressWarnings("parameterNumber")
-  protected OmKeyInfo createKeyInfo(@Nonnull KeyArgs keyArgs,
-      @Nonnull List<OmKeyLocationInfo> locations,
-      @Nonnull HddsProtos.ReplicationFactor factor,
-      @Nonnull HddsProtos.ReplicationType type, long size,
-      @Nullable FileEncryptionInfo encInfo,
-      @Nonnull PrefixManager prefixManager,
-      @Nullable OmBucketInfo omBucketInfo,
-      long transactionLogIndex, long objectID) {
-    return new OmKeyInfo.Builder()
-        .setVolumeName(keyArgs.getVolumeName())
-        .setBucketName(keyArgs.getBucketName())
-        .setKeyName(keyArgs.getKeyName())
-        .setOmKeyLocationInfos(Collections.singletonList(
-            new OmKeyLocationInfoGroup(0, locations)))
-        .setCreationTime(keyArgs.getModificationTime())
-        .setModificationTime(keyArgs.getModificationTime())
-        .setDataSize(size)
-        .setReplicationType(type)
-        .setReplicationFactor(factor)
-        .setFileEncryptionInfo(encInfo)
-        .setAcls(getAclsForKey(keyArgs, omBucketInfo, prefixManager))
-        .addAllMetadata(KeyValueUtil.getFromProtobuf(keyArgs.getMetadataList()))
-        .setObjectID(objectID)
-        .setUpdateID(transactionLogIndex)
-        .build();
-  }
-
   protected List< OzoneAcl > getAclsForKey(KeyArgs keyArgs,
       OmBucketInfo bucketInfo, PrefixManager prefixManager) {
     List<OzoneAcl> acls = new ArrayList<>();
@@ -311,96 +280,6 @@ public abstract class OMKeyRequest extends OMClientRequest {
   }
 
   /**
-   * Prepare OmKeyInfo which will be persisted to openKeyTable.
-   * @return OmKeyInfo
-   * @throws IOException
-   */
-  @SuppressWarnings("parameternumber")
-  protected OmKeyInfo prepareKeyInfo(
-      @Nonnull OMMetadataManager omMetadataManager,
-      @Nonnull KeyArgs keyArgs, OmKeyInfo dbKeyInfo, long size,
-      @Nonnull List<OmKeyLocationInfo> locations,
-      @Nullable FileEncryptionInfo encInfo,
-      @Nonnull PrefixManager prefixManager,
-      @Nullable OmBucketInfo omBucketInfo,
-      long transactionLogIndex,
-      @Nonnull long objectID,
-      boolean isRatisEnabled)
-      throws IOException {
-    if (keyArgs.getIsMultipartKey()) {
-      return prepareMultipartKeyInfo(omMetadataManager, keyArgs,
-          size, locations, encInfo, prefixManager, omBucketInfo,
-          transactionLogIndex, objectID);
-      //TODO args.getMetadata
-    }
-    if (dbKeyInfo != null) {
-      // TODO: Need to be fixed, as when key already exists, we are
-      //  appending new blocks to existing key.
-      // The key already exist, the new blocks will be added as new version
-      // when locations.size = 0, the new version will have identical blocks
-      // as its previous version
-      dbKeyInfo.addNewVersion(locations, false);
-      dbKeyInfo.setDataSize(size + dbKeyInfo.getDataSize());
-      // The modification time is set in preExecute. Use the same
-      // modification time.
-      dbKeyInfo.setModificationTime(keyArgs.getModificationTime());
-      dbKeyInfo.setUpdateID(transactionLogIndex, isRatisEnabled);
-      return dbKeyInfo;
-    }
-
-    // the key does not exist, create a new object.
-    // Blocks will be appended as version 0.
-    return createKeyInfo(keyArgs, locations, keyArgs.getFactor(),
-        keyArgs.getType(), keyArgs.getDataSize(), encInfo, prefixManager,
-        omBucketInfo, transactionLogIndex, objectID);
-  }
-
-  /**
-   * Prepare OmKeyInfo for multi-part upload part key which will be persisted
-   * to openKeyTable.
-   * @return OmKeyInfo
-   * @throws IOException
-   */
-  @SuppressWarnings("parameternumber")
-  private OmKeyInfo prepareMultipartKeyInfo(
-      @Nonnull OMMetadataManager omMetadataManager,
-      @Nonnull KeyArgs args, long size,
-      @Nonnull List<OmKeyLocationInfo> locations,
-      FileEncryptionInfo encInfo,  @Nonnull PrefixManager prefixManager,
-      @Nullable OmBucketInfo omBucketInfo, @Nonnull long transactionLogIndex,
-      @Nonnull long objectId)
-      throws IOException {
-    HddsProtos.ReplicationFactor factor;
-    HddsProtos.ReplicationType type;
-
-    Preconditions.checkArgument(args.getMultipartNumber() > 0,
-        "PartNumber Should be greater than zero");
-    // When key is multipart upload part key, we should take replication
-    // type and replication factor from original key which has done
-    // initiate multipart upload. If we have not found any such, we throw
-    // error no such multipart upload.
-    String uploadID = args.getMultipartUploadID();
-    Preconditions.checkNotNull(uploadID);
-    String multipartKey = omMetadataManager
-        .getMultipartKey(args.getVolumeName(), args.getBucketName(),
-            args.getKeyName(), uploadID);
-    OmKeyInfo partKeyInfo = omMetadataManager.getOpenKeyTable().get(
-        multipartKey);
-    if (partKeyInfo == null) {
-      throw new OMException("No such Multipart upload is with specified " +
-          "uploadId " + uploadID,
-          OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
-    } else {
-      factor = partKeyInfo.getFactor();
-      type = partKeyInfo.getType();
-    }
-    // For this upload part we don't need to check in KeyTable. As this
-    // is not an actual key, it is a part of the key.
-    return createKeyInfo(args, locations, factor, type, size, encInfo,
-        prefixManager, omBucketInfo, transactionLogIndex, objectId);
-  }
-
-  /**
    * Check Acls for the ozone bucket.
    * @param ozoneManager
    * @param volume
@@ -418,7 +297,6 @@ public abstract class OMKeyRequest extends OMClientRequest {
     }
   }
 
-
   /**
    * Check Acls for the ozone key.
    * @param ozoneManager
@@ -679,4 +557,158 @@ public abstract class OMKeyRequest extends OMClientRequest {
         new CacheKey<>(omMetadataManager.getBucketKey(volume, bucket)))
         .getCacheValue();
   }
+
+  /**
+   * Prepare OmKeyInfo which will be persisted to openKeyTable.
+   * @return OmKeyInfo
+   * @throws IOException
+   */
+  @SuppressWarnings("parameternumber")
+  protected OmKeyInfo prepareKeyInfo(
+          @Nonnull OMMetadataManager omMetadataManager,
+          @Nonnull KeyArgs keyArgs, OmKeyInfo dbKeyInfo, long size,
+          @Nonnull List<OmKeyLocationInfo> locations,
+          @Nullable FileEncryptionInfo encInfo,
+          @Nonnull PrefixManager prefixManager,
+          @Nullable OmBucketInfo omBucketInfo,
+          long transactionLogIndex, long objectID, boolean isRatisEnabled)
+          throws IOException {
+
+    return prepareFileInfo(omMetadataManager, keyArgs, dbKeyInfo, size,
+            locations, encInfo, prefixManager, omBucketInfo, null,
+            transactionLogIndex, objectID, isRatisEnabled);
+  }
+
+  /**
+   * Prepare OmKeyInfo which will be persisted to openKeyTable.
+   * @return OmKeyInfo
+   * @throws IOException
+   */
+  @SuppressWarnings("parameternumber")
+  protected OmKeyInfo prepareFileInfo(
+          @Nonnull OMMetadataManager omMetadataManager,
+          @Nonnull KeyArgs keyArgs, OmKeyInfo dbKeyInfo, long size,
+          @Nonnull List<OmKeyLocationInfo> locations,
+          @Nullable FileEncryptionInfo encInfo,
+          @Nonnull PrefixManager prefixManager,
+          @Nullable OmBucketInfo omBucketInfo,
+          OMFileRequest.OMPathInfoV1 omPathInfo,
+          long transactionLogIndex, long objectID,
+          boolean isRatisEnabled)
+          throws IOException {
+    if (keyArgs.getIsMultipartKey()) {
+      return prepareMultipartFileInfo(omMetadataManager, keyArgs,
+              size, locations, encInfo, prefixManager, omBucketInfo,
+              omPathInfo, transactionLogIndex, objectID);
+      //TODO args.getMetadata
+    }
+    if (dbKeyInfo != null) {
+      // TODO: Need to be fixed, as when key already exists, we are
+      //  appending new blocks to existing key.
+      // The key already exist, the new blocks will be added as new version
+      // when locations.size = 0, the new version will have identical blocks
+      // as its previous version
+      dbKeyInfo.addNewVersion(locations, false);
+      dbKeyInfo.setDataSize(size + dbKeyInfo.getDataSize());
+      // The modification time is set in preExecute. Use the same
+      // modification time.
+      dbKeyInfo.setModificationTime(keyArgs.getModificationTime());
+      dbKeyInfo.setUpdateID(transactionLogIndex, isRatisEnabled);
+      return dbKeyInfo;
+    }
+
+    // the key does not exist, create a new object.
+    // Blocks will be appended as version 0.
+    return createFileInfo(keyArgs, locations, keyArgs.getFactor(),
+            keyArgs.getType(), keyArgs.getDataSize(), encInfo, prefixManager,
+            omBucketInfo, omPathInfo, transactionLogIndex, objectID);
+  }
+
+  /**
+   * Create OmKeyInfo object.
+   * @return OmKeyInfo
+   */
+  @SuppressWarnings("parameterNumber")
+  protected OmKeyInfo createFileInfo(@Nonnull KeyArgs keyArgs,
+      @Nonnull List<OmKeyLocationInfo> locations,
+      @Nonnull HddsProtos.ReplicationFactor factor,
+      @Nonnull HddsProtos.ReplicationType type, long size,
+      @Nullable FileEncryptionInfo encInfo,
+      @Nonnull PrefixManager prefixManager,
+      @Nullable OmBucketInfo omBucketInfo,
+      OMFileRequest.OMPathInfoV1 omPathInfo,
+      long transactionLogIndex, long objectID) {
+
+    OmKeyInfo.Builder builder = new OmKeyInfo.Builder();
+    builder.setVolumeName(keyArgs.getVolumeName())
+            .setBucketName(keyArgs.getBucketName())
+            .setKeyName(keyArgs.getKeyName())
+            .setOmKeyLocationInfos(Collections.singletonList(
+                    new OmKeyLocationInfoGroup(0, locations)))
+            .setCreationTime(keyArgs.getModificationTime())
+            .setModificationTime(keyArgs.getModificationTime())
+            .setDataSize(size)
+            .setReplicationType(type)
+            .setReplicationFactor(factor)
+            .setFileEncryptionInfo(encInfo)
+            .setAcls(getAclsForKey(keyArgs, omBucketInfo, prefixManager))
+            .addAllMetadata(KeyValueUtil.getFromProtobuf(
+                    keyArgs.getMetadataList()))
+            .setUpdateID(transactionLogIndex);
+    if (omPathInfo != null) {
+      // FileTable metadata format
+      objectID = omPathInfo.getLeafNodeObjectId();
+      builder.setParentObjectID(omPathInfo.getLastKnownParentId());
+      builder.setFileName(omPathInfo.getLeafNodeName());
+    }
+    builder.setObjectID(objectID);
+    return builder.build();
+  }
+
+  /**
+   * Prepare OmKeyInfo for multi-part upload part key which will be persisted
+   * to openKeyTable.
+   * @return OmKeyInfo
+   * @throws IOException
+   */
+  @SuppressWarnings("parameternumber")
+  private OmKeyInfo prepareMultipartFileInfo(
+          @Nonnull OMMetadataManager omMetadataManager,
+          @Nonnull KeyArgs args, long size,
+          @Nonnull List<OmKeyLocationInfo> locations,
+          FileEncryptionInfo encInfo,  @Nonnull PrefixManager prefixManager,
+          @Nullable OmBucketInfo omBucketInfo,
+          OMFileRequest.OMPathInfoV1 omPathInfo,
+          @Nonnull long transactionLogIndex, long objectID)
+          throws IOException {
+    HddsProtos.ReplicationFactor factor;
+    HddsProtos.ReplicationType type;
+
+    Preconditions.checkArgument(args.getMultipartNumber() > 0,
+            "PartNumber Should be greater than zero");
+    // When key is multipart upload part key, we should take replication
+    // type and replication factor from original key which has done
+    // initiate multipart upload. If we have not found any such, we throw
+    // error no such multipart upload.
+    String uploadID = args.getMultipartUploadID();
+    Preconditions.checkNotNull(uploadID);
+    String multipartKey = omMetadataManager
+            .getMultipartKey(args.getVolumeName(), args.getBucketName(),
+                    args.getKeyName(), uploadID);
+    OmKeyInfo partKeyInfo = omMetadataManager.getOpenKeyTable().get(
+            multipartKey);
+    if (partKeyInfo == null) {
+      throw new OMException("No such Multipart upload is with specified " +
+              "uploadId " + uploadID,
+              OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
+    } else {
+      factor = partKeyInfo.getFactor();
+      type = partKeyInfo.getType();
+    }
+    // For this upload part we don't need to check in KeyTable. As this
+    // is not an actual key, it is a part of the key.
+    return createFileInfo(args, locations, factor, type, size, encInfo,
+            prefixManager, omBucketInfo, omPathInfo, transactionLogIndex,
+            objectID);
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseV1.java
new file mode 100644
index 0000000..ccaaa6b
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseV1.java
@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.file;
+
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+
+import javax.annotation.Nonnull;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
+
+/**
+ * Response for create file request layout version V1.
+ */
+@CleanupTableInfo(cleanupTables = OPEN_FILE_TABLE)
+public class OMFileCreateResponseV1 extends OMFileCreateResponse {
+
+  private List<OmDirectoryInfo> parentDirInfos;
+
+  public OMFileCreateResponseV1(@Nonnull OMResponse omResponse,
+                                @Nonnull OmKeyInfo omKeyInfo,
+                                @Nonnull List<OmDirectoryInfo> parentDirInfos,
+                                long openKeySessionID,
+                                @Nonnull OmBucketInfo omBucketInfo) {
+    super(omResponse, omKeyInfo, new ArrayList<>(), openKeySessionID,
+        omBucketInfo);
+    this.parentDirInfos = parentDirInfos;
+  }
+
+  @Override
+  public void addToDBBatch(OMMetadataManager omMetadataMgr,
+                              BatchOperation batchOp) throws IOException {
+
+    /**
+     * Create parent directory entries during Key Create - do not wait
+     * for Key Commit request.
+     * XXX handle stale directory entries.
+     */
+    if (parentDirInfos != null) {
+      for (OmDirectoryInfo parentDirInfo : parentDirInfos) {
+        String parentKey = parentDirInfo.getPath();
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("putWithBatch adding parent : key {} info : {}", parentKey,
+                  parentDirInfo);
+        }
+        omMetadataMgr.getDirectoryTable().putWithBatch(batchOp, parentKey,
+                parentDirInfo);
+      }
+    }
+
+    OMFileRequest.addToOpenFileTable(omMetadataMgr, batchOp, getOmKeyInfo(),
+            getOpenKeySessionID());
+
+    // update bucket usedBytes.
+    omMetadataMgr.getBucketTable().putWithBatch(batchOp,
+            omMetadataMgr.getBucketKey(getOmKeyInfo().getVolumeName(),
+                    getOmKeyInfo().getBucketName()), getOmBucketInfo());
+  }
+
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java
index 5d43b27..ebd3263 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java
@@ -79,4 +79,19 @@ public class OMKeyCommitResponse extends OMClientResponse {
             omBucketInfo.getBucketName()), omBucketInfo);
   }
 
+  protected String getOpenKeyName() {
+    return openKeyName;
+  }
+
+  protected OmKeyInfo getOmKeyInfo() {
+    return omKeyInfo;
+  }
+
+  protected OmBucketInfo getOmBucketInfo() {
+    return omBucketInfo;
+  }
+
+  protected String getOzoneKeyName() {
+    return ozoneKeyName;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponseV1.java
similarity index 59%
copy from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java
copy to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponseV1.java
index 5d43b27..c0840e3 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponseV1.java
@@ -18,65 +18,60 @@
 
 package org.apache.hadoop.ozone.om.response.key;
 
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
 
-import java.io.IOException;
 import javax.annotation.Nonnull;
 
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE;
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE;
+import java.io.IOException;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE;
 
 /**
- * Response for CommitKey request.
+ * Response for CommitKey request layout version V1.
  */
-@CleanupTableInfo(cleanupTables = {OPEN_KEY_TABLE, KEY_TABLE})
-public class OMKeyCommitResponse extends OMClientResponse {
-
-  private OmKeyInfo omKeyInfo;
-  private String ozoneKeyName;
-  private String openKeyName;
-  private OmBucketInfo omBucketInfo;
+@CleanupTableInfo(cleanupTables = {OPEN_FILE_TABLE, FILE_TABLE})
+public class OMKeyCommitResponseV1 extends OMKeyCommitResponse {
 
-  public OMKeyCommitResponse(@Nonnull OMResponse omResponse,
-      @Nonnull OmKeyInfo omKeyInfo, String ozoneKeyName, String openKeyName,
-      @Nonnull OmBucketInfo omBucketInfo) {
-    super(omResponse);
-    this.omKeyInfo = omKeyInfo;
-    this.ozoneKeyName = ozoneKeyName;
-    this.openKeyName = openKeyName;
-    this.omBucketInfo = omBucketInfo;
+  public OMKeyCommitResponseV1(@Nonnull OMResponse omResponse,
+                               @Nonnull OmKeyInfo omKeyInfo,
+                               String ozoneKeyName, String openKeyName,
+                               @Nonnull OmVolumeArgs omVolumeArgs,
+                               @Nonnull OmBucketInfo omBucketInfo) {
+    super(omResponse, omKeyInfo, ozoneKeyName, openKeyName,
+            omBucketInfo);
   }
 
   /**
    * For when the request is not successful.
    * For a successful request, the other constructor should be used.
    */
-  public OMKeyCommitResponse(@Nonnull OMResponse omResponse) {
+  public OMKeyCommitResponseV1(@Nonnull OMResponse omResponse) {
     super(omResponse);
     checkStatusNotOK();
   }
 
   @Override
   public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
+                           BatchOperation batchOperation) throws IOException {
 
     // Delete from OpenKey table
     omMetadataManager.getOpenKeyTable().deleteWithBatch(batchOperation,
-        openKeyName);
+            getOpenKeyName());
 
-    omMetadataManager.getKeyTable().putWithBatch(batchOperation, ozoneKeyName,
-        omKeyInfo);
+    OMFileRequest.addToFileTable(omMetadataManager, batchOperation,
+            getOmKeyInfo());
 
     // update bucket usedBytes.
     omMetadataManager.getBucketTable().putWithBatch(batchOperation,
-        omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(),
-            omBucketInfo.getBucketName()), omBucketInfo);
+            omMetadataManager.getBucketKey(getOmBucketInfo().getVolumeName(),
+                    getOmBucketInfo().getBucketName()), getOmBucketInfo());
   }
-
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java
index 98b1927..d170ef4 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java
@@ -69,7 +69,7 @@ public class OMKeyCreateResponse extends OMClientResponse {
   }
 
   @Override
-  protected void addToDBBatch(OMMetadataManager omMetadataManager,
+  public void addToDBBatch(OMMetadataManager omMetadataManager,
       BatchOperation batchOperation) throws IOException {
 
     /**
@@ -101,5 +101,17 @@ public class OMKeyCreateResponse extends OMClientResponse {
         omMetadataManager.getBucketKey(omKeyInfo.getVolumeName(),
             omKeyInfo.getBucketName()), omBucketInfo);
   }
+
+  protected long getOpenKeySessionID() {
+    return openKeySessionID;
+  }
+
+  protected OmKeyInfo getOmKeyInfo() {
+    return omKeyInfo;
+  }
+
+  protected OmBucketInfo getOmBucketInfo() {
+    return omBucketInfo;
+  }
 }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
index a6ca2cf..2d4b5cb 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
@@ -69,6 +69,7 @@ import org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType;
 
 import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
 import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
@@ -814,4 +815,113 @@ public final class TestOMRequestUtils {
         new CacheKey<>(dbVolumeKey),
         new CacheValue<>(Optional.of(omVolumeArgs), 1L));
   }
+
+  /**
+   * Create OmKeyInfo.
+   */
+  @SuppressWarnings("parameterNumber")
+  public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName,
+      String keyName, HddsProtos.ReplicationType replicationType,
+      HddsProtos.ReplicationFactor replicationFactor, long objectID,
+      long parentID, long trxnLogIndex, long creationTime) {
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    return new OmKeyInfo.Builder()
+            .setVolumeName(volumeName)
+            .setBucketName(bucketName)
+            .setKeyName(keyName)
+            .setOmKeyLocationInfos(Collections.singletonList(
+                    new OmKeyLocationInfoGroup(0, new ArrayList<>())))
+            .setCreationTime(creationTime)
+            .setModificationTime(Time.now())
+            .setDataSize(1000L)
+            .setReplicationType(replicationType)
+            .setReplicationFactor(replicationFactor)
+            .setObjectID(objectID)
+            .setUpdateID(trxnLogIndex)
+            .setParentObjectID(parentID)
+            .setFileName(fileName)
+            .build();
+  }
+
+
+  /**
+   * Add key entry to KeyTable. If openKeyTable flag is true, adds entries
+   * to openKeyTable, else adds it to keyTable.
+   *
+   * @throws Exception DB failure
+   */
+  public static void addFileToKeyTable(boolean openKeyTable,
+                                       boolean addToCache, String fileName,
+                                       OmKeyInfo omKeyInfo,
+                                       long clientID, long trxnLogIndex,
+                                       OMMetadataManager omMetadataManager)
+          throws Exception {
+    if (openKeyTable) {
+      String ozoneKey = omMetadataManager.getOpenFileName(
+              omKeyInfo.getParentObjectID(), fileName, clientID);
+      if (addToCache) {
+        omMetadataManager.getOpenKeyTable().addCacheEntry(
+                new CacheKey<>(ozoneKey),
+                new CacheValue<>(Optional.of(omKeyInfo), trxnLogIndex));
+      }
+      omMetadataManager.getOpenKeyTable().put(ozoneKey, omKeyInfo);
+    } else {
+      String ozoneKey = omMetadataManager.getOzonePathKey(
+              omKeyInfo.getParentObjectID(), fileName);
+      if (addToCache) {
+        omMetadataManager.getKeyTable().addCacheEntry(new CacheKey<>(ozoneKey),
+                new CacheValue<>(Optional.of(omKeyInfo), trxnLogIndex));
+      }
+      omMetadataManager.getKeyTable().put(ozoneKey, omKeyInfo);
+    }
+  }
+
+  /**
+   * Gets bucketId from OM metadata manager.
+   *
+   * @param volumeName        volume name
+   * @param bucketName        bucket name
+   * @param omMetadataManager metadata manager
+   * @return bucket Id
+   * @throws Exception DB failure
+   */
+  public static long getBucketId(String volumeName, String bucketName,
+                                 OMMetadataManager omMetadataManager)
+          throws Exception {
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    return omBucketInfo.getObjectID();
+  }
+
+  /**
+   * Add path components to the directory table and returns last directory's
+   * object id.
+   *
+   * @param volumeName volume name
+   * @param bucketName bucket name
+   * @param key        key name
+   * @param omMetaMgr  metadata manager
+   * @return last directory object id
+   * @throws Exception DB failure
+   */
+  public static long addParentsToDirTable(String volumeName, String bucketName,
+                                    String key, OMMetadataManager omMetaMgr)
+          throws Exception {
+    long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName,
+            omMetaMgr);
+    String[] pathComponents = StringUtils.split(key, '/');
+    long objectId = bucketId + 10;
+    long parentId = bucketId;
+    long txnID = 50;
+    for (String pathElement : pathComponents) {
+      OmDirectoryInfo omDirInfo =
+              TestOMRequestUtils.createOmDirectoryInfo(pathElement, ++objectId,
+                      parentId);
+      TestOMRequestUtils.addDirKeyToDirTable(true, omDirInfo,
+              txnID, omMetaMgr);
+      parentId = omDirInfo.getObjectID();
+    }
+    return parentId;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java
index 77cf74b..f0f0320 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java
@@ -88,6 +88,7 @@ public class TestOMDirectoryCreateRequestV1 {
     OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
     ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
             folder.newFolder().getAbsolutePath());
+    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
     omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
     when(ozoneManager.getMetrics()).thenReturn(omMetrics);
     when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
index a500f4c..a963f88 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.om.request.file;
 import java.util.List;
 import java.util.UUID;
 
+import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -55,8 +56,7 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
         HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS,
         false, false);
 
-    OMFileCreateRequest omFileCreateRequest =
-        new OMFileCreateRequest(omRequest);
+    OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
 
     OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
     Assert.assertNotEquals(omRequest, modifiedOmRequest);
@@ -96,8 +96,7 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
         HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS,
         false, false);
 
-    OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest(
-        omRequest);
+    OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
 
     OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
     Assert.assertNotEquals(omRequest, modifiedOmRequest);
@@ -121,21 +120,17 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
 
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
-    OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest(
-        omRequest);
+    OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
 
     OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
 
     long id = modifiedOmRequest.getCreateFileRequest().getClientID();
 
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, id);
-
     // Before calling
-    OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
+    OmKeyInfo omKeyInfo = verifyPathInOpenKeyTable(keyName, id, false);
     Assert.assertNull(omKeyInfo);
 
-    omFileCreateRequest = new OMFileCreateRequest(modifiedOmRequest);
+    omFileCreateRequest = getOMFileCreateRequest(modifiedOmRequest);
 
     OMClientResponse omFileCreateResponse =
         omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
@@ -146,8 +141,7 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
 
     // Check open table whether key is added or not.
 
-    omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
-    Assert.assertNotNull(omKeyInfo);
+    omKeyInfo = verifyPathInOpenKeyTable(keyName, id, true);
 
     List< OmKeyLocationInfo > omKeyLocationInfoList =
         omKeyInfo.getLatestVersionLocations().getLocationList();
@@ -179,12 +173,11 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
         HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS,
             false, true);
 
-    OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest(
-        omRequest);
+    OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
 
     OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
 
-    omFileCreateRequest = new OMFileCreateRequest(modifiedOmRequest);
+    omFileCreateRequest = getOMFileCreateRequest(modifiedOmRequest);
 
     OMClientResponse omFileCreateResponse =
         omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
@@ -200,13 +193,11 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
         false, true);
 
     TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager);
-    OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest(
-        omRequest);
+    OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
 
     OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
 
-    omFileCreateRequest = new OMFileCreateRequest(modifiedOmRequest);
-
+    omFileCreateRequest = getOMFileCreateRequest(modifiedOmRequest);
 
     OMClientResponse omFileCreateResponse =
         omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
@@ -311,8 +302,7 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
     testNonRecursivePath(key, false, false, true);
   }
 
-
-  private void testNonRecursivePath(String key,
+  protected void testNonRecursivePath(String key,
       boolean overWrite, boolean recursive, boolean fail) throws Exception {
     OMRequest omRequest = createFileRequest(volumeName, bucketName, key,
         HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS,
@@ -320,12 +310,11 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
 
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
-    OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest(
-        omRequest);
+    OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
 
     OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
 
-    omFileCreateRequest = new OMFileCreateRequest(modifiedOmRequest);
+    omFileCreateRequest = getOMFileCreateRequest(modifiedOmRequest);
 
     OMClientResponse omFileCreateResponse =
         omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
@@ -341,10 +330,9 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
       Assert.assertTrue(omFileCreateResponse.getOMResponse().getSuccess());
       long id = modifiedOmRequest.getCreateFileRequest().getClientID();
 
-      String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-          key, id);
-      OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
-      Assert.assertNotNull(omKeyInfo);
+      verifyKeyNameInCreateFileResponse(key, omFileCreateResponse);
+
+      OmKeyInfo omKeyInfo = verifyPathInOpenKeyTable(key, id, true);
 
       List< OmKeyLocationInfo > omKeyLocationInfoList =
           omKeyInfo.getLatestVersionLocations().getLocationList();
@@ -368,6 +356,14 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
     }
   }
 
+  private void verifyKeyNameInCreateFileResponse(String key,
+      OMClientResponse omFileCreateResponse) {
+    OzoneManagerProtocolProtos.CreateFileResponse createFileResponse =
+            omFileCreateResponse.getOMResponse().getCreateFileResponse();
+    String actualFileName = createFileResponse.getKeyInfo().getKeyName();
+    Assert.assertEquals("Incorrect keyName", key, actualFileName);
+  }
+
   /**
    * Create OMRequest which encapsulates OMFileCreateRequest.
    * @param volumeName
@@ -377,7 +373,8 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
    * @param replicationType
    * @return OMRequest
    */
-  private OMRequest createFileRequest(
+  @NotNull
+  protected OMRequest createFileRequest(
       String volumeName, String bucketName, String keyName,
       HddsProtos.ReplicationFactor replicationFactor,
       HddsProtos.ReplicationType replicationType, boolean overWrite,
@@ -399,4 +396,38 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
         .setCreateFileRequest(createFileRequest).build();
 
   }
+
+  /**
+   * Verify path in open key table. Also, it returns OMKeyInfo for the given
+   * key path.
+   *
+   * @param key      key name
+   * @param id       client id
+   * @param doAssert if true then do assertion, otherwise it just skips.
+   * @return om key info for the given key path.
+   * @throws Exception DB failure
+   */
+  protected OmKeyInfo verifyPathInOpenKeyTable(String key, long id,
+                                               boolean doAssert)
+          throws Exception {
+    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
+            key, id);
+    OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
+    if (doAssert) {
+      Assert.assertNotNull("Failed to find key in OpenKeyTable", omKeyInfo);
+    }
+    return omKeyInfo;
+  }
+
+  /**
+   * Gets OMFileCreateRequest reference.
+   *
+   * @param omRequest om request
+   * @return OMFileCreateRequest reference
+   */
+  @NotNull
+  protected OMFileCreateRequest getOMFileCreateRequest(OMRequest omRequest){
+    return new OMFileCreateRequest(omRequest);
+  }
+
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestV1.java
new file mode 100644
index 0000000..7ded386
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestV1.java
@@ -0,0 +1,192 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.file;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.UUID;
+
+/**
+ * Tests OMFileCreateRequest layout version V1.
+ */
+public class TestOMFileCreateRequestV1 extends TestOMFileCreateRequest {
+
+  @Test
+  public void testValidateAndUpdateCacheWithNonRecursive() throws Exception {
+    testNonRecursivePath(UUID.randomUUID().toString(), false, false, false);
+    testNonRecursivePath("a/b", false, false, true);
+    Assert.assertEquals("Invalid metrics value", 0, omMetrics.getNumKeys());
+
+    // Create parent dirs for the path
+    TestOMRequestUtils.addParentsToDirTable(volumeName, bucketName,
+            "a/b/c", omMetadataManager);
+    String fileNameD = "d";
+    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
+            "a/b/c/" + fileNameD, 0L, HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE, omMetadataManager);
+
+    // cannot create file if directory of same name exists
+    testNonRecursivePath("a/b/c", false, false, true);
+
+    // Delete child key but retain path "a/b" in the key table
+    OmDirectoryInfo dirPathC = getDirInfo("a/b/c");
+    Assert.assertNotNull("Failed to find dir path: a/b/c", dirPathC);
+    String dbFileD = omMetadataManager.getOzonePathKey(
+            dirPathC.getObjectID(), fileNameD);
+    omMetadataManager.getKeyTable().delete(dbFileD);
+    omMetadataManager.getKeyTable().delete(dirPathC.getPath());
+
+    // can create non-recursive because parents already exist.
+    testNonRecursivePath("a/b/e", false, false, false);
+  }
+
+  @Test
+  public void testValidateAndUpdateCacheWithRecursiveAndOverWrite()
+          throws Exception {
+    String key = "c/d/e/f";
+    // Should be able to create file even if parent directories do not exist
+    testNonRecursivePath(key, false, true, false);
+    Assert.assertEquals("Invalid metrics value", 3, omMetrics.getNumKeys());
+
+    // Add the key to key table
+    OmDirectoryInfo omDirInfo = getDirInfo("c/d/e");
+    OmKeyInfo omKeyInfo =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, key,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE,
+                    omDirInfo.getObjectID() + 10,
+                    omDirInfo.getObjectID(), 100, Time.now());
+    TestOMRequestUtils.addFileToKeyTable(false, false,
+            "f", omKeyInfo, -1,
+            omDirInfo.getObjectID() + 10, omMetadataManager);
+
+    // Even if key exists, should be able to create file as overwrite is set
+    // to true
+    testNonRecursivePath(key, true, true, false);
+    testNonRecursivePath(key, false, true, true);
+  }
+
+  @Test
+  public void testValidateAndUpdateCacheWithNonRecursiveAndOverWrite()
+          throws Exception {
+    String parentDir = "c/d/e";
+    String fileName = "f";
+    String key = parentDir + "/" + fileName;
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+    // Create parent dirs for the path
+    long parentId = TestOMRequestUtils.addParentsToDirTable(volumeName,
+            bucketName, parentDir, omMetadataManager);
+
+    // Need to add the path which starts with "c/d/e" to OpenKeyTable, as this
+    // is a non-recursive call and the parent should already exist.
+    testNonRecursivePath(key, false, false, false);
+
+    OmKeyInfo omKeyInfo =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, key,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE,
+                    parentId + 1,
+                    parentId, 100, Time.now());
+    TestOMRequestUtils.addFileToKeyTable(false, false,
+            fileName, omKeyInfo, -1, 50, omMetadataManager);
+
+    // Even if key exists in KeyTable, should be able to create file as
+    // overwrite is set to true
+    testNonRecursivePath(key, true, false, false);
+    testNonRecursivePath(key, false, false, true);
+  }
+
+  protected OmKeyInfo verifyPathInOpenKeyTable(String key, long id,
+                                             boolean doAssert)
+          throws Exception {
+    long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName,
+            omMetadataManager);
+    String[] pathComponents = StringUtils.split(key, '/');
+    long parentId = bucketId;
+    for (int indx = 0; indx < pathComponents.length; indx++) {
+      String pathElement = pathComponents[indx];
+      // Reached last component, which is file name
+      if (indx == pathComponents.length - 1) {
+        String dbOpenFileName = omMetadataManager.getOpenFileName(
+                parentId, pathElement, id);
+        OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable()
+                .get(dbOpenFileName);
+        if (doAssert) {
+          Assert.assertNotNull("Invalid key!", omKeyInfo);
+        }
+        return omKeyInfo;
+      } else {
+        // directory
+        String dbKey = omMetadataManager.getOzonePathKey(parentId,
+                pathElement);
+        OmDirectoryInfo dirInfo =
+                omMetadataManager.getDirectoryTable().get(dbKey);
+        parentId = dirInfo.getObjectID();
+      }
+    }
+    if (doAssert) {
+      Assert.fail("Invalid key!");
+    }
+    return null;
+  }
+
+  private OmDirectoryInfo getDirInfo(String key)
+          throws Exception {
+    long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName,
+            omMetadataManager);
+    String[] pathComponents = StringUtils.split(key, '/');
+    long parentId = bucketId;
+    OmDirectoryInfo dirInfo = null;
+    for (int indx = 0; indx < pathComponents.length; indx++) {
+      String pathElement = pathComponents[indx];
+      // Every path component here is a directory; walk down one
+      // directory level per component.
+      String dbKey = omMetadataManager.getOzonePathKey(parentId,
+              pathElement);
+      dirInfo =
+              omMetadataManager.getDirectoryTable().get(dbKey);
+      parentId = dirInfo.getObjectID();
+    }
+    return dirInfo;
+  }
+
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    return config;
+  }
+
+  protected OMFileCreateRequest getOMFileCreateRequest(OMRequest omRequest) {
+    return new OMFileCreateRequestV1(omRequest);
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
index b327b76..09d499e 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
@@ -19,12 +19,15 @@
 
 package org.apache.hadoop.ozone.om.request.key;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.UUID;
 import java.util.stream.Collectors;
 
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -48,6 +51,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
  */
 public class TestOMKeyCommitRequest extends TestOMKeyRequest {
 
+  private String parentDir;
+
   @Test
   public void testPreExecute() throws Exception {
     doPreExecute(createCommitKeyRequest());
@@ -56,20 +61,15 @@ public class TestOMKeyCommitRequest extends TestOMKeyRequest {
   @Test
   public void testValidateAndUpdateCache() throws Exception {
 
-    OMRequest modifiedOmRequest =
-        doPreExecute(createCommitKeyRequest());
+    OMRequest modifiedOmRequest = doPreExecute(createCommitKeyRequest());
 
     OMKeyCommitRequest omKeyCommitRequest =
-        new OMKeyCommitRequest(modifiedOmRequest);
+            getOmKeyCommitRequest(modifiedOmRequest);
 
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
 
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
-
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
+    String ozoneKey = addKeyToOpenKeyTable();
 
     // Key should not be there in key table, as validateAndUpdateCache is
     // still not called.
@@ -92,6 +92,8 @@ public class TestOMKeyCommitRequest extends TestOMKeyRequest {
     omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
 
     Assert.assertNotNull(omKeyInfo);
+    // DB keyInfo format
+    verifyKeyName(omKeyInfo);
 
     // Check modification time
 
@@ -107,7 +109,14 @@ public class TestOMKeyCommitRequest extends TestOMKeyRequest {
 
     Assert.assertEquals(locationInfoListFromCommitKeyRequest,
         omKeyInfo.getLatestVersionLocations().getLocationList());
+  }
 
+  @Test
+  public void testValidateAndUpdateCacheWithSubDirs() throws Exception {
+    parentDir = "dir1/dir2/dir3/";
+    keyName = parentDir + UUID.randomUUID().toString();
+
+    testValidateAndUpdateCache();
   }
 
   @Test
@@ -117,10 +126,9 @@ public class TestOMKeyCommitRequest extends TestOMKeyRequest {
         doPreExecute(createCommitKeyRequest());
 
     OMKeyCommitRequest omKeyCommitRequest =
-        new OMKeyCommitRequest(modifiedOmRequest);
+            getOmKeyCommitRequest(modifiedOmRequest);
 
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
+    String ozoneKey = getOzonePathKey();
 
     // Key should not be there in key table, as validateAndUpdateCache is
     // still not called.
@@ -147,13 +155,11 @@ public class TestOMKeyCommitRequest extends TestOMKeyRequest {
         doPreExecute(createCommitKeyRequest());
 
     OMKeyCommitRequest omKeyCommitRequest =
-        new OMKeyCommitRequest(modifiedOmRequest);
-
+            getOmKeyCommitRequest(modifiedOmRequest);
 
     TestOMRequestUtils.addVolumeToDB(volumeName, OzoneConsts.OZONE,
         omMetadataManager);
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
+    String ozoneKey = getOzonePathKey();
 
     // Key should not be there in key table, as validateAndUpdateCache is
     // still not called.
@@ -180,14 +186,12 @@ public class TestOMKeyCommitRequest extends TestOMKeyRequest {
         doPreExecute(createCommitKeyRequest());
 
     OMKeyCommitRequest omKeyCommitRequest =
-        new OMKeyCommitRequest(modifiedOmRequest);
-
+            getOmKeyCommitRequest(modifiedOmRequest);
 
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
 
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
+    String ozoneKey = getOzonePathKey();
 
     // Key should not be there in key table, as validateAndUpdateCache is
     // still not called.
@@ -216,7 +220,7 @@ public class TestOMKeyCommitRequest extends TestOMKeyRequest {
   private OMRequest doPreExecute(OMRequest originalOMRequest) throws Exception {
 
     OMKeyCommitRequest omKeyCommitRequest =
-        new OMKeyCommitRequest(originalOMRequest);
+            getOmKeyCommitRequest(originalOMRequest);
 
     OMRequest modifiedOmRequest = omKeyCommitRequest.preExecute(ozoneManager);
 
@@ -294,4 +298,34 @@ public class TestOMKeyCommitRequest extends TestOMKeyRequest {
     return keyLocations;
   }
 
+  protected String getParentDir() {
+    return parentDir;
+  }
+
+  @NotNull
+  protected String getOzonePathKey() throws IOException {
+    return omMetadataManager.getOzoneKey(volumeName, bucketName,
+            keyName);
+  }
+
+  @NotNull
+  protected String addKeyToOpenKeyTable() throws Exception {
+    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
+            clientID, replicationType, replicationFactor, omMetadataManager);
+
+    return getOzonePathKey();
+  }
+
+  @NotNull
+  protected OMKeyCommitRequest getOmKeyCommitRequest(OMRequest omRequest) {
+    return new OMKeyCommitRequest(omRequest);
+  }
+
+  protected void verifyKeyName(OmKeyInfo omKeyInfo) {
+    Assert.assertEquals("Incorrect KeyName", keyName,
+            omKeyInfo.getKeyName());
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    Assert.assertEquals("Incorrect FileName", fileName,
+            omKeyInfo.getFileName());
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestV1.java
new file mode 100644
index 0000000..f5168e1
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestV1.java
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.apache.hadoop.ozone.om.request.key;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Assert;
+
+import java.io.IOException;
+
+/**
+ * Tests OMKeyCommitRequestV1 — key commit request with layout version V1.
+ */
+public class TestOMKeyCommitRequestV1 extends TestOMKeyCommitRequest {
+
+  private long parentID = Long.MIN_VALUE;
+
+  private long getBucketID() throws java.io.IOException {
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    if(omBucketInfo!= null){
+      return omBucketInfo.getObjectID();
+    }
+    // bucket doesn't exist in DB
+    return Long.MIN_VALUE;
+  }
+
+  @Override
+  protected String getOzonePathKey() throws IOException {
+    long bucketID = getBucketID();
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    return omMetadataManager.getOzonePathKey(bucketID, fileName);
+  }
+
+  @Override
+  protected String addKeyToOpenKeyTable() throws Exception {
+    // need to initialize parentID
+    if (getParentDir() == null) {
+      parentID = getBucketID();
+    } else {
+      parentID = TestOMRequestUtils.addParentsToDirTable(volumeName,
+              bucketName, getParentDir(), omMetadataManager);
+    }
+    long objectId = 100;
+
+    OmKeyInfo omKeyInfoV1 =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE, objectId, parentID, 100,
+                    Time.now());
+
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    TestOMRequestUtils.addFileToKeyTable(true, false,
+            fileName, omKeyInfoV1, clientID, txnLogId, omMetadataManager);
+
+    return omMetadataManager.getOzonePathKey(parentID, fileName);
+  }
+
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    return config;
+  }
+
+  @NotNull
+  protected OMKeyCommitRequest getOmKeyCommitRequest(OMRequest omRequest) {
+    return new OMKeyCommitRequestV1(omRequest);
+  }
+
+  protected void verifyKeyName(OmKeyInfo omKeyInfo) {
+    // V1 format - stores fileName in the keyName DB field.
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    Assert.assertEquals("Incorrect FileName", fileName,
+            omKeyInfo.getFileName());
+    Assert.assertEquals("Incorrect KeyName", fileName,
+            omKeyInfo.getKeyName());
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
index 7bf43a7..4bf66bb 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.ozone.om.KeyManagerImpl;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.jetbrains.annotations.NotNull;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
@@ -91,6 +92,7 @@ public class TestOMKeyRequest {
   protected long scmBlockSize = 1000L;
   protected long dataSize;
   protected Random random;
+  protected long txnLogId = 100000L;
 
   // Just setting ozoneManagerDoubleBuffer which does nothing.
   protected OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper =
@@ -103,7 +105,7 @@ public class TestOMKeyRequest {
   public void setup() throws Exception {
     ozoneManager = Mockito.mock(OzoneManager.class);
     omMetrics = OMMetrics.create();
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
+    OzoneConfiguration ozoneConfiguration = getOzoneConfiguration();
     ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
         folder.newFolder().getAbsolutePath());
     omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
@@ -172,6 +174,11 @@ public class TestOMKeyRequest {
         .thenReturn(new ResolvedBucket(volumeAndBucket, volumeAndBucket));
   }
 
+  @NotNull
+  protected OzoneConfiguration getOzoneConfiguration() {
+    return new OzoneConfiguration();
+  }
+
   @After
   public void stop() {
     omMetrics.unRegister();
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java
new file mode 100644
index 0000000..19a1bb9
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.file;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse;
+import org.apache.hadoop.ozone.om.response.key.TestOMKeyCreateResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Assert;
+
+/**
+ * Tests OMFileCreateResponseV1 — file create response with layout version V1.
+ */
+public class TestOMFileCreateResponseV1 extends TestOMKeyCreateResponse {
+
+  @NotNull
+  @Override
+  protected OmKeyInfo getOmKeyInfo() {
+    Assert.assertNotNull(omBucketInfo);
+    return TestOMRequestUtils.createOmKeyInfo(volumeName,
+            omBucketInfo.getBucketName(), keyName, replicationType,
+            replicationFactor,
+            omBucketInfo.getObjectID() + 1,
+            omBucketInfo.getObjectID(), 100, Time.now());
+  }
+
+  @NotNull
+  @Override
+  protected String getOpenKeyName() {
+    Assert.assertNotNull(omBucketInfo);
+    return omMetadataManager.getOpenFileName(
+            omBucketInfo.getObjectID(), keyName, clientID);
+  }
+
+  @NotNull
+  @Override
+  protected OMKeyCreateResponse getOmKeyCreateResponse(OmKeyInfo keyInfo,
+      OmBucketInfo bucketInfo, OMResponse response) {
+
+    return new OMFileCreateResponseV1(response, keyInfo, null, clientID,
+            bucketInfo);
+  }
+
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    return config;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
index 2d63ebd..4d50337 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.om.response.key;
 
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -30,17 +31,20 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 /**
  * Tests OMKeyCommitResponse.
  */
+@SuppressWarnings("visibilitymodifier")
 public class TestOMKeyCommitResponse extends TestOMKeyResponse {
 
+  protected OmBucketInfo omBucketInfo;
+
   @Test
   public void testAddToDBBatch() throws Exception {
 
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
-    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+    omBucketInfo = OmBucketInfo.newBuilder()
         .setVolumeName(volumeName).setBucketName(bucketName)
         .setCreationTime(Time.now()).build();
 
+    OmKeyInfo omKeyInfo = getOmKeyInfo();
+
     OzoneManagerProtocolProtos.OMResponse omResponse =
         OzoneManagerProtocolProtos.OMResponse.newBuilder().setCommitKeyResponse(
             OzoneManagerProtocolProtos.CommitKeyResponse.getDefaultInstance())
@@ -50,17 +54,14 @@ public class TestOMKeyCommitResponse extends TestOMKeyResponse {
 
     // As during commit Key, entry will be already there in openKeyTable.
     // Adding it here.
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
+    addKeyToOpenKeyTable();
 
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, clientID);
+    String openKey = getOpenKeyName();
     Assert.assertTrue(omMetadataManager.getOpenKeyTable().isExist(openKey));
 
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
-    OMKeyCommitResponse omKeyCommitResponse = new OMKeyCommitResponse(
-        omResponse, omKeyInfo, ozoneKey, openKey, omBucketInfo);
+    String ozoneKey = getOzoneKey();
+    OMKeyCommitResponse omKeyCommitResponse = getOmKeyCommitResponse(
+            omKeyInfo, omResponse, openKey, ozoneKey);
 
     omKeyCommitResponse.addToDBBatch(omMetadataManager, batchOperation);
 
@@ -69,8 +70,7 @@ public class TestOMKeyCommitResponse extends TestOMKeyResponse {
 
     // When key commit key is deleted from openKey table and added to keyTable.
     Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
-    Assert.assertTrue(omMetadataManager.getKeyTable().isExist(
-        omMetadataManager.getOzoneKey(volumeName, bucketName, keyName)));
+    Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey));
   }
 
   @Test
@@ -78,7 +78,7 @@ public class TestOMKeyCommitResponse extends TestOMKeyResponse {
 
     OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
         bucketName, keyName, replicationType, replicationFactor);
-    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+    omBucketInfo = OmBucketInfo.newBuilder()
         .setVolumeName(volumeName).setBucketName(bucketName)
         .setCreationTime(Time.now()).build();
 
@@ -89,18 +89,15 @@ public class TestOMKeyCommitResponse extends TestOMKeyResponse {
             .setCmdType(OzoneManagerProtocolProtos.Type.CommitKey)
             .build();
 
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, clientID);
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
+    String openKey = getOpenKeyName();
+    String ozoneKey = getOzoneKey();
 
-    OMKeyCommitResponse omKeyCommitResponse = new OMKeyCommitResponse(
-        omResponse, omKeyInfo, ozoneKey, openKey, omBucketInfo);
+    OMKeyCommitResponse omKeyCommitResponse = getOmKeyCommitResponse(
+            omKeyInfo, omResponse, openKey, ozoneKey);
 
     // As during commit Key, entry will be already there in openKeyTable.
     // Adding it here.
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
+    addKeyToOpenKeyTable();
 
     Assert.assertTrue(omMetadataManager.getOpenKeyTable().isExist(openKey));
 
@@ -113,7 +110,28 @@ public class TestOMKeyCommitResponse extends TestOMKeyResponse {
     // As omResponse is error it is a no-op. So, entry should still be in
     // openKey table.
     Assert.assertTrue(omMetadataManager.getOpenKeyTable().isExist(openKey));
-    Assert.assertFalse(omMetadataManager.getKeyTable().isExist(
-        omMetadataManager.getOzoneKey(volumeName, bucketName, keyName)));
+    Assert.assertFalse(omMetadataManager.getKeyTable().isExist(ozoneKey));
+  }
+
+  @NotNull
+  protected void addKeyToOpenKeyTable() throws Exception {
+    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
+            clientID, replicationType, replicationFactor, omMetadataManager);
+  }
+
+  @NotNull
+  protected String getOzoneKey() {
+    Assert.assertNotNull(omBucketInfo);
+    return omMetadataManager.getOzoneKey(volumeName,
+            omBucketInfo.getBucketName(), keyName);
+  }
+
+  @NotNull
+  protected OMKeyCommitResponse getOmKeyCommitResponse(OmKeyInfo omKeyInfo,
+          OzoneManagerProtocolProtos.OMResponse omResponse, String openKey,
+          String ozoneKey) {
+    Assert.assertNotNull(omBucketInfo);
+    return new OMKeyCommitResponse(omResponse, omKeyInfo, ozoneKey, openKey,
+            omBucketInfo);
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
new file mode 100644
index 0000000..369faa9
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.key;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Assert;
+
+/**
+ * Tests OMKeyCommitResponse layout version V1.
+ */
+public class TestOMKeyCommitResponseV1 extends TestOMKeyCommitResponse {
+
+  @NotNull
+  protected OMKeyCommitResponse getOmKeyCommitResponse(
+          OmVolumeArgs omVolumeArgs, OmKeyInfo omKeyInfo,
+          OzoneManagerProtocolProtos.OMResponse omResponse, String openKey,
+          String ozoneKey) {
+    Assert.assertNotNull(omBucketInfo);
+    return new OMKeyCommitResponseV1(
+            omResponse, omKeyInfo, ozoneKey, openKey, omVolumeArgs,
+            omBucketInfo);
+  }
+
+  @NotNull
+  @Override
+  protected OmKeyInfo getOmKeyInfo() {
+    Assert.assertNotNull(omBucketInfo);
+    return TestOMRequestUtils.createOmKeyInfo(volumeName,
+            omBucketInfo.getBucketName(), keyName, replicationType,
+            replicationFactor,
+            omBucketInfo.getObjectID() + 1,
+            omBucketInfo.getObjectID(), 100, Time.now());
+  }
+
+  @NotNull
+  @Override
+  protected void addKeyToOpenKeyTable() throws Exception {
+    Assert.assertNotNull(omBucketInfo);
+    long parentID = omBucketInfo.getObjectID();
+    long objectId = parentID + 10;
+
+    OmKeyInfo omKeyInfoV1 =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE, objectId, parentID, 100,
+                    Time.now());
+
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    TestOMRequestUtils.addFileToKeyTable(true, false,
+            fileName, omKeyInfoV1, clientID, txnLogId, omMetadataManager);
+  }
+
+  @NotNull
+  @Override
+  protected String getOpenKeyName() {
+    Assert.assertNotNull(omBucketInfo);
+    return omMetadataManager.getOpenFileName(
+            omBucketInfo.getObjectID(), keyName, clientID);
+  }
+
+  @NotNull
+  @Override
+  protected String getOzoneKey() {
+    Assert.assertNotNull(omBucketInfo);
+    return omMetadataManager.getOzonePathKey(omBucketInfo.getObjectID(),
+            keyName);
+  }
+
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    return config;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java
index 4bef2ef..7566afb 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java
@@ -20,11 +20,11 @@ package org.apache.hadoop.ozone.om.response.key;
 
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
 import org.junit.Test;
 
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .CreateKeyResponse;
@@ -40,13 +40,12 @@ public class TestOMKeyCreateResponse extends TestOMKeyResponse {
   @Test
   public void testAddToDBBatch() throws Exception {
 
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
-
-    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+    omBucketInfo = OmBucketInfo.newBuilder()
         .setVolumeName(volumeName).setBucketName(bucketName)
         .setCreationTime(Time.now()).build();
 
+    OmKeyInfo omKeyInfo = getOmKeyInfo();
+
     OMResponse omResponse = OMResponse.newBuilder().setCreateKeyResponse(
                 CreateKeyResponse.getDefaultInstance())
             .setStatus(OzoneManagerProtocolProtos.Status.OK)
@@ -54,11 +53,11 @@ public class TestOMKeyCreateResponse extends TestOMKeyResponse {
             .build();
 
     OMKeyCreateResponse omKeyCreateResponse =
-        new OMKeyCreateResponse(omResponse, omKeyInfo, null, clientID,
-            omBucketInfo);
+            getOmKeyCreateResponse(omKeyInfo, omBucketInfo,
+                    omResponse);
+
+    String openKey = getOpenKeyName();
 
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, clientID);
     Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
     omKeyCreateResponse.addToDBBatch(omMetadataManager, batchOperation);
 
@@ -70,13 +69,13 @@ public class TestOMKeyCreateResponse extends TestOMKeyResponse {
 
   @Test
   public void testAddToDBBatchWithErrorResponse() throws Exception {
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
 
-    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+    omBucketInfo = OmBucketInfo.newBuilder()
         .setVolumeName(volumeName).setBucketName(bucketName)
         .setCreationTime(Time.now()).build();
 
+    OmKeyInfo omKeyInfo = getOmKeyInfo();
+
     OMResponse omResponse = OMResponse.newBuilder().setCreateKeyResponse(
         CreateKeyResponse.getDefaultInstance())
         .setStatus(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND)
@@ -84,12 +83,11 @@ public class TestOMKeyCreateResponse extends TestOMKeyResponse {
         .build();
 
     OMKeyCreateResponse omKeyCreateResponse =
-        new OMKeyCreateResponse(omResponse, omKeyInfo, null, clientID,
-            omBucketInfo);
+            getOmKeyCreateResponse(omKeyInfo, omBucketInfo,
+                    omResponse);
 
     // Before calling addToDBBatch
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, clientID);
+    String openKey = getOpenKeyName();
     Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
 
     omKeyCreateResponse.checkAndUpdateDB(omMetadataManager, batchOperation);
@@ -101,4 +99,12 @@ public class TestOMKeyCreateResponse extends TestOMKeyResponse {
     Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
 
   }
+
+  @NotNull
+  protected OMKeyCreateResponse getOmKeyCreateResponse(OmKeyInfo keyInfo,
+      OmBucketInfo bucketInfo, OMResponse response) {
+
+    return new OMKeyCreateResponse(response, keyInfo, null, clientID,
+            bucketInfo);
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java
index 312fcaf..df7fd01 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java
@@ -21,6 +21,10 @@ package org.apache.hadoop.ozone.om.response.key;
 import java.util.Random;
 import java.util.UUID;
 
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.jetbrains.annotations.NotNull;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
@@ -50,12 +54,14 @@ public class TestOMKeyResponse {
   protected String keyName;
   protected HddsProtos.ReplicationFactor replicationFactor;
   protected HddsProtos.ReplicationType replicationType;
+  protected OmBucketInfo omBucketInfo;
   protected long clientID;
   protected Random random;
+  protected long txnLogId = 100000L;
 
   @Before
   public void setup() throws Exception {
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
+    OzoneConfiguration ozoneConfiguration = getOzoneConfiguration();
     ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
         folder.newFolder().getAbsolutePath());
     omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
@@ -70,6 +76,23 @@ public class TestOMKeyResponse {
     random = new Random();
   }
 
+  @NotNull
+  protected String getOpenKeyName() {
+    return omMetadataManager.getOpenKey(volumeName, bucketName, keyName,
+            clientID);
+  }
+
+  @NotNull
+  protected OmKeyInfo getOmKeyInfo() {
+    return TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+            replicationType, replicationFactor);
+  }
+
+  @NotNull
+  protected OzoneConfiguration getOzoneConfiguration() {
+    return new OzoneConfiguration();
+  }
+
   @After
   public void stop() {
     Mockito.framework().clearInlineMocks();

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 27/29: HDDS-4932. [FSO] Provide list subpaths function to perform recursive ACL check during delete and rename op (#2008)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit be94af8c453be9d6fd1f69234fc2173eaed725e7
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Sat Mar 27 08:30:13 2021 +0530

    HDDS-4932. [FSO] Provide list subpaths function to perform recursive ACL check during delete and rename op (#2008)
---
 .../apache/hadoop/ozone/security/acl/OzoneObj.java |   2 +
 .../hadoop/ozone/security/acl/OzoneObjInfo.java    |  20 ++-
 .../hadoop/ozone/security/acl/OzonePrefixPath.java |  67 +++++++++
 .../hadoop/fs/ozone/TestOzoneFileSystem.java       |  62 ++++++++
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  42 ++----
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |  17 +++
 .../hadoop/ozone/om/OzonePrefixPathImpl.java       | 161 +++++++++++++++++++++
 .../hadoop/ozone/om/request/OMClientRequest.java   |  48 ++++++
 .../ozone/om/request/file/OMFileRequest.java       |  36 +++++
 .../ozone/om/request/key/OMKeyDeleteRequestV1.java |   8 +-
 .../ozone/om/request/key/OMKeyRenameRequestV1.java |  17 ++-
 .../om/request/key/TestOMKeyDeleteRequestV1.java   |  68 +++++++++
 .../hadoop/ozone/security/acl/TestOzoneObj.java    |  77 ++++++++++
 .../ozone/security/acl/TestRequestContext.java     |   7 +-
 14 files changed, 587 insertions(+), 45 deletions(-)

diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java
index 4a95e55..1916d25 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java
@@ -73,6 +73,8 @@ public abstract class OzoneObj implements IOzoneObj {
 
   public abstract String getKeyName();
 
+  public abstract OzonePrefixPath getOzonePrefixPathViewer();
+
   /**
    * Get PrefixName.
    * A prefix name is like a key name under the bucket but
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java
index 42ddbb9..76fb76a 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java
@@ -37,6 +37,8 @@ public final class OzoneObjInfo extends OzoneObj {
   private final String bucketName;
   private final String name;
 
+  private OzonePrefixPath ozonePrefixPath;
+
   /**
    *
    * @param resType
@@ -46,11 +48,13 @@ public final class OzoneObjInfo extends OzoneObj {
    * @param name - keyName/PrefixName
    */
   private OzoneObjInfo(ResourceType resType, StoreType storeType,
-      String volumeName, String bucketName, String name) {
+      String volumeName, String bucketName, String name,
+      OzonePrefixPath ozonePrefixPath) {
     super(resType, storeType);
     this.volumeName = volumeName;
     this.bucketName = bucketName;
     this.name = name;
+    this.ozonePrefixPath = ozonePrefixPath;
   }
 
   @Override
@@ -95,6 +99,10 @@ public final class OzoneObjInfo extends OzoneObj {
     return name;
   }
 
+  @Override
+  public OzonePrefixPath getOzonePrefixPathViewer() {
+    return ozonePrefixPath;
+  }
 
   public static OzoneObjInfo fromProtobuf(OzoneManagerProtocolProtos.OzoneObj
       proto) {
@@ -154,6 +162,7 @@ public final class OzoneObjInfo extends OzoneObj {
     private String volumeName;
     private String bucketName;
     private String name;
+    private OzonePrefixPath ozonePrefixPath;
 
     public static Builder newBuilder() {
       return new Builder();
@@ -207,8 +216,15 @@ public final class OzoneObjInfo extends OzoneObj {
       return this;
     }
 
+    public Builder setOzonePrefixPath(OzonePrefixPath ozonePrefixPathViewer) {
+      this.ozonePrefixPath = ozonePrefixPathViewer;
+      return this;
+    }
+
+
     public OzoneObjInfo build() {
-      return new OzoneObjInfo(resType, storeType, volumeName, bucketName, name);
+      return new OzoneObjInfo(resType, storeType, volumeName, bucketName,
+          name, ozonePrefixPath);
     }
   }
 }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzonePrefixPath.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzonePrefixPath.java
new file mode 100644
index 0000000..4e91d5a
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzonePrefixPath.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.security.acl;
+
+import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+/**
+ * Interface used to list immediate children (sub-paths) for a given keyPrefix.
+ */
+public interface OzonePrefixPath {
+
+  /**
+   * Returns file status for the given key path.
+   *
+   * @return OzoneFileStatus for the given path.
+   */
+  OzoneFileStatus getOzoneFileStatus();
+
+  /**
+   * Lists immediate children (files or directories) of the given keyPrefix.
+   * It won't do recursive traversal. The given keyPrefix parameter should be a
+   * directory type.
+   *
+   * Assume following is the Ozone FS tree structure.
+   *
+   *                  buck-1
+   *                    |
+   *                    a
+   *                    |
+   *      -----------------------------------
+   *     |           |                       |
+   *     b1          b2                      b3
+   *   -----       --------               ----------
+   *   |    |      |    |   |             |    |     |
+   *  c1   c2     d1   d2  d3             e1   e2   e3
+   *                   |                  |
+   *               --------               |
+   *              |        |              |
+   *           d21.txt   d22.txt        e11.txt
+   *
+   * Say, KeyPrefix = "a" will return immediate children [a/b1, a/b2, a/b3].
+   * Say, KeyPrefix = "a/b2" will return children [a/b2/d1, a/b2/d2, a/b2/d3].
+   *
+   * @param keyPrefix  keyPrefix name
+   * @return list of immediate files or directories under the given keyPrefix.
+   * @throws IOException
+   */
+  Iterator<? extends OzoneFileStatus> getChildren(String keyPrefix)
+      throws IOException;
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
index a8e89e1..3d25e72 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
@@ -25,6 +25,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Iterator;
 import java.util.Set;
 import java.util.TreeSet;
 
@@ -48,9 +49,12 @@ import org.apache.hadoop.ozone.TestDataUtil;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneKeyDetails;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OzonePrefixPathImpl;
 import org.apache.hadoop.ozone.om.TrashPolicyOzone;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
+import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -63,6 +67,7 @@ import static org.apache.hadoop.fs.FileSystem.TRASH_PREFIX;
 import static org.apache.hadoop.fs.ozone.Constants.LISTING_PAGE_SIZE;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -1265,4 +1270,61 @@ public class TestOzoneFileSystem {
       }
     }, 1000, 120000);
   }
+
+  @Test
+  public void testListStatusOnLargeDirectoryForACLCheck() throws Exception {
+    String keyName = "dir1/dir2/testListStatusOnLargeDirectoryForACLCheck";
+    Path root = new Path(OZONE_URI_DELIMITER, keyName);
+    Set<String> paths = new TreeSet<>();
+    int numDirs = LISTING_PAGE_SIZE + LISTING_PAGE_SIZE / 2;
+    for (int i = 0; i < numDirs; i++) {
+      Path p = new Path(root, String.valueOf(i));
+      getFs().mkdirs(p);
+      paths.add(keyName + OM_KEY_PREFIX + p.getName());
+    }
+
+    // unknown keyname
+    try {
+      new OzonePrefixPathImpl(getVolumeName(), getBucketName(), "invalidKey",
+          cluster.getOzoneManager().getKeyManager());
+      Assert.fail("Non-existent key name!");
+    } catch (OMException ome) {
+      Assert.assertEquals(OMException.ResultCodes.KEY_NOT_FOUND,
+          ome.getResult());
+    }
+
+    OzonePrefixPathImpl ozonePrefixPath =
+        new OzonePrefixPathImpl(getVolumeName(), getBucketName(), keyName,
+            cluster.getOzoneManager().getKeyManager());
+
+    OzoneFileStatus status = ozonePrefixPath.getOzoneFileStatus();
+    Assert.assertNotNull(status);
+    Assert.assertEquals(keyName, status.getTrimmedName());
+    Assert.assertTrue(status.isDirectory());
+
+    Iterator<? extends OzoneFileStatus> pathItr =
+        ozonePrefixPath.getChildren(keyName);
+    Assert.assertTrue("Failed to list keyPath:" + keyName, pathItr.hasNext());
+
+    Set<String> actualPaths = new TreeSet<>();
+    while (pathItr.hasNext()) {
+      String pathname = pathItr.next().getTrimmedName();
+      actualPaths.add(pathname);
+
+      // no subpaths, expected an empty list
+      Iterator<? extends OzoneFileStatus> subPathItr =
+          ozonePrefixPath.getChildren(pathname);
+      Assert.assertNotNull(subPathItr);
+      Assert.assertFalse("Failed to list keyPath: " + pathname,
+          subPathItr.hasNext());
+    }
+
+    Assert.assertEquals("ListStatus failed", paths.size(),
+        actualPaths.size());
+
+    for (String pathname : actualPaths) {
+      paths.remove(pathname);
+    }
+    Assert.assertTrue("ListStatus failed:" + paths, paths.isEmpty());
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index b2ff866..422a915 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -270,28 +270,6 @@ public class KeyManagerImpl implements KeyManager {
     return metadataManager.getBucketTable().get(bucketKey);
   }
 
-  private void validateBucket(String volumeName, String bucketName)
-      throws IOException {
-    String bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
-    // Check if bucket exists
-    if (metadataManager.getBucketTable().get(bucketKey) == null) {
-      String volumeKey = metadataManager.getVolumeKey(volumeName);
-      // If the volume also does not exist, we should throw volume not found
-      // exception
-      if (metadataManager.getVolumeTable().get(volumeKey) == null) {
-        LOG.error("volume not found: {}", volumeName);
-        throw new OMException("Volume not found",
-            VOLUME_NOT_FOUND);
-      }
-
-      // if the volume exists but bucket does not exist, throw bucket not found
-      // exception
-      LOG.error("bucket not found: {}/{} ", volumeName, bucketName);
-      throw new OMException("Bucket not found",
-          BUCKET_NOT_FOUND);
-    }
-  }
-
   /**
    * Check S3 bucket exists or not.
    * @param volumeName
@@ -322,7 +300,7 @@ public class KeyManagerImpl implements KeyManager {
     String volumeName = args.getVolumeName();
     String bucketName = args.getBucketName();
     String keyName = args.getKeyName();
-    validateBucket(volumeName, bucketName);
+    OMFileRequest.validateBucket(metadataManager, volumeName, bucketName);
     String openKey = metadataManager.getOpenKey(
         volumeName, bucketName, keyName, clientID);
 
@@ -431,7 +409,7 @@ public class KeyManagerImpl implements KeyManager {
     String volumeName = args.getVolumeName();
     String bucketName = args.getBucketName();
     String keyName = args.getKeyName();
-    validateBucket(volumeName, bucketName);
+    OMFileRequest.validateBucket(metadataManager, volumeName, bucketName);
 
     long currentTime = UniqueId.next();
     OmKeyInfo keyInfo;
@@ -615,7 +593,7 @@ public class KeyManagerImpl implements KeyManager {
     try {
       metadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName,
           bucketName);
-      validateBucket(volumeName, bucketName);
+      OMFileRequest.validateBucket(metadataManager, volumeName, bucketName);
       OmKeyInfo keyInfo = metadataManager.getOpenKeyTable().get(openKey);
       if (keyInfo == null) {
         throw new OMException("Failed to commit key, as " + openKey + "entry " +
@@ -1577,7 +1555,7 @@ public class KeyManagerImpl implements KeyManager {
 
     metadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volume, bucket);
     try {
-      validateBucket(volume, bucket);
+      OMFileRequest.validateBucket(metadataManager, volume, bucket);
       String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName);
       OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey);
       if (keyInfo == null) {
@@ -1621,7 +1599,7 @@ public class KeyManagerImpl implements KeyManager {
 
     metadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volume, bucket);
     try {
-      validateBucket(volume, bucket);
+      OMFileRequest.validateBucket(metadataManager, volume, bucket);
       String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName);
       OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey);
       if (keyInfo == null) {
@@ -1662,7 +1640,7 @@ public class KeyManagerImpl implements KeyManager {
 
     metadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volume, bucket);
     try {
-      validateBucket(volume, bucket);
+      OMFileRequest.validateBucket(metadataManager, volume, bucket);
       String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName);
       OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey);
       if (keyInfo == null) {
@@ -1701,7 +1679,7 @@ public class KeyManagerImpl implements KeyManager {
     OmKeyInfo keyInfo;
     metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volume, bucket);
     try {
-      validateBucket(volume, bucket);
+      OMFileRequest.validateBucket(metadataManager, volume, bucket);
       String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName);
       if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
         keyInfo = getOmKeyInfoV1(volume, bucket, keyName);
@@ -1750,7 +1728,7 @@ public class KeyManagerImpl implements KeyManager {
 
     metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volume, bucket);
     try {
-      validateBucket(volume, bucket);
+      OMFileRequest.validateBucket(metadataManager, volume, bucket);
       OmKeyInfo keyInfo;
 
       // For Acl Type "WRITE", the key can only be found in
@@ -1885,7 +1863,7 @@ public class KeyManagerImpl implements KeyManager {
     try {
       // Check if this is the root of the filesystem.
       if (keyName.length() == 0) {
-        validateBucket(volumeName, bucketName);
+        OMFileRequest.validateBucket(metadataManager, volumeName, bucketName);
         return new OzoneFileStatus();
       }
 
@@ -1942,7 +1920,7 @@ public class KeyManagerImpl implements KeyManager {
     try {
       // Check if this is the root of the filesystem.
       if (keyName.length() == 0) {
-        validateBucket(volumeName, bucketName);
+        OMFileRequest.validateBucket(metadataManager, volumeName, bucketName);
         return new OzoneFileStatus();
       }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 2b4484e..d50ff3d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -1771,6 +1771,21 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
         .setAclRights(aclType)
         .setOwnerName(volumeOwner)
         .build();
+
+    return checkAcls(obj, context, throwIfPermissionDenied);
+  }
+
+  /**
+   * CheckAcls for the ozone object.
+   *
+   * @return true if permission granted, false if permission denied.
+   * @throws OMException ResultCodes.PERMISSION_DENIED if permission denied
+   *                     and throwOnPermissionDenied set to true.
+   */
+  public boolean checkAcls(OzoneObj obj, RequestContext context,
+                           boolean throwIfPermissionDenied)
+      throws OMException {
+
     if (!accessAuthorizer.checkAccess(obj, context)) {
       if (throwIfPermissionDenied) {
         LOG.warn("User {} doesn't have {} permission to access {} /{}/{}/{}",
@@ -1790,6 +1805,8 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
     }
   }
 
+
+
   /**
    * Return true if Ozone acl's are enabled, else false.
    *
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzonePrefixPathImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzonePrefixPathImpl.java
new file mode 100644
index 0000000..c30d5dd
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzonePrefixPathImpl.java
@@ -0,0 +1,161 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.security.acl.OzonePrefixPath;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.List;
+import java.util.NoSuchElementException;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
+
+public class OzonePrefixPathImpl implements OzonePrefixPath {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OzonePrefixPathImpl.class);
+  private String volumeName;
+  private String bucketName;
+  private KeyManager keyManager;
+  // TODO: based on need can make batchSize configurable.
+  private int batchSize = 1000;
+  private OzoneFileStatus pathStatus;
+
+  public OzonePrefixPathImpl(String volumeName, String bucketName,
+      String keyPrefix, KeyManager keyManagerImpl) throws IOException {
+    this.volumeName = volumeName;
+    this.bucketName = bucketName;
+    this.keyManager = keyManagerImpl;
+
+    OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
+        .setVolumeName(volumeName)
+        .setBucketName(bucketName)
+        .setKeyName(keyPrefix)
+        .setRefreshPipeline(false)
+        .build();
+    try {
+      pathStatus = keyManager.getFileStatus(omKeyArgs);
+    } catch (OMException ome) {
+      // In the existing non-FSO code, ozone client delete and rename expect
+      // the KEY_NOT_FOUND error code. So convert FILE_NOT_FOUND to KEY_NOT_FOUND.
+      if (ome.getResult() == OMException.ResultCodes.FILE_NOT_FOUND) {
+        throw new OMException(ome.getMessage(), KEY_NOT_FOUND);
+      }
+      throw ome;
+    }
+  }
+
+  @Override
+  public OzoneFileStatus getOzoneFileStatus() {
+    return pathStatus;
+  }
+
+  @Override
+  public Iterator<? extends OzoneFileStatus> getChildren(String keyPrefix)
+      throws IOException {
+
+    return new PathIterator(keyPrefix);
+  }
+
+  class PathIterator implements Iterator<OzoneFileStatus> {
+    private Iterator<OzoneFileStatus> currentIterator;
+    private String keyPrefix;
+    private OzoneFileStatus currentValue;
+
+    /**
+     * Creates an Iterator to iterate over all sub paths of the given keyPrefix.
+     *
+     * @param keyPrefix
+     */
+    PathIterator(String keyPrefix) throws IOException {
+      this.keyPrefix = keyPrefix;
+      this.currentValue = null;
+      List<OzoneFileStatus> statuses = getNextListOfKeys("");
+      if (statuses.size() == 1) {
+        OzoneFileStatus keyStatus = statuses.get(0);
+        if (keyStatus.isFile() && StringUtils.equals(keyPrefix,
+            keyStatus.getTrimmedName())) {
+          throw new OMException("Invalid keyPrefix: " + keyPrefix +
+              ", file type is not allowed, expected directory type.",
+              OMException.ResultCodes.INVALID_KEY_NAME);
+        }
+      }
+      this.currentIterator = statuses.iterator();
+    }
+
+    @Override
+    public boolean hasNext() {
+      if (!currentIterator.hasNext() && currentValue != null) {
+        String keyName = "";
+        try {
+          keyName = currentValue.getTrimmedName();
+          currentIterator =
+              getNextListOfKeys(keyName).iterator();
+        } catch (IOException e) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Exception while listing keys, keyName:" + keyName, e);
+          }
+          return false;
+        }
+      }
+      return currentIterator.hasNext();
+    }
+
+    @Override
+    public OzoneFileStatus next() {
+      if (hasNext()) {
+        currentValue = currentIterator.next();
+        return currentValue;
+      }
+      throw new NoSuchElementException();
+    }
+
+    /**
+     * Gets the next set of key list using keyManager OM interface.
+     *
+     * @param prevKey
+     * @return {@code List<OzoneFileStatus>}
+     */
+    List<OzoneFileStatus> getNextListOfKeys(String prevKey) throws
+        IOException {
+
+      OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
+          .setVolumeName(volumeName)
+          .setBucketName(bucketName)
+          .setKeyName(keyPrefix)
+          .setRefreshPipeline(false)
+          .build();
+
+      List<OzoneFileStatus> statuses = keyManager.listStatus(omKeyArgs, false,
+          prevKey, batchSize);
+
+      // listStatus with a non-null startKey will add startKey as the first
+      // element in the result list. Remove the startKey element as it is a duplicate.
+      if (!statuses.isEmpty() && StringUtils.equals(prevKey,
+          statuses.get(0).getTrimmedName())) {
+        statuses.remove(0);
+      }
+      return statuses;
+    }
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
index 828c9e9..56fff9f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.ozone.audit.AuditEventStatus;
 import org.apache.hadoop.ozone.audit.AuditLogger;
 import org.apache.hadoop.ozone.audit.AuditMessage;
 import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.OzonePrefixPathImpl;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
@@ -38,6 +39,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMReque
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
+import org.apache.hadoop.ozone.security.acl.RequestContext;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -187,6 +190,51 @@ public abstract class OMClientRequest implements RequestAuditor {
   }
 
   /**
+   * Check Acls for the ozone key.
+   * @param ozoneManager
+   * @param volumeName
+   * @param bucketName
+   * @param keyName
+   * @throws IOException
+   */
+  protected void checkACLs(OzoneManager ozoneManager, String volumeName,
+      String bucketName, String keyName, IAccessAuthorizer.ACLType aclType)
+      throws IOException {
+
+    // TODO: Presently not populating sub-paths under a single bucket
+    //  lock. Need to revisit this to handle any concurrent operations
+    //  along with this.
+    OzonePrefixPathImpl pathViewer = new OzonePrefixPathImpl(volumeName,
+        bucketName, keyName, ozoneManager.getKeyManager());
+
+    OzoneObj obj = OzoneObjInfo.Builder.newBuilder()
+        .setResType(OzoneObj.ResourceType.KEY)
+        .setStoreType(OzoneObj.StoreType.OZONE)
+        .setVolumeName(volumeName)
+        .setBucketName(bucketName)
+        .setKeyName(keyName)
+        .setOzonePrefixPath(pathViewer).build();
+
+    boolean isDirectory = pathViewer.getOzoneFileStatus().isDirectory();
+
+    RequestContext.Builder contextBuilder = RequestContext.newBuilder()
+        .setAclRights(aclType)
+        .setRecursiveAccessCheck(isDirectory); // recursive checks for a dir
+
+    // check Acl
+    if (ozoneManager.getAclsEnabled()) {
+      String volumeOwner = ozoneManager.getVolumeOwner(obj.getVolumeName(),
+          contextBuilder.getAclRights(), obj.getResourceType());
+      contextBuilder.setClientUgi(createUGI());
+      contextBuilder.setIp(getRemoteAddress());
+      contextBuilder.setHost(getHostName());
+      contextBuilder.setAclType(IAccessAuthorizer.ACLIdentityType.USER);
+      contextBuilder.setOwnerName(volumeOwner);
+      ozoneManager.checkAcls(obj, contextBuilder.build(), true);
+    }
+  }
+
+  /**
    * Check Acls of ozone object with volOwner given.
    * @param ozoneManager
    * @param resType
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index e42bc6b..27eda71 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -52,8 +52,10 @@ import org.slf4j.LoggerFactory;
 
 import javax.annotation.Nonnull;
 
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND;
 
 /**
  * Base class for file requests.
@@ -607,6 +609,8 @@ public final class OMFileRequest {
       OMMetadataManager omMetadataMgr, String volumeName, String bucketName,
       String keyName, long scmBlockSize) throws IOException {
 
+    OMFileRequest.validateBucket(omMetadataMgr, volumeName, bucketName);
+
     Path keyPath = Paths.get(keyName);
     Iterator<Path> elements = keyPath.iterator();
     String bucketKey = omMetadataMgr.getBucketKey(volumeName, bucketName);
@@ -932,4 +936,36 @@ public final class OMFileRequest {
 
     return lastKnownParentId;
   }
+
+  /**
+   * Validates volume and bucket existence.
+   *
+   * @param metadataManager
+   * @param volumeName
+   * @param bucketName
+   * @throws IOException
+   */
+  public static void validateBucket(OMMetadataManager metadataManager,
+      String volumeName, String bucketName)
+      throws IOException {
+
+    String bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
+    // Check if bucket exists
+    if (metadataManager.getBucketTable().get(bucketKey) == null) {
+      String volumeKey = metadataManager.getVolumeKey(volumeName);
+      // If the volume also does not exist, we should throw volume not found
+      // exception
+      if (metadataManager.getVolumeTable().get(volumeKey) == null) {
+        LOG.error("volume not found: {}", volumeName);
+        throw new OMException("Volume not found",
+            VOLUME_NOT_FOUND);
+      }
+
+      // if the volume exists but bucket does not exist, throw bucket not found
+      // exception
+      LOG.error("bucket not found: {}/{} ", volumeName, bucketName);
+      throw new OMException("Bucket not found",
+          BUCKET_NOT_FOUND);
+    }
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java
index af5c4df..dbf5645 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
@@ -43,7 +42,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteK
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -94,16 +92,14 @@ public class OMKeyDeleteRequestV1 extends OMKeyDeleteRequest {
     boolean acquiredLock = false;
     OMClientResponse omClientResponse = null;
     Result result = null;
-    OmVolumeArgs omVolumeArgs = null;
     OmBucketInfo omBucketInfo = null;
     try {
       keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
       volumeName = keyArgs.getVolumeName();
       bucketName = keyArgs.getBucketName();
 
-      // check Acl
-      checkKeyAcls(ozoneManager, volumeName, bucketName, keyName,
-          IAccessAuthorizer.ACLType.DELETE, OzoneObj.ResourceType.KEY);
+      checkACLs(ozoneManager, volumeName, bucketName, keyName,
+          IAccessAuthorizer.ACLType.DELETE);
 
       acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
           volumeName, bucketName);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestV1.java
index 56dcd6b..4ab9d3a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestV1.java
@@ -38,7 +38,14 @@ import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.om.response.key.OMKeyRenameResponseV1;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.*;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .RenameKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .RenameKeyResponse;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
 import org.slf4j.Logger;
@@ -102,8 +109,12 @@ public class OMKeyRenameRequestV1 extends OMKeyRenameRequest {
 
       // check Acls to see if user has access to perform delete operation on
       // old key and create operation on new key
-      checkKeyAcls(ozoneManager, volumeName, bucketName, fromKeyName,
-              IAccessAuthorizer.ACLType.DELETE, OzoneObj.ResourceType.KEY);
+
+      // check Acl fromKeyName
+      checkACLs(ozoneManager, volumeName, bucketName, fromKeyName,
+          IAccessAuthorizer.ACLType.DELETE);
+
+      // check Acl toKeyName
       checkKeyAcls(ozoneManager, volumeName, bucketName, toKeyName,
               IAccessAuthorizer.ACLType.CREATE, OzoneObj.ResourceType.KEY);
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestV1.java
index 2c43d51..7d58d08 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestV1.java
@@ -20,11 +20,21 @@ package org.apache.hadoop.ozone.om.request.key;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.OzonePrefixPathImpl;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.security.acl.OzonePrefixPath;
 import org.apache.hadoop.util.Time;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
 
 /**
  * Tests OmKeyDelete request layout version V1.
@@ -52,6 +62,7 @@ public class TestOMKeyDeleteRequestV1 extends TestOMKeyDeleteRequest {
                     HddsProtos.ReplicationFactor.ONE,
                     parentId + 1,
                     parentId, 100, Time.now());
+    omKeyInfo.setKeyName(fileName);
     TestOMRequestUtils.addFileToKeyTable(false, false,
             fileName, omKeyInfo, -1, 50, omMetadataManager);
     return omKeyInfo.getPath();
@@ -66,4 +77,61 @@ public class TestOMKeyDeleteRequestV1 extends TestOMKeyDeleteRequest {
     OzoneManagerRatisUtils.setBucketFSOptimized(true);
     return config;
   }
+
+  @Test
+  public void testOzonePrefixPathViewer() throws Exception {
+    // Add volume, bucket and key entries to OM DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+        omMetadataManager);
+
+    String ozoneKey = addKeyToTable();
+
+    OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
+
+    // As we added manually to key table.
+    Assert.assertNotNull(omKeyInfo);
+
+    // OzonePrefixPathImpl on a directory
+    OzonePrefixPathImpl ozonePrefixPath = new OzonePrefixPathImpl(volumeName,
+        bucketName, "c", keyManager);
+    OzoneFileStatus status = ozonePrefixPath.getOzoneFileStatus();
+    Assert.assertNotNull(status);
+    Assert.assertEquals("c", status.getTrimmedName());
+    Assert.assertTrue(status.isDirectory());
+    verifyPath(ozonePrefixPath, "c", "c/d");
+    verifyPath(ozonePrefixPath, "c/d", "c/d/e");
+    verifyPath(ozonePrefixPath, "c/d/e", "c/d/e/file1");
+
+    try {
+      ozonePrefixPath.getChildren("c/d/e/file1");
+      Assert.fail("Should throw INVALID_KEY_NAME as the given path is a file.");
+    } catch (OMException ome) {
+      Assert.assertEquals(OMException.ResultCodes.INVALID_KEY_NAME,
+          ome.getResult());
+    }
+
+    // OzonePrefixPathImpl on a file
+    ozonePrefixPath = new OzonePrefixPathImpl(volumeName,
+        bucketName, "c/d/e/file1", keyManager);
+    status = ozonePrefixPath.getOzoneFileStatus();
+    Assert.assertNotNull(status);
+    Assert.assertEquals("c/d/e/file1", status.getTrimmedName());
+    Assert.assertEquals("c/d/e/file1", status.getKeyInfo().getKeyName());
+    Assert.assertTrue(status.isFile());
+  }
+
+  private void verifyPath(OzonePrefixPath ozonePrefixPath, String pathName,
+                          String expectedPath)
+      throws IOException {
+    Iterator<? extends OzoneFileStatus> pathItr = ozonePrefixPath.getChildren(
+        pathName);
+    Assert.assertTrue("Failed to list keyPaths", pathItr.hasNext());
+    Assert.assertEquals(expectedPath, pathItr.next().getTrimmedName());
+    try{
+      pathItr.next();
+      Assert.fail("Reached end of the list!");
+    } catch (NoSuchElementException nse){
+      // expected
+    }
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObj.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObj.java
new file mode 100644
index 0000000..ab4e60f
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObj.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.security.acl;
+
+import org.apache.hadoop.ozone.om.KeyManager;
+import org.apache.hadoop.ozone.om.OzonePrefixPathImpl;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.mock;
+
+public class TestOzoneObj {
+
+
+  private OzoneObjInfo objInfo;
+  private OzoneObjInfo.Builder builder;
+  private String volume = "vol1";
+  private String bucket = "bucket1";
+  private String key = "key1";
+  private static final OzoneObj.StoreType STORE = OzoneObj.StoreType.OZONE;
+
+  @Test
+  public void testGetPathViewer() throws IOException {
+
+    builder = getBuilder(volume, bucket, key);
+    objInfo = builder.build();
+    assertEquals(objInfo.getVolumeName(), volume);
+    assertNotNull("unexpected path accessor",
+        objInfo.getOzonePrefixPathViewer());
+
+    objInfo = getBuilder(null, null, null).build();
+    assertEquals(objInfo.getVolumeName(), null);
+    assertNotNull("unexpected path accessor",
+        objInfo.getOzonePrefixPathViewer());
+
+    objInfo = getBuilder(volume, null, null).build();
+    assertEquals(objInfo.getVolumeName(), volume);
+    assertNotNull("unexpected path accessor",
+        objInfo.getOzonePrefixPathViewer());
+
+  }
+
+  private OzoneObjInfo.Builder getBuilder(String withVolume,
+      String withBucket, String withKey) throws IOException {
+
+    KeyManager mockKeyManager = mock(KeyManager.class);
+    OzonePrefixPath prefixPathViewer = new OzonePrefixPathImpl("vol1",
+        "buck1", "file", mockKeyManager);
+
+    return OzoneObjInfo.Builder.newBuilder()
+        .setResType(OzoneObj.ResourceType.VOLUME)
+        .setStoreType(STORE)
+        .setVolumeName(withVolume)
+        .setBucketName(withBucket)
+        .setKeyName(withKey)
+        .setOzonePrefixPath(prefixPathViewer);
+  }
+
+}
+
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestRequestContext.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestRequestContext.java
index b8b0363..5e76e09 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestRequestContext.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestRequestContext.java
@@ -20,13 +20,15 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.Assert;
 import org.junit.Test;
 
+import java.io.IOException;
+
 /**
  * Test request context.
  */
 public class TestRequestContext {
 
   @Test
-  public void testRecursiveAccessFlag() {
+  public void testRecursiveAccessFlag() throws IOException {
     RequestContext context = getUserRequestContext("om",
             IAccessAuthorizer.ACLType.CREATE, false, "volume1",
             true);
@@ -78,7 +80,8 @@ public class TestRequestContext {
 
   private RequestContext getUserRequestContext(String username,
       IAccessAuthorizer.ACLType type, boolean isOwner, String ownerName,
-      boolean recursiveAccessCheck) {
+      boolean recursiveAccessCheck) throws IOException {
+
     return RequestContext.getBuilder(
             UserGroupInformation.createRemoteUser(username), null, null,
             type, ownerName, recursiveAccessCheck).build();

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 28/29: HDDS-4495. [FSO]Delete : Implement async cleanup of garbage and orphan sub-dirs/files (#2093)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 055771e1319bccb4f69e62f9bde7e7fbbbdf66ca
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Tue Apr 6 15:36:18 2021 +0530

    HDDS-4495. [FSO]Delete : Implement async cleanup of garbage and orphan sub-dirs/files (#2093)
---
 .../common/src/main/resources/ozone-default.xml    |  19 ++
 .../main/java/org/apache/hadoop/ozone/OmUtils.java |   1 +
 .../org/apache/hadoop/ozone/om/OMConfigKeys.java   |  13 +
 .../TestDirectoryDeletingServiceWithFSOBucket.java | 318 +++++++++++++++++++++
 .../src/main/proto/OmClientProtocol.proto          |  15 +
 .../apache/hadoop/ozone/om/OMMetadataManager.java  |   8 +
 .../hadoop/ozone/om/DirectoryDeletingService.java  | 286 ++++++++++++++++++
 .../org/apache/hadoop/ozone/om/KeyManager.java     |  56 ++++
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 112 ++++++++
 .../hadoop/ozone/om/OmMetadataManagerImpl.java     |  14 +
 .../hadoop/ozone/om/codec/OMDBDefinition.java      |   9 +-
 .../om/ratis/utils/OzoneManagerRatisUtils.java     |   3 +
 .../ozone/om/request/key/OMKeyDeleteRequestV1.java |   6 +-
 .../om/request/key/OMPathsPurgeRequestV1.java      |  64 +++++
 .../om/response/key/OMKeyDeleteResponseV1.java     |  12 +-
 .../om/response/key/OMPathsPurgeResponseV1.java    | 121 ++++++++
 .../om/response/key/TestOMKeyDeleteResponseV1.java |   4 +-
 17 files changed, 1053 insertions(+), 8 deletions(-)

diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 89e07de..665ccd9 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -2770,4 +2770,23 @@
       existing unit test cases won't be affected. New OM version should be 'V1'
     </description>
   </property>
+  <property>
+    <name>ozone.directory.deleting.service.interval</name>
+    <value>1m</value>
+    <tag>OZONE, PERFORMANCE, OM</tag>
+    <description>Time interval of the directory deleting service. It runs on OM
+      periodically and cleanup orphan directory and its sub-tree. For every
+      orphan directory it deletes the sub-path tree structure(dirs/files). It
+      sends sub-files to KeyDeletingService to deletes its blocks. Unit could
+      be defined with postfix (ns,ms,s,m,h,d)
+    </description>
+  </property>
+  <property>
+    <name>ozone.path.deleting.limit.per.task</name>
+    <value>10000</value>
+    <tag>OZONE, PERFORMANCE, OM</tag>
+    <description>The maximum number of paths (dirs/files) to be deleted by
+      the directory deleting service per time interval.
+    </description>
+  </property>
 </configuration>
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
index c389bec..5fedd83 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
@@ -289,6 +289,7 @@ public final class OmUtils {
     case PurgeKeys:
     case RecoverTrash:
     case DeleteOpenKeys:
+    case PurgePaths:
       return false;
     default:
       LOG.error("CmdType {} is not categorized as readOnly or not.", cmdType);
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index 71344f9..6a64818 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -253,4 +253,17 @@ public final class OMConfigKeys {
   public static final String OZONE_OM_LAYOUT_VERSION_DEFAULT = "V0";
 
   public static final String OZONE_OM_LAYOUT_VERSION_V1 = "V1";
+
+  /**
+   * Configuration properties for Directory Deleting Service.
+   */
+  public static final String OZONE_DIR_DELETING_SERVICE_INTERVAL =
+      "ozone.directory.deleting.service.interval";
+  public static final String OZONE_DIR_DELETING_SERVICE_INTERVAL_DEFAULT
+      = "60s";
+
+  public static final String OZONE_PATH_DELETING_LIMIT_PER_TASK =
+      "ozone.path.deleting.limit.per.task";
+  public static final int OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT = 10000;
+
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSOBucket.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSOBucket.java
new file mode 100644
index 0000000..aa2d8e7
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSOBucket.java
@@ -0,0 +1,318 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.TestDataUtil;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.om.DirectoryDeletingService;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.concurrent.TimeoutException;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
+import static org.junit.Assert.fail;
+
+/**
+ * Directory deletion service test cases.
+ */
+public class TestDirectoryDeletingServiceWithFSOBucket {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestDirectoryDeletingServiceWithFSOBucket.class);
+
+  /**
+   * Set a timeout for each test.
+   */
+  @Rule
+  public Timeout timeout = Timeout.seconds(300);
+
+  private static boolean isBucketFSOptimized = true;
+  private static boolean enabledFileSystemPaths = true;
+  private static boolean omRatisEnabled = true;
+
+  private static MiniOzoneCluster cluster;
+  private static FileSystem fs;
+  private static String volumeName;
+  private static String bucketName;
+
+  @BeforeClass
+  public static void init() throws Exception {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setInt(OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL, 3);
+    conf.setInt(OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK, 5);
+    conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, omRatisEnabled);
+    conf.setBoolean(OZONE_ACL_ENABLED, true);
+    if (isBucketFSOptimized) {
+      TestOMRequestUtils.configureFSOptimizedPaths(conf,
+          enabledFileSystemPaths, OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1);
+    } else {
+      conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+          enabledFileSystemPaths);
+    }
+    cluster = MiniOzoneCluster.newBuilder(conf)
+        .setNumDatanodes(3)
+        .build();
+    cluster.waitForClusterToBeReady();
+
+    // create a volume and a bucket to be used by OzoneFileSystem
+    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster);
+    volumeName = bucket.getVolumeName();
+    bucketName = bucket.getName();
+
+    String rootPath = String.format("%s://%s.%s/",
+        OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName);
+
+    // Set the fs.defaultFS and start the filesystem
+    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
+    // Set the number of keys to be processed during batch operate.
+    conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5);
+
+    fs = FileSystem.get(conf);
+  }
+
+  @AfterClass
+  public static void teardown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+    IOUtils.closeQuietly(fs);
+  }
+
+  @After
+  public void cleanup() {
+    try {
+      Path root = new Path("/");
+      FileStatus[] fileStatuses = fs.listStatus(root);
+      for (FileStatus fileStatus : fileStatuses) {
+        fs.delete(fileStatus.getPath(), true);
+      }
+    } catch (IOException ex) {
+      fail("Failed to cleanup files.");
+    }
+  }
+
+  @Test
+  public void testDeleteEmptyDirectory() throws Exception {
+    Path root = new Path("/rootDir");
+    Path appRoot = new Path(root, "appRoot");
+    fs.mkdirs(appRoot);
+
+    Table<String, OmKeyInfo> deletedDirTable =
+        cluster.getOzoneManager().getMetadataManager().getDeletedDirTable();
+    Table<String, OmDirectoryInfo> dirTable =
+        cluster.getOzoneManager().getMetadataManager().getDirectoryTable();
+
+
+    DirectoryDeletingService dirDeletingService =
+        (DirectoryDeletingService) cluster.getOzoneManager().getKeyManager()
+            .getDirDeletingService();
+    // Before delete
+    assertTableRowCount(deletedDirTable, 0);
+    assertTableRowCount(dirTable, 2);
+
+    assertSubPathsCount(dirDeletingService.getDeletedDirsCount(), 0);
+    assertSubPathsCount(dirDeletingService.getMovedFilesCount(), 0);
+
+    // Delete the appRoot, empty dir
+    fs.delete(appRoot, true);
+
+    // After Delete
+    checkPath(appRoot);
+
+    assertTableRowCount(deletedDirTable, 0);
+    assertTableRowCount(dirTable, 1);
+
+    assertSubPathsCount(dirDeletingService.getDeletedDirsCount(), 1);
+    assertSubPathsCount(dirDeletingService.getMovedFilesCount(), 0);
+
+    Assert.assertTrue(dirTable.iterator().hasNext());
+    Assert.assertEquals(root.getName(),
+        dirTable.iterator().next().getValue().getName());
+
+    Assert.assertTrue(dirDeletingService.getRunCount() > 1);
+  }
+
+  /**
+ * Test verifies that directories and files are purged in multiple
+ * batches.
+   */
+  @Test
+  public void testDeleteWithLargeSubPathsThanBatchSize() throws Exception {
+    Path root = new Path("/rootDir");
+    Path appRoot = new Path(root, "appRoot");
+    // Creates 2 parent dirs from root.
+    fs.mkdirs(appRoot);
+
+    // create 2 more levels. In each level, creates 5 subdirs and 5 subfiles.
+    // This will create total of 3 parentDirs + (3 * 5) childDirs and
+    // Total of (3 * 5) childFiles
+    for (int i = 1; i <= 3; i++) {
+      Path childDir = new Path(appRoot, "parentDir" + i);
+      for (int j = 1; j <= 5; j++) {
+        // total 5 sub-dirs + 5 sub-files = 10 items in this level.
+        Path childSubDir = new Path(childDir, "childDir" + j);
+        Path childSubFile = new Path(childDir, "childFile" + j);
+        ContractTestUtils.touch(fs, childSubFile); // create sub file
+        fs.mkdirs(childSubDir); // create sub dir
+      }
+    }
+
+    Table<String, OmKeyInfo> deletedDirTable =
+        cluster.getOzoneManager().getMetadataManager().getDeletedDirTable();
+    Table<String, OmKeyInfo> keyTable =
+        cluster.getOzoneManager().getMetadataManager().getKeyTable();
+    Table<String, OmDirectoryInfo> dirTable =
+        cluster.getOzoneManager().getMetadataManager().getDirectoryTable();
+
+    DirectoryDeletingService dirDeletingService =
+        (DirectoryDeletingService) cluster.getOzoneManager().getKeyManager()
+            .getDirDeletingService();
+
+    // Before delete
+    assertTableRowCount(deletedDirTable, 0);
+    assertTableRowCount(keyTable, 15);
+    assertTableRowCount(dirTable, 20);
+
+    assertSubPathsCount(dirDeletingService.getMovedFilesCount(), 0);
+    assertSubPathsCount(dirDeletingService.getDeletedDirsCount(), 0);
+
+    // Delete the appRoot
+    fs.delete(appRoot, true);
+
+    // After Delete
+    checkPath(appRoot);
+
+    assertTableRowCount(deletedDirTable, 0);
+    assertTableRowCount(keyTable, 0);
+    assertTableRowCount(dirTable, 1);
+
+    assertSubPathsCount(dirDeletingService.getMovedFilesCount(), 15);
+    assertSubPathsCount(dirDeletingService.getDeletedDirsCount(), 19);
+
+    Assert.assertTrue(dirDeletingService.getRunCount() > 1);
+  }
+
+  @Test
+  public void testDeleteWithMultiLevels() throws Exception {
+    Path root = new Path("/rootDir");
+    Path appRoot = new Path(root, "appRoot");
+
+    for (int i = 1; i <= 3; i++) {
+      Path parent = new Path(appRoot, "parentDir" + i);
+      Path child = new Path(parent, "childFile");
+      ContractTestUtils.touch(fs, child);
+    }
+
+    Table<String, OmKeyInfo> deletedDirTable =
+        cluster.getOzoneManager().getMetadataManager().getDeletedDirTable();
+    Table<String, OmKeyInfo> keyTable =
+        cluster.getOzoneManager().getMetadataManager().getKeyTable();
+    Table<String, OmDirectoryInfo> dirTable =
+        cluster.getOzoneManager().getMetadataManager().getDirectoryTable();
+
+    DirectoryDeletingService dirDeletingService =
+        (DirectoryDeletingService) cluster.getOzoneManager().getKeyManager()
+            .getDirDeletingService();
+
+    // Before delete
+    assertTableRowCount(deletedDirTable, 0);
+    assertTableRowCount(dirTable, 5);
+    assertTableRowCount(keyTable, 3);
+
+    assertSubPathsCount(dirDeletingService.getMovedFilesCount(), 0);
+    assertSubPathsCount(dirDeletingService.getDeletedDirsCount(), 0);
+
+    // Delete the rootDir, which should delete all keys.
+    fs.delete(root, true);
+
+    // After Delete
+    checkPath(root);
+
+    assertTableRowCount(deletedDirTable, 0);
+    assertTableRowCount(keyTable, 0);
+    assertTableRowCount(dirTable, 0);
+
+    assertSubPathsCount(dirDeletingService.getMovedFilesCount(), 3);
+    assertSubPathsCount(dirDeletingService.getDeletedDirsCount(), 5);
+
+    Assert.assertTrue(dirDeletingService.getRunCount() > 1);
+  }
+
+  private void assertSubPathsCount(long pathCount, long expectedCount)
+      throws TimeoutException, InterruptedException {
+    GenericTestUtils.waitFor(() -> pathCount >= expectedCount, 1000, 120000);
+  }
+
+  private void assertTableRowCount(Table<String, ?> table, int count)
+      throws TimeoutException, InterruptedException {
+    GenericTestUtils.waitFor(() -> assertTableRowCount(count, table), 1000,
+        120000); // 2 minutes
+  }
+
+  private boolean assertTableRowCount(int expectedCount,
+                                      Table<String, ?> table) {
+    long count = 0L;
+    try {
+      count = cluster.getOzoneManager().getMetadataManager()
+          .countRowsInTable(table);
+      LOG.info("{} actual row count={}, expectedCount={}", table.getName(),
+          count, expectedCount);
+    } catch (IOException ex) {
+      fail("testDoubleBuffer failed with: " + ex);
+    }
+    return count == expectedCount;
+  }
+
+  private void checkPath(Path path) {
+    try {
+      fs.getFileStatus(path);
+      fail("testRecursiveDelete failed");
+    } catch (IOException ex) {
+      Assert.assertTrue(ex instanceof FileNotFoundException);
+      Assert.assertTrue(ex.getMessage().contains("No such file or directory"));
+    }
+  }
+
+}
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index 2bae4e5..3bacb0e 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -93,6 +93,8 @@ enum Type {
 
   ListTrash = 91;
   RecoverTrash = 92;
+
+  PurgePaths = 93;
 }
 
 message OMRequest {
@@ -165,6 +167,8 @@ message OMRequest {
 
   optional ListTrashRequest                 listTrashRequest               = 91;
   optional RecoverTrashRequest              RecoverTrashRequest            = 92;
+
+  optional PurgePathsRequest                purgePathsRequest              = 93;
 }
 
 message OMResponse {
@@ -235,6 +239,7 @@ message OMResponse {
 
   optional ListTrashResponse                  listTrashResponse            = 91;
   optional RecoverTrashResponse               RecoverTrashResponse         = 92;
+  optional PurgePathsResponse                 purgePathsResponse           = 93;
 }
 
 enum Status {
@@ -956,6 +961,16 @@ message PurgeKeysResponse {
 
 }
 
+message PurgePathsRequest {
+    repeated string deletedDirs = 1;
+    repeated KeyInfo deletedSubFiles = 2;
+    repeated KeyInfo markDeletedSubDirs = 3;
+}
+
+message PurgePathsResponse {
+
+}
+
 message DeleteOpenKeysRequest {
   repeated OpenKeyBucket openKeysPerBucket = 1;
 }
diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
index 4c66040..bcbef0c 100644
--- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
+++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
@@ -436,4 +436,12 @@ public interface OMMetadataManager extends DBStoreHAManager {
    * @return bytes of DB key.
    */
   String getMultipartKey(long parentObjectId, String fileName, String uploadId);
+
+  /**
+   * Get Deleted Directory Table.
+   *
+   * @return Deleted Directory Table.
+   */
+  Table<String, OmKeyInfo> getDeletedDirTable();
+
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DirectoryDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DirectoryDeletingService.java
new file mode 100644
index 0000000..ec5c3a9
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DirectoryDeletingService.java
@@ -0,0 +1,286 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.protobuf.ServiceException;
+import org.apache.hadoop.hdds.utils.BackgroundService;
+import org.apache.hadoop.hdds.utils.BackgroundTask;
+import org.apache.hadoop.hdds.utils.BackgroundTaskQueue;
+import org.apache.hadoop.hdds.utils.BackgroundTaskResult;
+import org.apache.hadoop.ozone.om.helpers.OMRatisHelper;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.util.Time;
+import org.apache.ratis.protocol.ClientId;
+import org.apache.ratis.protocol.Message;
+import org.apache.ratis.protocol.RaftClientRequest;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.apache.hadoop.ozone.ClientVersions.CURRENT_VERSION;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT;
+
+/**
+ * This is a background service that deletes orphan directories and their
+ * sub-paths (sub-directories and sub-files).
+ *
+ * <p>
+ * This will scan the metadata of om periodically to get the orphan dirs from
+ * DeletedDirectoryTable and find its sub paths. It will fetch all sub-files
+ * from KeyTable and move those to DeletedTable so that OM's
+ * KeyDeletingService will cleanup those files later. It will fetch all
+ * sub-directories from the DirectoryTable and move those to
+ * DeletedDirectoryTable so that these will be visited in next iterations.
+ *
+ * <p>
+ * After moving all sub-files and sub-dirs the parent orphan directory will be
+ * deleted by this service. It will continue traversing until all the leaf path
+ * components of an orphan directory are visited.
+ */
+public class DirectoryDeletingService extends BackgroundService {
+
+  private final KeyManager keyManager;
+  private final OzoneManager ozoneManager;
+  private AtomicLong deletedDirsCount;
+  private AtomicLong deletedFilesCount;
+  private final AtomicLong runCount;
+
+  private static ClientId clientId = ClientId.randomId();
+
+  // Use only a single thread for DirDeletion. Multiple threads would read
+  // or write to same tables and can send deletion requests for same key
+  // multiple times.
+  private static final int DIR_DELETING_CORE_POOL_SIZE = 1;
+
+  // Number of items(dirs/files) to be batched in an iteration.
+  private final long pathLimitPerTask;
+
+  public DirectoryDeletingService(long interval, TimeUnit unit,
+      long serviceTimeout, OzoneManager ozoneManager) {
+    super("DirectoryDeletingService", interval, unit,
+        DIR_DELETING_CORE_POOL_SIZE, serviceTimeout);
+    this.keyManager = ozoneManager.getKeyManager();
+    this.ozoneManager = ozoneManager;
+    this.deletedDirsCount = new AtomicLong(0);
+    this.deletedFilesCount = new AtomicLong(0);
+    this.runCount = new AtomicLong(0);
+    this.pathLimitPerTask = ozoneManager.getConfiguration()
+        .getInt(OZONE_PATH_DELETING_LIMIT_PER_TASK,
+            OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT);
+  }
+
+  private boolean shouldRun() {
+    if (ozoneManager == null) {
+      // OzoneManager can be null for testing
+      return true;
+    }
+    return ozoneManager.isLeaderReady();
+  }
+
+  private boolean isRatisEnabled() {
+    if (ozoneManager == null) {
+      return false;
+    }
+    return ozoneManager.isRatisEnabled();
+  }
+
+  @Override
+  public BackgroundTaskQueue getTasks() {
+    BackgroundTaskQueue queue = new BackgroundTaskQueue();
+    queue.add(new DirectoryDeletingService.DirDeletingTask());
+    return queue;
+  }
+
+  private class DirDeletingTask implements BackgroundTask {
+
+    @Override
+    public int getPriority() {
+      return 0;
+    }
+
+    @Override
+    public BackgroundTaskResult call() throws Exception {
+      if (shouldRun()) {
+        runCount.incrementAndGet();
+        long count = pathLimitPerTask;
+        try {
+          long startTime = Time.monotonicNow();
+          // Fetch one pending deleted directory from the deletedDirTable
+          OmKeyInfo pendingDeletedDirInfo = keyManager.getPendingDeletionDir();
+          if (pendingDeletedDirInfo != null) {
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Pending deleted dir name: {}",
+                  pendingDeletedDirInfo.getKeyName());
+            }
+            // step-1: get all sub directories under the deletedDir
+            List<OmKeyInfo> dirs =
+                keyManager.getPendingDeletionSubDirs(pendingDeletedDirInfo,
+                    count);
+            count = count - dirs.size();
+            List<OmKeyInfo> deletedSubDirList = new ArrayList<>();
+            for (OmKeyInfo dirInfo : dirs) {
+              deletedSubDirList.add(dirInfo);
+              if (LOG.isDebugEnabled()) {
+                LOG.debug("deleted sub dir name: {}",
+                    dirInfo.getKeyName());
+              }
+            }
+
+            // step-2: get all sub files under the deletedDir
+            List<OmKeyInfo> purgeDeletedFiles =
+                keyManager.getPendingDeletionSubFiles(pendingDeletedDirInfo,
+                    count);
+            count = count - purgeDeletedFiles.size();
+
+            if (LOG.isDebugEnabled()) {
+              for (OmKeyInfo fileInfo : purgeDeletedFiles) {
+                LOG.debug("deleted sub file name: {}", fileInfo.getKeyName());
+              }
+            }
+
+            // step-3: Since each batch is bounded by 'numEntries', check
+            // whether the sub-path count has reached the batch size limit.
+            // If the limit was reached, there may be more child paths left
+            // to visit, so the parent deleted directory is retained for one
+            // more pass.
+            List<String> purgeDeletedDirs = new ArrayList<>();
+            if (count > 0) {
+              // TODO: Now, there is only one entry in this list. Maintained
+              //  list data structure because this can be extended to add
+              //  more directories within the batchSize limit.
+              purgeDeletedDirs.add(pendingDeletedDirInfo.getPath());
+            }
+
+            if (isRatisEnabled()) {
+              submitPurgePaths(purgeDeletedDirs, purgeDeletedFiles,
+                  deletedSubDirList);
+            }
+            // TODO: need to handle delete with non-ratis
+
+            deletedDirsCount.addAndGet(purgeDeletedDirs.size());
+            deletedFilesCount.addAndGet(purgeDeletedFiles.size());
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Number of dirs deleted: {}, Number of files moved:" +
+                      " {} to DeletedTable, elapsed time: {}ms",
+                  deletedDirsCount, deletedFilesCount,
+                  Time.monotonicNow() - startTime);
+            }
+          }
+        } catch (IOException e) {
+          LOG.error("Error while running delete directories and files " +
+              "background task. Will retry at next run.", e);
+        }
+      }
+
+      // Placeholder: return an empty result from this callback.
+      return BackgroundTaskResult.EmptyTaskResult.newResult();
+    }
+  }
+
+  /**
+   * Returns the number of dirs deleted by the background service.
+   *
+   * @return Long count.
+   */
+  @VisibleForTesting
+  public long getDeletedDirsCount() {
+    return deletedDirsCount.get();
+  }
+
+  /**
+   * Returns the number of files moved to DeletedTable by the background
+   * service.
+   *
+   * @return Long count.
+   */
+  @VisibleForTesting
+  public long getMovedFilesCount() {
+    return deletedFilesCount.get();
+  }
+
+  /**
+   * Returns the number of times this Background service has run.
+   *
+   * @return Long, run count.
+   */
+  @VisibleForTesting
+  public long getRunCount() {
+    return runCount.get();
+  }
+
+  private int submitPurgePaths(List<String> purgeDeletedDirs,
+      List<OmKeyInfo> purgeDeletedFiles, List<OmKeyInfo> markDirsAsDeleted) {
+    // Collect all dirs/files to be purged into a single purge request
+    int deletedCount = 0;
+    OzoneManagerProtocolProtos.PurgePathsRequest.Builder purgePathsRequest =
+        OzoneManagerProtocolProtos.PurgePathsRequest.newBuilder();
+    for (String purgeDir : purgeDeletedDirs) {
+      purgePathsRequest.addDeletedDirs(purgeDir);
+    }
+    for (OmKeyInfo purgeFile : purgeDeletedFiles) {
+      purgePathsRequest.addDeletedSubFiles(
+          purgeFile.getProtobuf(CURRENT_VERSION));
+    }
+
+    // Add these directories to deletedDirTable, so that its sub-paths will be
+    // traversed in next iteration to ensure cleanup all sub-children.
+    for (OmKeyInfo dir : markDirsAsDeleted) {
+      purgePathsRequest.addMarkDeletedSubDirs(dir.getProtobuf(CURRENT_VERSION));
+    }
+
+    OzoneManagerProtocolProtos.OMRequest omRequest =
+        OzoneManagerProtocolProtos.OMRequest.newBuilder()
+            .setCmdType(OzoneManagerProtocolProtos.Type.PurgePaths)
+            .setPurgePathsRequest(purgePathsRequest)
+            .setClientId(clientId.toString())
+            .build();
+
+    // Submit Purge paths request to OM
+    try {
+      RaftClientRequest raftClientRequest =
+          createRaftClientRequestForDelete(omRequest);
+      ozoneManager.getOmRatisServer().submitRequest(omRequest,
+          raftClientRequest);
+    } catch (ServiceException e) {
+      LOG.error("PurgePaths request failed. Will retry at next run.");
+      return 0;
+    }
+    return deletedCount;
+  }
+
+
+  private RaftClientRequest createRaftClientRequestForDelete(
+      OzoneManagerProtocolProtos.OMRequest omRequest) {
+    return RaftClientRequest.newBuilder()
+        .setClientId(clientId)
+        .setServerId(ozoneManager.getOmRatisServer().getRaftPeerId())
+        .setGroupId(ozoneManager.getOmRatisServer().getRaftGroupId())
+        .setCallId(runCount.get())
+        .setMessage(
+            Message.valueOf(
+                OMRatisHelper.convertRequestToByteString(omRequest)))
+        .setType(RaftClientRequest.writeRequestType())
+        .build();
+  }
+
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java
index 658f503..b569b5d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java
@@ -273,4 +273,60 @@ public interface KeyManager extends OzoneManagerFS, IOzoneAcl {
    * @param key
    */
   void refresh(OmKeyInfo key) throws IOException;
+
+  /**
+   * Assume OM has FS namespace like below, deleteDirTable stores absolute
+   * path name as existing KeyDeletionService expects full key name.
+   * For example, if user deletes directory 'd3' then the entry in OM DB looks
+   * like, DBKey = 1030/d3 and DBValue = KeyInfo with keyName "a/b2/d3"
+   *
+   *                   vol1
+   *                    |
+   *                  buck-1
+   *                    |
+   *                    a
+   *                    |
+   *      -----------------------------------
+   *     |             |                     |
+   *     b1            b2                    b3
+   *   -----       ---------               ----------
+   *   |    |      |    |   |             |    |     |
+   *  c1   c2     d1   d2  d3             e1   e2   e3
+   *                   |                  |
+   *               --------               |
+   *              |        |              |
+   *           d21.txt   d22.txt        e11.txt
+   *
+   * @return OmKeyInfo
+   * @throws IOException
+   */
+  OmKeyInfo getPendingDeletionDir() throws IOException;
+
+  /**
+   * Returns all sub directories under the given parent directory.
+   *
+   * @param parentInfo deleted parent directory info
+   * @param numEntries maximum number of sub-directories to return
+   * @return list of dirs
+   * @throws IOException
+   */
+  List<OmKeyInfo> getPendingDeletionSubDirs(OmKeyInfo parentInfo,
+      long numEntries) throws IOException;
+
+  /**
+   * Returns all sub files under the given parent directory.
+   *
+   * @param parentInfo deleted parent directory info
+   * @param numEntries maximum number of sub-files to return
+   * @return list of files
+   * @throws IOException
+   */
+  List<OmKeyInfo> getPendingDeletionSubFiles(OmKeyInfo parentInfo,
+      long numEntries) throws IOException;
+
+  /**
+   * Returns the instance of Directory Deleting Service.
+   * @return Background service.
+   */
+  BackgroundService getDirDeletingService();
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 422a915..38a4307 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -134,6 +134,8 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAU
 import static org.apache.hadoop.ozone.ClientVersions.CURRENT_VERSION;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL_DEFAULT;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND;
@@ -176,6 +178,7 @@ public class KeyManagerImpl implements KeyManager {
   private final PrefixManager prefixManager;
 
   private final boolean enableFileSystemPaths;
+  private BackgroundService dirDeletingService;
 
 
   @VisibleForTesting
@@ -250,6 +253,22 @@ public class KeyManagerImpl implements KeyManager {
           serviceTimeout, configuration);
       keyDeletingService.start();
     }
+
+    // Start directory deletion service for FSO buckets.
+    if (OzoneManagerRatisUtils.isBucketFSOptimized()
+        && dirDeletingService == null) {
+      long dirDeleteInterval = configuration.getTimeDuration(
+          OZONE_DIR_DELETING_SERVICE_INTERVAL,
+          OZONE_DIR_DELETING_SERVICE_INTERVAL_DEFAULT,
+          TimeUnit.MILLISECONDS);
+      long serviceTimeout = configuration.getTimeDuration(
+          OZONE_BLOCK_DELETING_SERVICE_TIMEOUT,
+          OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT,
+          TimeUnit.MILLISECONDS);
+      dirDeletingService = new DirectoryDeletingService(dirDeleteInterval,
+          TimeUnit.SECONDS, serviceTimeout, ozoneManager);
+      dirDeletingService.start();
+    }
   }
 
   KeyProviderCryptoExtension getKMSProvider() {
@@ -262,6 +281,10 @@ public class KeyManagerImpl implements KeyManager {
       keyDeletingService.shutdown();
       keyDeletingService = null;
     }
+    if (dirDeletingService != null) {
+      dirDeletingService.shutdown();
+      dirDeletingService = null;
+    }
   }
 
   private OmBucketInfo getBucketInfo(String volumeName, String bucketName)
@@ -980,6 +1003,11 @@ public class KeyManagerImpl implements KeyManager {
   }
 
   @Override
+  public BackgroundService getDirDeletingService() {
+    return dirDeletingService;
+  }
+
+  @Override
   public OmMultipartInfo initiateMultipartUpload(OmKeyArgs omKeyArgs) throws
       IOException {
     Preconditions.checkNotNull(omKeyArgs);
@@ -2894,4 +2922,88 @@ public class KeyManagerImpl implements KeyManager {
     }
     return nodeSet;
   }
+
+  @Override
+  public OmKeyInfo getPendingDeletionDir() throws IOException {
+    OmKeyInfo omKeyInfo = null;
+    try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+             deletedDirItr = metadataManager.getDeletedDirTable().iterator()) {
+      if (deletedDirItr.hasNext()) {
+        Table.KeyValue<String, OmKeyInfo> keyValue = deletedDirItr.next();
+        if (keyValue != null) {
+          omKeyInfo = keyValue.getValue();
+        }
+      }
+    }
+    return omKeyInfo;
+  }
+
+  @Override
+  public List<OmKeyInfo> getPendingDeletionSubDirs(OmKeyInfo parentInfo,
+      long numEntries) throws IOException {
+    List<OmKeyInfo> directories = new ArrayList<>();
+    String seekDirInDB = metadataManager.getOzonePathKey(
+        parentInfo.getObjectID(), "");
+    long countEntries = 0;
+
+    Table dirTable = metadataManager.getDirectoryTable();
+    TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
+        iterator = dirTable.iterator();
+
+    iterator.seek(seekDirInDB);
+
+    while (iterator.hasNext() && numEntries - countEntries > 0) {
+      OmDirectoryInfo dirInfo = iterator.value().getValue();
+      if (!OMFileRequest.isImmediateChild(dirInfo.getParentObjectID(),
+          parentInfo.getObjectID())) {
+        break;
+      }
+      String dirName = OMFileRequest.getAbsolutePath(parentInfo.getKeyName(),
+          dirInfo.getName());
+      OmKeyInfo omKeyInfo = OMFileRequest.getOmKeyInfo(
+          parentInfo.getVolumeName(), parentInfo.getBucketName(), dirInfo,
+          dirName);
+      directories.add(omKeyInfo);
+      countEntries++;
+
+      // move to next entry in the DirTable
+      iterator.next();
+    }
+
+    return directories;
+  }
+
+  @Override
+  public List<OmKeyInfo> getPendingDeletionSubFiles(OmKeyInfo parentInfo,
+      long numEntries) throws IOException {
+    List<OmKeyInfo> files = new ArrayList<>();
+    String seekFileInDB = metadataManager.getOzonePathKey(
+        parentInfo.getObjectID(), "");
+    long countEntries = 0;
+
+    Table fileTable = metadataManager.getKeyTable();
+    TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+        iterator = fileTable.iterator();
+
+    iterator.seek(seekFileInDB);
+
+    while (iterator.hasNext() && numEntries - countEntries > 0) {
+      OmKeyInfo fileInfo = iterator.value().getValue();
+      if (!OMFileRequest.isImmediateChild(fileInfo.getParentObjectID(),
+          parentInfo.getObjectID())) {
+        break;
+      }
+      fileInfo.setFileName(fileInfo.getKeyName());
+      String fullKeyPath = OMFileRequest.getAbsolutePath(
+          parentInfo.getKeyName(), fileInfo.getKeyName());
+      fileInfo.setKeyName(fullKeyPath);
+
+      files.add(fileInfo);
+      countEntries++;
+      // move to next entry in the KeyTable
+      iterator.next();
+    }
+
+    return files;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index 4e4f91b..b67346e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -137,6 +137,8 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
    * |----------------------------------------------------------------------|
    * |  multipartFileInfoTable | parentId/fileName/uploadId ->...           |
    * |----------------------------------------------------------------------|
+   * |  deletedDirTable      | parentId/directoryName -> KeyInfo            |
+   * |----------------------------------------------------------------------|
    * |  transactionInfoTable | #TRANSACTIONINFO -> OMTransactionInfo        |
    * |----------------------------------------------------------------------|
    */
@@ -155,6 +157,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
   public static final String FILE_TABLE = "fileTable";
   public static final String OPEN_FILE_TABLE = "openFileTable";
   public static final String MULTIPARTFILEINFO_TABLE = "multipartFileInfoTable";
+  public static final String DELETED_DIR_TABLE = "deletedDirectoryTable";
   public static final String TRANSACTION_INFO_TABLE =
       "transactionInfoTable";
 
@@ -180,6 +183,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
   private boolean isRatisEnabled;
   private boolean ignorePipelineinKey;
   private Table<String, OmMultipartKeyInfo> multipartFileInfoTable;
+  private Table deletedDirTable;
 
   // Epoch is used to generate the objectIDs. The most significant 2 bits of
   // objectIDs is set to this epoch. For clusters before HDDS-4315 there is
@@ -256,6 +260,11 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
   }
 
   @Override
+  public Table<String, OmKeyInfo> getDeletedDirTable() {
+    return deletedDirTable;
+  }
+
+  @Override
   public Table<String, OmKeyInfo> getOpenKeyTable() {
     if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
       return openFileTable;
@@ -372,6 +381,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
         .addTable(FILE_TABLE)
         .addTable(OPEN_FILE_TABLE)
         .addTable(MULTIPARTFILEINFO_TABLE)
+        .addTable(DELETED_DIR_TABLE)
         .addTable(TRANSACTION_INFO_TABLE)
         .addCodec(OzoneTokenIdentifier.class, new TokenIdentifierCodec())
         .addCodec(OmKeyInfo.class, new OmKeyInfoCodec(true))
@@ -454,6 +464,10 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
             String.class, OmMultipartKeyInfo.class);
     checkTableStatus(multipartFileInfoTable, MULTIPARTFILEINFO_TABLE);
 
+    deletedDirTable = this.store.getTable(DELETED_DIR_TABLE, String.class,
+        OmKeyInfo.class);
+    checkTableStatus(deletedDirTable, DELETED_DIR_TABLE);
+
     transactionInfoTable = this.store.getTable(TRANSACTION_INFO_TABLE,
         String.class, TransactionInfo.class);
     checkTableStatus(transactionInfoTable, TRANSACTION_INFO_TABLE);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
index 77b9e04..f3716db 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
@@ -180,6 +180,12 @@ public class OMDBDefinition implements DBDefinition {
                   OmMultipartKeyInfo.class,
                   new OmMultipartKeyInfoCodec());
 
+  public static final DBColumnFamilyDefinition<String, OmKeyInfo>
+      DELETED_DIR_TABLE =
+      new DBColumnFamilyDefinition<>(OmMetadataManagerImpl.DELETED_DIR_TABLE,
+          String.class, new StringCodec(), OmKeyInfo.class,
+          new OmKeyInfoCodec(true));
+
   @Override
   public String getName() {
     return OzoneConsts.OM_DB_NAME;
@@ -196,7 +202,8 @@ public class OMDBDefinition implements DBDefinition {
         VOLUME_TABLE, OPEN_KEY_TABLE, KEY_TABLE,
         BUCKET_TABLE, MULTIPART_INFO_TABLE, PREFIX_TABLE, DTOKEN_TABLE,
         S3_SECRET_TABLE, TRANSACTION_INFO_TABLE, DIRECTORY_TABLE,
-        FILE_TABLE, OPEN_FILE_TABLE, MULTIPART_FILEINFO_TABLE};
+        FILE_TABLE, OPEN_FILE_TABLE, MULTIPART_FILEINFO_TABLE,
+        DELETED_DIR_TABLE};
   }
 }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index b3e6c4f..f4628a9 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.ozone.om.request.key.OMKeyPurgeRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyRenameRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyRenameRequestV1;
 import org.apache.hadoop.ozone.om.request.key.OMKeysRenameRequest;
+import org.apache.hadoop.ozone.om.request.key.OMPathsPurgeRequestV1;
 import org.apache.hadoop.ozone.om.request.key.OMTrashRecoverRequest;
 import org.apache.hadoop.ozone.om.request.key.acl.OMKeyAddAclRequest;
 import org.apache.hadoop.ozone.om.request.key.acl.OMKeyAddAclRequestV1;
@@ -184,6 +185,8 @@ public final class OzoneManagerRatisUtils {
       return new OMFileCreateRequest(omRequest);
     case PurgeKeys:
       return new OMKeyPurgeRequest(omRequest);
+    case PurgePaths:
+      return new OMPathsPurgeRequestV1(omRequest);
     case InitiateMultiPartUpload:
       if (isBucketFSOptimized()) {
         return new S3InitiateMultipartUploadRequestV1(omRequest);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java
index dbf5645..87427f8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java
@@ -160,9 +160,9 @@ public class OMKeyDeleteRequestV1 extends OMKeyDeleteRequest {
       // TODO: Revisit if we need it later.
 
       omClientResponse = new OMKeyDeleteResponseV1(omResponse
-              .setDeleteKeyResponse(DeleteKeyResponse.newBuilder()).build(),
-              omKeyInfo, ozoneManager.isRatisEnabled(),
-              omBucketInfo.copyObject(), keyStatus.isDirectory());
+          .setDeleteKeyResponse(DeleteKeyResponse.newBuilder()).build(),
+          keyName, omKeyInfo, ozoneManager.isRatisEnabled(),
+          omBucketInfo.copyObject(), keyStatus.isDirectory());
 
       result = Result.SUCCESS;
     } catch (IOException ex) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMPathsPurgeRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMPathsPurgeRequestV1.java
new file mode 100644
index 0000000..b9d6066
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMPathsPurgeRequestV1.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.key;
+
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.key.OMPathsPurgeResponseV1;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+
+import java.util.List;
+
+/**
+ * Handles purging of deleted directory paths and their sub-files/sub-dirs
+ * from OM DB.
+ */
+public class OMPathsPurgeRequestV1 extends OMKeyRequest {
+
+  public OMPathsPurgeRequestV1(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+    OzoneManagerProtocolProtos.PurgePathsRequest purgePathsRequest =
+        getOmRequest().getPurgePathsRequest();
+
+    List<String> deletedDirsList = purgePathsRequest.getDeletedDirsList();
+    List<OzoneManagerProtocolProtos.KeyInfo> deletedSubFilesList =
+        purgePathsRequest.getDeletedSubFilesList();
+    List<OzoneManagerProtocolProtos.KeyInfo> markDeletedSubDirsList =
+        purgePathsRequest.getMarkDeletedSubDirsList();
+
+    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
+        getOmRequest());
+
+    OMClientResponse omClientResponse = new OMPathsPurgeResponseV1(
+        omResponse.build(), markDeletedSubDirsList, deletedSubFilesList,
+        deletedDirsList, ozoneManager.isRatisEnabled());
+    addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
+        omDoubleBufferHelper);
+
+    return omClientResponse;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponseV1.java
index 15c1ba6..69e87df 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponseV1.java
@@ -40,12 +40,14 @@ import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE;
 public class OMKeyDeleteResponseV1 extends OMKeyDeleteResponse {
 
   private boolean isDeleteDirectory;
+  private String keyName;
 
   public OMKeyDeleteResponseV1(@Nonnull OMResponse omResponse,
-      @Nonnull OmKeyInfo omKeyInfo, boolean isRatisEnabled,
-      @Nonnull OmBucketInfo omBucketInfo,
+      @Nonnull String keyName, @Nonnull OmKeyInfo omKeyInfo,
+      boolean isRatisEnabled, @Nonnull OmBucketInfo omBucketInfo,
       @Nonnull boolean isDeleteDirectory) {
     super(omResponse, omKeyInfo, isRatisEnabled, omBucketInfo);
+    this.keyName = keyName;
     this.isDeleteDirectory = isDeleteDirectory;
   }
 
@@ -69,6 +71,12 @@ public class OMKeyDeleteResponseV1 extends OMKeyDeleteResponse {
     if (isDeleteDirectory) {
       omMetadataManager.getDirectoryTable().deleteWithBatch(batchOperation,
               ozoneDbKey);
+      OmKeyInfo omKeyInfo = getOmKeyInfo();
+      // Sets full absolute key name to OmKeyInfo, which is
+      // required for moving the sub-files to KeyDeletionService.
+      omKeyInfo.setKeyName(keyName);
+      omMetadataManager.getDeletedDirTable().putWithBatch(
+          batchOperation, ozoneDbKey, omKeyInfo);
     } else {
       Table<String, OmKeyInfo> keyTable = omMetadataManager.getKeyTable();
       addDeletionToBatch(omMetadataManager, batchOperation, keyTable,
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMPathsPurgeResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMPathsPurgeResponseV1.java
new file mode 100644
index 0000000..b6f5299
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMPathsPurgeResponseV1.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.key;
+
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.OmUtils;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.om.request.key.OMPathsPurgeRequestV1;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.annotation.Nonnull;
+import java.io.IOException;
+import java.util.List;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_DIR_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE;
+
+/**
+ * Response for {@link OMPathsPurgeRequestV1} request.
+ */
+@CleanupTableInfo(cleanupTables = {DELETED_TABLE, DELETED_DIR_TABLE,
+    DIRECTORY_TABLE, FILE_TABLE})
+public class OMPathsPurgeResponseV1 extends OMClientResponse {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OMPathsPurgeResponseV1.class);
+
+  private List<OzoneManagerProtocolProtos.KeyInfo> markDeletedDirList;
+  private List<String> dirList;
+  private List<OzoneManagerProtocolProtos.KeyInfo> fileList;
+  private boolean isRatisEnabled;
+
+
+  public OMPathsPurgeResponseV1(@Nonnull OMResponse omResponse,
+      @Nonnull List<OzoneManagerProtocolProtos.KeyInfo> markDeletedDirs,
+      @Nonnull List<OzoneManagerProtocolProtos.KeyInfo> files,
+      @Nonnull List<String> dirs, boolean isRatisEnabled) {
+    super(omResponse);
+    this.markDeletedDirList = markDeletedDirs;
+    this.dirList = dirs;
+    this.fileList = files;
+    this.isRatisEnabled = isRatisEnabled;
+  }
+
+  @Override
+  public void addToDBBatch(OMMetadataManager omMetadataManager,
+      BatchOperation batchOperation) throws IOException {
+
+    // Add all sub-directories to deleted directory table.
+    for (OzoneManagerProtocolProtos.KeyInfo key : markDeletedDirList) {
+      OmKeyInfo keyInfo = OmKeyInfo.getFromProtobuf(key);
+      String ozoneDbKey = omMetadataManager.getOzonePathKey(
+          keyInfo.getParentObjectID(), keyInfo.getFileName());
+      omMetadataManager.getDeletedDirTable().putWithBatch(batchOperation,
+          ozoneDbKey, keyInfo);
+
+      omMetadataManager.getDirectoryTable().deleteWithBatch(batchOperation,
+          ozoneDbKey);
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("markDeletedDirList KeyName: {}, DBKey: {}",
+            keyInfo.getKeyName(), ozoneDbKey);
+      }
+    }
+
+    // Delete all the visited directories from deleted directory table
+    for (String key : dirList) {
+      omMetadataManager.getDeletedDirTable().deleteWithBatch(batchOperation,
+          key);
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Purge Deleted Directory DBKey: {}", key);
+      }
+    }
+    for (OzoneManagerProtocolProtos.KeyInfo key : fileList) {
+      OmKeyInfo keyInfo = OmKeyInfo.getFromProtobuf(key);
+      String ozoneDbKey = omMetadataManager.getOzonePathKey(
+          keyInfo.getParentObjectID(), keyInfo.getFileName());
+      omMetadataManager.getKeyTable().deleteWithBatch(batchOperation,
+          ozoneDbKey);
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Move keyName:{} to DeletedTable DBKey: {}",
+            keyInfo.getKeyName(), ozoneDbKey);
+      }
+
+      // No prior versions to merge with; pass null as the previous value.
+      RepeatedOmKeyInfo repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
+          keyInfo, null, keyInfo.getUpdateID(), isRatisEnabled);
+
+      omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
+          keyInfo.getPath(), repeatedOmKeyInfo);
+
+    }
+  }
+
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseV1.java
index d46fe72..4422527 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseV1.java
@@ -36,8 +36,8 @@ public class TestOMKeyDeleteResponseV1 extends TestOMKeyDeleteResponse {
   @Override
   protected OMKeyDeleteResponse getOmKeyDeleteResponse(OmKeyInfo omKeyInfo,
       OzoneManagerProtocolProtos.OMResponse omResponse) {
-    return new OMKeyDeleteResponseV1(omResponse, omKeyInfo,
-            true, getOmBucketInfo(), false);
+    return new OMKeyDeleteResponseV1(omResponse, omKeyInfo.getKeyName(),
+        omKeyInfo, true, getOmBucketInfo(), false);
   }
 
   @Override

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 20/29: HDDS-4683. [FSO]ListKeys: do lookup in dir and file tables (#1954)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 2dbcaca4162adc5b3981ab205aa1fd5d712f7399
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Fri Feb 26 07:43:40 2021 +0530

    HDDS-4683. [FSO]ListKeys: do lookup in dir and file tables (#1954)
---
 .../apache/hadoop/ozone/client/OzoneBucket.java    | 299 ++++++++++++++++++++-
 .../hadoop/ozone/om/helpers/OzoneFSUtils.java      |  10 +-
 .../apache/hadoop/ozone/om/TestObjectStoreV1.java  | 261 +++++++++++++++++-
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |   8 +-
 .../ozone/om/request/file/OMFileRequest.java       |   3 +-
 .../S3MultipartUploadCompleteRequest.java          |   1 -
 6 files changed, 575 insertions(+), 7 deletions(-)

diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index c1877b4..bfe6456 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
@@ -21,12 +21,14 @@ package org.apache.hadoop.ozone.client;
 import com.fasterxml.jackson.annotation.JsonIgnore;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdds.client.OzoneQuota;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.StorageType;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
+import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
@@ -35,7 +37,9 @@ import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.helpers.WithMetadata;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
 import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
@@ -43,13 +47,16 @@ import org.apache.hadoop.util.Time;
 
 import java.io.IOException;
 import java.time.Instant;
+import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Stack;
 import java.util.NoSuchElementException;
 
 import static org.apache.hadoop.ozone.OzoneConsts.QUOTA_RESET;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
 
 /**
  * A class that encapsulates OzoneBucket.
@@ -544,6 +551,10 @@ public class OzoneBucket extends WithMetadata {
    */
   public Iterator<? extends OzoneKey> listKeys(String keyPrefix,
       String prevKey) throws IOException {
+
+    if(OzoneFSUtils.isFSOptimizedBucket(getMetadata())){
+      return new KeyIteratorV1(keyPrefix, prevKey);
+    }
     return new KeyIterator(keyPrefix, prevKey);
   }
 
@@ -788,6 +799,13 @@ public class OzoneBucket extends WithMetadata {
     private Iterator<OzoneKey> currentIterator;
     private OzoneKey currentValue;
 
+    String getKeyPrefix() {
+      return keyPrefix;
+    }
+
+    void setKeyPrefix(String keyPrefixPath) {
+      keyPrefix = keyPrefixPath;
+    }
 
     /**
      * Creates an Iterator to iterate over all keys after prevKey in the bucket.
@@ -796,7 +814,7 @@ public class OzoneBucket extends WithMetadata {
      * @param keyPrefix
      */
     KeyIterator(String keyPrefix, String prevKey) throws IOException{
-      this.keyPrefix = keyPrefix;
+      setKeyPrefix(keyPrefix);
       this.currentValue = null;
       this.currentIterator = getNextListOfKeys(prevKey).iterator();
     }
@@ -828,10 +846,287 @@ public class OzoneBucket extends WithMetadata {
      * @param prevKey
      * @return {@code List<OzoneKey>}
      */
-    private List<OzoneKey> getNextListOfKeys(String prevKey) throws
+    List<OzoneKey> getNextListOfKeys(String prevKey) throws
         IOException {
       return proxy.listKeys(volumeName, name, keyPrefix, prevKey,
           listCacheSize);
     }
   }
+
+
+  /**
+   * An Iterator to iterate over {@link OzoneKey} list.
+   *
+   *                  buck-1
+   *                    |
+   *                    a
+   *                    |
+   *      -----------------------------------
+   *     |           |                       |
+   *     b1          b2                      b3
+   *   -----       --------               ----------
+   *   |    |      |    |   |             |    |     |
+   *  c1   c2     d1   d2  d3             e1   e2   e3
+   *                   |                  |
+   *               --------               |
+   *              |        |              |
+   *           d21.txt   d22.txt        e11.txt
+   *
+   * Say, keyPrefix="a" and prevKey="", then will do Depth-First-Traversal and
+   * visit node to getChildren in below fashion:-
+   * 1. getChildren("a/")  2. getChildren("a/b1")  3. getChildren("a/b1/c1")
+   * 4. getChildren("a/b1/c2")  5. getChildren("a/b2/d1")
+   * 6. getChildren("a/b2/d2")  7. getChildren("a/b2/d3")
+   * 8. getChildren("a/b3/e1")  9. getChildren("a/b3/e2")
+   * 10. getChildren("a/b3/e3")
+   *
+   * Note: Does not guarantee to return the list of keys in a sorted order.
+   */
+  private class KeyIteratorV1 extends KeyIterator{
+
+    private Stack<String> stack;
+    private List<OzoneKey> pendingItemsToBeBatched;
+    private boolean addedKeyPrefix;
+
+    /**
+     * Creates an Iterator to iterate over all keys after prevKey in the bucket.
+     * If prevKey is null it iterates from the first key in the bucket.
+     * The returned keys match key prefix.
+     *
+     * @param keyPrefix
+     * @param prevKey
+     */
+    KeyIteratorV1(String keyPrefix, String prevKey) throws IOException {
+      super(keyPrefix, prevKey);
+    }
+
+    @Override
+    List<OzoneKey> getNextListOfKeys(String prevKey) throws IOException {
+      if (stack == null) {
+        stack = new Stack<>();
+        pendingItemsToBeBatched = new ArrayList<>();
+      }
+
+      // Normalize paths once, on the first batch fetch only.
+      if (!addedKeyPrefix) {
+        prevKey = OmUtils.normalizeKey(prevKey, true);
+        String keyPrefixName = "";
+        if (StringUtils.isNotBlank(getKeyPrefix())) {
+          keyPrefixName = OmUtils.normalizeKey(getKeyPrefix(), true);
+        }
+        setKeyPrefix(keyPrefixName);
+      }
+
+      // Get immediate children
+      List<OzoneKey> keysResultList = new ArrayList<>();
+      getChildrenKeys(getKeyPrefix(), prevKey, keysResultList);
+
+      // TODO: Back and Forth seek all the files & dirs, starting from
+      //  startKey till keyPrefix.
+
+      return keysResultList;
+    }
+
+    /**
+     * List children under the given keyPrefix and startKey path. It does
+     * recursive #listStatus calls to list all the sub-keys resultList.
+     *
+     *                  buck-1
+     *                    |
+     *                    a
+     *                    |
+     *      -----------------------------------
+     *     |           |                       |
+     *     b1          b2                      b3
+     *   -----       --------               ----------
+     *   |    |      |    |   |             |    |     |
+     *  c1   c2     d1   d2  d3             e1   e2   e3
+     *                   |                  |
+     *               --------               |
+     *              |        |              |
+     *           d21.txt   d22.txt        e11.txt
+     *
+     * Say, KeyPrefix = "a" and startKey = null;
+     *
+     * Iteration-1) RPC call proxy#listStatus("a").
+     *              Add b3, b2 and b1 to stack.
+     * Iteration-2) pop b1 and do RPC call proxy#listStatus("b1")
+     *              Add c2, c1 to stack.
+     * Iteration-3) pop c1 and do RPC call proxy#listStatus("c1"). Empty list.
+     * Iteration-4) pop c2 and do RPC call proxy#listStatus("c2"). Empty list.
+     * Iteration-5) pop b2 and do RPC call proxy#listStatus("b2")
+     *              Add d3, d2 and d1 to stack.
+     *              ..........
+     *              ..........
+     * Iteration-n) pop e3 and do RPC call proxy#listStatus("e3")
+     *              Reached end of the FS tree.
+     *
+     * @param keyPrefix
+     * @param startKey
+     * @param keysResultList
+     * @return true represents it reached limit batch size, false otherwise.
+     * @throws IOException
+     */
+    private boolean getChildrenKeys(String keyPrefix, String startKey,
+        List<OzoneKey> keysResultList) throws IOException {
+
+      // listStatus API expects a not null 'startKey' value
+      startKey = startKey == null ? "" : startKey;
+
+      // 1. Add pending items to the user key resultList
+      if (addAllPendingItemsToResultList(keysResultList)) {
+        // reached limit batch size.
+        return true;
+      }
+
+      // 2. Get immediate children of keyPrefix, starting with startKey
+      List<OzoneFileStatus> statuses = proxy.listStatus(volumeName, name,
+              keyPrefix, false, startKey, listCacheSize);
+
+      // 3. Special case: ListKey expects keyPrefix element should present in
+      // the resultList, only if startKey is blank. If startKey is not blank
+      // then resultList shouldn't contain the startKey element.
+      // Since proxy#listStatus API won't return keyPrefix element in the
+      // resultList. So, this is to add user given keyPrefix to the return list.
+      addKeyPrefixInfoToResultList(keyPrefix, startKey, keysResultList);
+
+      // 4. Special case: ListKey expects startKey shouldn't present in the
+      // resultList. Since proxy#listStatus API returns startKey element to
+      // the returnList, this function is to remove the startKey element.
+      removeStartKeyIfExistsInStatusList(startKey, statuses);
+
+      boolean reachedLimitCacheSize = false;
+      // This dirList is used to store paths elements in left-to-right order.
+      List<String> dirList = new ArrayList<>();
+
+      // 5. Iterating over the resultStatuses list and add each key to the
+      // resultList. If the listCacheSize reaches then it will add the rest
+      // of the statuses to pendingItemsToBeBatched
+      for (int indx = 0; indx < statuses.size(); indx++) {
+        OzoneFileStatus status = statuses.get(indx);
+        OmKeyInfo keyInfo = status.getKeyInfo();
+        String keyName = keyInfo.getKeyName();
+
+        // Add dir to the dirList
+        if (status.isDirectory()) {
+          dirList.add(keyInfo.getKeyName());
+          // add trailing slash to represent directory
+          keyName = OzoneFSUtils.addTrailingSlashIfNeeded(keyName);
+        }
+
+        OzoneKey ozoneKey = new OzoneKey(keyInfo.getVolumeName(),
+                keyInfo.getBucketName(), keyName,
+                keyInfo.getDataSize(), keyInfo.getCreationTime(),
+                keyInfo.getModificationTime(),
+                ReplicationType.valueOf(keyInfo.getType().toString()),
+                keyInfo.getFactor().getNumber());
+
+        // 5.1) Add to the resultList till it reaches limit batch size.
+        // Once it reaches limit, then add rest of the items to
+        // pendingItemsToBeBatched and this will picked in next batch iteration
+        if (!reachedLimitCacheSize && listCacheSize > keysResultList.size()) {
+          keysResultList.add(ozoneKey);
+          reachedLimitCacheSize = listCacheSize <= keysResultList.size();
+        } else {
+          pendingItemsToBeBatched.add(ozoneKey);
+        }
+      }
+
+      // 6. Push elements in reverse order so that the FS tree traversal will
+      // occur in left-to-right fashion.
+      for (int indx = dirList.size() - 1; indx >= 0; indx--) {
+        String dirPathComponent = dirList.get(indx);
+        stack.push(dirPathComponent);
+      }
+
+      if (reachedLimitCacheSize) {
+        return true;
+      }
+
+      // 7. Pop element and seek for its sub-child path(s). Basically moving
+      // seek pointer to next level(depth) in FS tree.
+      while (!stack.isEmpty()) {
+        keyPrefix = stack.pop();
+        if (getChildrenKeys(keyPrefix, "", keysResultList)) {
+          // reached limit batch size.
+          return true;
+        }
+      }
+
+      return false;
+    }
+
+    private void removeStartKeyIfExistsInStatusList(String startKey,
+        List<OzoneFileStatus> statuses) {
+
+      if (StringUtils.isNotBlank(startKey) && !statuses.isEmpty()) {
+        String startKeyPath = startKey;
+        if (startKey.endsWith(OZONE_URI_DELIMITER)) {
+          startKeyPath = OzoneFSUtils.removeTrailingSlashIfNeeded(startKey);
+        }
+        if (StringUtils.equals(statuses.get(0).getKeyInfo().getKeyName(),
+                startKeyPath)) {
+          // remove the duplicateKey from the list.
+          statuses.remove(0);
+        }
+      }
+    }
+
+    private boolean addAllPendingItemsToResultList(List<OzoneKey> keys) {
+
+      Iterator<OzoneKey> ozoneKeyItr = pendingItemsToBeBatched.iterator();
+      while (ozoneKeyItr.hasNext()) {
+        if (listCacheSize <= keys.size()) {
+          // reached limit batch size.
+          return true;
+        }
+        keys.add(ozoneKeyItr.next());
+        ozoneKeyItr.remove();
+      }
+      return false;
+    }
+
+    private void addKeyPrefixInfoToResultList(String keyPrefix,
+        String startKey, List<OzoneKey> keysResultList) throws IOException {
+
+      if (addedKeyPrefix) {
+        return;
+      }
+
+      // setting flag to true.
+      addedKeyPrefix = true;
+
+      // not required to addKeyPrefix
+      // case-1) if keyPrefix is null or empty
+      // case-2) if startKey is not null or empty
+      if (StringUtils.isBlank(keyPrefix) || StringUtils.isNotBlank(startKey)) {
+        return;
+      }
+
+      // TODO: HDDS-4859 will fix the case where startKey not started with
+      //  keyPrefix.
+
+      OzoneFileStatus status = proxy.getOzoneFileStatus(volumeName, name,
+          keyPrefix);
+
+      if (status != null) {
+        OmKeyInfo keyInfo = status.getKeyInfo();
+        String keyName = keyInfo.getKeyName();
+        if (status.isDirectory()) {
+          // add trailing slash to represent directory
+          keyName =
+              OzoneFSUtils.addTrailingSlashIfNeeded(keyInfo.getKeyName());
+        }
+
+        OzoneKey ozoneKey = new OzoneKey(keyInfo.getVolumeName(),
+            keyInfo.getBucketName(), keyName,
+            keyInfo.getDataSize(), keyInfo.getCreationTime(),
+            keyInfo.getModificationTime(),
+            ReplicationType.valueOf(keyInfo.getType().toString()),
+            keyInfo.getFactor().getNumber());
+        keysResultList.add(ozoneKey);
+      }
+    }
+
+  }
 }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
index e7e2eb0..c63c21f 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
@@ -179,7 +179,7 @@ public final class OzoneFSUtils {
       return fileName.toString();
     }
     // failed to find a parent directory.
-    return keyName;
+    return "";
   }
 
   /**
@@ -230,4 +230,12 @@ public final class OzoneFSUtils {
     return layoutVersionEnabled && fsEnabled;
   }
 
+  public static String removeTrailingSlashIfNeeded(String key) {
+    if (key.endsWith(OZONE_URI_DELIMITER)) {
+      java.nio.file.Path keyPath = Paths.get(key);
+      return keyPath.toString();
+    } else {
+      return key;
+    }
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
index b877e29..0e44c9d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
@@ -17,15 +17,18 @@
 package org.apache.hadoop.ozone.om;
 
 import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.fs.ozone.OzoneFileSystem;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.TestDataUtil;
 import org.apache.hadoop.ozone.client.ObjectStore;
@@ -35,6 +38,7 @@ import org.apache.hadoop.ozone.client.OzoneKey;
 import org.apache.hadoop.ozone.client.OzoneKeyDetails;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.KeyOutputStream;
+import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
@@ -51,8 +55,13 @@ import org.junit.Test;
 import org.junit.rules.Timeout;
 
 import java.io.IOException;
+import java.net.URI;
+import java.util.Arrays;
 import java.nio.charset.StandardCharsets;
 import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
 import java.util.UUID;
 import java.util.concurrent.TimeoutException;
 
@@ -60,6 +69,7 @@ import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
 import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_ALREADY_EXISTS;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
 import static org.junit.Assert.assertEquals;
@@ -77,7 +87,7 @@ public class TestObjectStoreV1 {
   private static FileSystem fs;
 
   @Rule
-  public Timeout timeout = new Timeout(240000);
+  public Timeout timeout = new Timeout(1200000);
 
   /**
    * Create a MiniDFSCluster for testing.
@@ -275,6 +285,255 @@ public class TestObjectStoreV1 {
             dirPathC.getObjectID(), true);
   }
 
+  /**
+   * Verify listKeys at different levels.
+   *
+   *                  buck-1
+   *                    |
+   *                    a
+   *                    |
+   *      -----------------------------------
+   *     |              |                       |
+   *     b1             b2                      b3
+   *    -----           --------               ----------
+   *   |      |        |    |   |             |    |     |
+   *  c1     c2        d1   d2  d3             e1   e2   e3
+   *  |      |         |    |   |              |    |    |
+   * c1.tx  c2.tx   d11.tx  | d31.tx           |    |    e31.tx
+   *                      --------             |   e21.tx
+   *                     |        |            |
+   *                   d21.tx   d22.tx        e11.tx
+   *
+   * Above is the FS tree structure.
+   */
+  @Test
+  public void testListKeysAtDifferentLevels() throws Exception {
+    OzoneClient client = cluster.getClient();
+
+    ObjectStore objectStore = client.getObjectStore();
+    OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
+    Assert.assertTrue(ozoneVolume.getName().equals(volumeName));
+    OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
+    Assert.assertTrue(ozoneBucket.getName().equals(bucketName));
+
+    String keyc1 = "/a/b1/c1/c1.tx";
+    String keyc2 = "/a/b1/c2/c2.tx";
+
+    String keyd13 = "/a/b2/d1/d11.tx";
+    String keyd21 = "/a/b2/d2/d21.tx";
+    String keyd22 = "/a/b2/d2/d22.tx";
+    String keyd31 = "/a/b2/d3/d31.tx";
+
+    String keye11 = "/a/b3/e1/e11.tx";
+    String keye21 = "/a/b3/e2/e21.tx";
+    String keye31 = "/a/b3/e3/e31.tx";
+
+    LinkedList<String> keys = new LinkedList<>();
+    keys.add(keyc1);
+    keys.add(keyc2);
+
+    keys.add(keyd13);
+    keys.add(keyd21);
+    keys.add(keyd22);
+    keys.add(keyd31);
+
+    keys.add(keye11);
+    keys.add(keye21);
+    keys.add(keye31);
+
+    int length = 10;
+    byte[] input = new byte[length];
+    Arrays.fill(input, (byte)96);
+
+    createKeys(ozoneBucket, keys);
+
+    // Root level listing keys
+    Iterator<? extends OzoneKey> ozoneKeyIterator =
+        ozoneBucket.listKeys(null, null);
+    verifyFullTreeStructure(ozoneKeyIterator);
+
+    ozoneKeyIterator =
+        ozoneBucket.listKeys("a/", null);
+    verifyFullTreeStructure(ozoneKeyIterator);
+
+    LinkedList<String> expectedKeys;
+
+    // Intermediate level keyPrefix - 2nd level
+    ozoneKeyIterator =
+        ozoneBucket.listKeys("a///b2///", null);
+    expectedKeys = new LinkedList<>();
+    expectedKeys.add("a/b2/");
+    expectedKeys.add("a/b2/d1/");
+    expectedKeys.add("a/b2/d2/");
+    expectedKeys.add("a/b2/d3/");
+    expectedKeys.add("a/b2/d1/d11.tx");
+    expectedKeys.add("a/b2/d2/d21.tx");
+    expectedKeys.add("a/b2/d2/d22.tx");
+    expectedKeys.add("a/b2/d3/d31.tx");
+    checkKeyList(ozoneKeyIterator, expectedKeys);
+
+    // Intermediate level keyPrefix - 3rd level
+    ozoneKeyIterator =
+        ozoneBucket.listKeys("a/b2/d1", null);
+    expectedKeys = new LinkedList<>();
+    expectedKeys.add("a/b2/d1/");
+    expectedKeys.add("a/b2/d1/d11.tx");
+    checkKeyList(ozoneKeyIterator, expectedKeys);
+
+    // Boundary of a level
+    ozoneKeyIterator =
+        ozoneBucket.listKeys("a/b2/d2", "a/b2/d2/d21.tx");
+    expectedKeys = new LinkedList<>();
+    expectedKeys.add("a/b2/d2/d22.tx");
+    checkKeyList(ozoneKeyIterator, expectedKeys);
+
+    // Boundary case - last node in the depth-first-traversal
+    ozoneKeyIterator =
+        ozoneBucket.listKeys("a/b3/e3", "a/b3/e3/e31.tx");
+    expectedKeys = new LinkedList<>();
+    checkKeyList(ozoneKeyIterator, expectedKeys);
+  }
+
+  private void verifyFullTreeStructure(Iterator<? extends OzoneKey> keyItr) {
+    LinkedList<String> expectedKeys = new LinkedList<>();
+    expectedKeys.add("a/");
+    expectedKeys.add("a/b1/");
+    expectedKeys.add("a/b2/");
+    expectedKeys.add("a/b3/");
+    expectedKeys.add("a/b1/c1/");
+    expectedKeys.add("a/b1/c2/");
+    expectedKeys.add("a/b1/c1/c1.tx");
+    expectedKeys.add("a/b1/c2/c2.tx");
+    expectedKeys.add("a/b2/d1/");
+    expectedKeys.add("a/b2/d2/");
+    expectedKeys.add("a/b2/d3/");
+    expectedKeys.add("a/b2/d1/d11.tx");
+    expectedKeys.add("a/b2/d2/d21.tx");
+    expectedKeys.add("a/b2/d2/d22.tx");
+    expectedKeys.add("a/b2/d3/d31.tx");
+    expectedKeys.add("a/b3/e1/");
+    expectedKeys.add("a/b3/e2/");
+    expectedKeys.add("a/b3/e3/");
+    expectedKeys.add("a/b3/e1/e11.tx");
+    expectedKeys.add("a/b3/e2/e21.tx");
+    expectedKeys.add("a/b3/e3/e31.tx");
+    checkKeyList(keyItr, expectedKeys);
+  }
+
+  @Test
+  public void testListKeysWithNotNormalizedPath() throws Exception {
+    OzoneClient client = cluster.getClient();
+
+    ObjectStore objectStore = client.getObjectStore();
+    OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
+    Assert.assertTrue(ozoneVolume.getName().equals(volumeName));
+    OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
+    Assert.assertTrue(ozoneBucket.getName().equals(bucketName));
+
+    String key1 = "/dir1///dir2/file1/";
+    String key2 = "/dir1///dir2/file2/";
+    String key3 = "/dir1///dir2/file3/";
+
+    LinkedList<String> keys = new LinkedList<>();
+    keys.add("dir1/");
+    keys.add("dir1/dir2/");
+    keys.add(OmUtils.normalizeKey(key1, false));
+    keys.add(OmUtils.normalizeKey(key2, false));
+    keys.add(OmUtils.normalizeKey(key3, false));
+
+    int length = 10;
+    byte[] input = new byte[length];
+    Arrays.fill(input, (byte)96);
+
+    createKey(ozoneBucket, key1, 10, input);
+    createKey(ozoneBucket, key2, 10, input);
+    createKey(ozoneBucket, key3, 10, input);
+
+    // Iterator with a non-normalized key name as prefix.
+
+    Iterator<? extends OzoneKey> ozoneKeyIterator =
+            ozoneBucket.listKeys("/dir1//", null);
+
+    checkKeyList(ozoneKeyIterator, keys);
+
+    // Iterator with normalized key prefix.
+    ozoneKeyIterator =
+            ozoneBucket.listKeys("dir1/");
+
+    checkKeyList(ozoneKeyIterator, keys);
+
+    // Iterator with non-normalized key name as previous key.
+    ozoneKeyIterator = ozoneBucket.listKeys(null,
+            "/dir1///dir2/file1/");
+
+    // Remove keys sorted at or before dir1/dir2/file1.
+    keys.remove("dir1/");
+    keys.remove("dir1/dir2/");
+    keys.remove("dir1/dir2/file1");
+
+    checkKeyList(ozoneKeyIterator, keys);
+
+    // Iterator with normalized key as previous key.
+    ozoneKeyIterator = ozoneBucket.listKeys(null,
+            OmUtils.normalizeKey(key1, false));
+
+    checkKeyList(ozoneKeyIterator, keys);
+  }
+
+  private void checkKeyList(Iterator<? extends OzoneKey > ozoneKeyIterator,
+      List<String> keys) {
+
+    LinkedList<String> outputKeys = new LinkedList<>();
+    while (ozoneKeyIterator.hasNext()) {
+      OzoneKey ozoneKey = ozoneKeyIterator.next();
+      outputKeys.add(ozoneKey.getName());
+    }
+
+    Assert.assertEquals(keys, outputKeys);
+  }
+
+  private void createKeys(OzoneBucket ozoneBucket, List<String> keys)
+      throws Exception {
+    int length = 10;
+    byte[] input = new byte[length];
+    Arrays.fill(input, (byte) 96);
+    for (String key : keys) {
+      createKey(ozoneBucket, key, 10, input);
+    }
+  }
+
+  private void createKey(OzoneBucket ozoneBucket, String key, int length,
+      byte[] input) throws Exception {
+
+    OzoneOutputStream ozoneOutputStream =
+            ozoneBucket.createKey(key, length);
+
+    ozoneOutputStream.write(input);
+    ozoneOutputStream.write(input, 0, 10);
+    ozoneOutputStream.close();
+
+    // Read the key with given key name.
+    OzoneInputStream ozoneInputStream = ozoneBucket.readKey(key);
+    byte[] read = new byte[length];
+    ozoneInputStream.read(read, 0, length);
+    ozoneInputStream.close();
+
+    String inputString = new String(input, StandardCharsets.UTF_8);
+    Assert.assertEquals(inputString, new String(read, StandardCharsets.UTF_8));
+
+    // Read using filesystem.
+    String rootPath = String.format("%s://%s.%s/", OZONE_URI_SCHEME,
+            bucketName, volumeName);
+    OzoneFileSystem o3fs = (OzoneFileSystem) FileSystem.get(new URI(rootPath),
+            conf);
+    FSDataInputStream fsDataInputStream = o3fs.open(new Path(key));
+    read = new byte[length];
+    fsDataInputStream.read(read, 0, length);
+    fsDataInputStream.close();
+
+    Assert.assertEquals(inputString, new String(read, StandardCharsets.UTF_8));
+  }
+
   @Test
   public void testRenameKey() throws IOException {
     String fromKeyName = UUID.randomUUID().toString();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 604f7d2..f6ae506 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -2420,7 +2420,8 @@ public class KeyManagerImpl implements KeyManager {
 
         // Check startKey is an immediate child of keyName. For example,
         // keyName=/a/ and expected startKey=/a/b. startKey can't be /xyz/b.
-        if (!OzoneFSUtils.isImmediateChild(keyName, startKey)) {
+        if (StringUtils.isNotBlank(keyName) &&
+                !OzoneFSUtils.isImmediateChild(keyName, startKey)) {
           if (LOG.isDebugEnabled()) {
             LOG.debug("StartKey {} is not an immediate child of keyName {}. " +
                     "Returns empty list", startKey, keyName);
@@ -2428,6 +2429,11 @@ public class KeyManagerImpl implements KeyManager {
           return Collections.emptyList();
         }
 
+        // If prefixPath is blank, derive it from startKey's parent directory.
+        if (StringUtils.isBlank(prefixPath)) {
+          prefixPath = OzoneFSUtils.getParentDir(startKey);
+        }
+
         OzoneFileStatus fileStatusInfo = getOzoneFileStatusV1(volumeName,
                 bucketName, startKey, false, null, true);
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index ebf86ce..e42bc6b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -697,7 +697,8 @@ public final class OMFileRequest {
     if (Strings.isNullOrEmpty(prefixName)) {
       return fileName;
     }
-    return prefixName.concat(OzoneConsts.OZONE_URI_DELIMITER).concat(fileName);
+    prefixName = OzoneFSUtils.addTrailingSlashIfNeeded(prefixName);
+    return prefixName.concat(fileName);
   }
 
   /**
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
index f008ac2..d396e8e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
@@ -61,7 +61,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 
-import static org.apache.hadoop.ozone.OzoneConsts.OM_MULTIPART_MIN_SIZE;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
 

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 08/29: HDDS-4514. AllocateBlock : lookup and update open file table for the given path (#1679)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit aec4ca5c3800b1c3c8f5f80f12e3aecbbc5ea7ec
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Tue Dec 22 09:07:25 2020 +0530

    HDDS-4514. AllocateBlock : lookup and update open file table for the given path (#1679)
---
 .../hadoop/fs/ozone/TestOzoneFileSystem.java       |  74 +++++++
 .../hadoop/fs/ozone/TestOzoneFileSystemV1.java     |  25 ---
 .../om/ratis/utils/OzoneManagerRatisUtils.java     |   4 +
 .../om/request/file/OMFileCreateRequestV1.java     |   3 +
 .../ozone/om/request/file/OMFileRequest.java       |  52 +++++
 .../om/request/key/OMAllocateBlockRequestV1.java   | 227 +++++++++++++++++++++
 .../ozone/om/request/key/OMKeyCommitRequestV1.java |  76 +------
 .../om/response/key/OMAllocateBlockResponse.java   |  12 ++
 ...esponse.java => OMAllocateBlockResponseV1.java} |  49 ++---
 .../om/request/file/TestOMFileCreateRequest.java   |  22 --
 .../om/request/file/TestOMFileCreateRequestV1.java |   5 +
 .../om/request/key/TestOMAllocateBlockRequest.java |  44 ++--
 .../request/key/TestOMAllocateBlockRequestV1.java  | 119 +++++++++++
 .../om/request/key/TestOMKeyCommitRequestV1.java   |   5 +
 .../om/request/key/TestOMKeyDeleteRequestV1.java   |  14 ++
 .../ozone/om/request/key/TestOMKeyRequest.java     |  25 +++
 .../response/file/TestOMFileCreateResponseV1.java  |   5 +
 .../response/key/TestOMAllocateBlockResponse.java  |  37 ++--
 ...eV1.java => TestOMAllocateBlockResponseV1.java} |  76 +++----
 .../om/response/key/TestOMKeyCommitResponseV1.java |   5 +
 .../om/response/key/TestOMKeyDeleteResponseV1.java |  16 ++
 21 files changed, 667 insertions(+), 228 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
index 855484c..c830e07 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.fs.TrashPolicy;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -583,6 +584,7 @@ public class TestOzoneFileSystem {
   @Test
   public void testListStatusOnLargeDirectory() throws Exception {
     Path root = new Path("/");
+    deleteRootDir(); // cleanup
     Set<String> paths = new TreeSet<>();
     int numDirs = LISTING_PAGE_SIZE + LISTING_PAGE_SIZE / 2;
     for(int i = 0; i < numDirs; i++) {
@@ -592,6 +594,21 @@ public class TestOzoneFileSystem {
     }
 
     FileStatus[] fileStatuses = o3fs.listStatus(root);
+    // Added logs for debugging failures, to check any sub-path mismatches.
+    Set<String> actualPaths = new TreeSet<>();
+    ArrayList<String> actualPathList = new ArrayList<>();
+    if (rootItemCount != fileStatuses.length) {
+      for (int i = 0; i < fileStatuses.length; i++) {
+        actualPaths.add(fileStatuses[i].getPath().getName());
+        actualPathList.add(fileStatuses[i].getPath().getName());
+      }
+      if (rootItemCount != actualPathList.size()) {
+        actualPaths.removeAll(paths);
+        actualPathList.removeAll(paths);
+        LOG.info("actualPaths: {}", actualPaths);
+        LOG.info("actualPathList: {}", actualPathList);
+      }
+    }
     assertEquals(
         "Total directories listed do not match the existing directories",
         numDirs, fileStatuses.length);
@@ -602,6 +619,31 @@ public class TestOzoneFileSystem {
   }
 
   /**
+   * Cleanup files and directories.
+   *
+   * @throws IOException DB failure
+   */
+  protected void deleteRootDir() throws IOException {
+    Path root = new Path("/");
+    FileStatus[] fileStatuses = fs.listStatus(root);
+
+    rootItemCount = 0; // reset to zero
+
+    if (fileStatuses == null) {
+      return;
+    }
+
+    for (FileStatus fStatus : fileStatuses) {
+      fs.delete(fStatus.getPath(), true);
+    }
+
+    fileStatuses = fs.listStatus(root);
+    if (fileStatuses != null) {
+      Assert.assertEquals("Delete root failed!", 0, fileStatuses.length);
+    }
+  }
+
+  /**
    * Tests listStatus on a path with subdirs.
    */
   @Test
@@ -662,6 +704,38 @@ public class TestOzoneFileSystem {
   }
 
   @Test
+  public void testAllocateMoreThanOneBlock() throws IOException {
+    Path file = new Path("/file");
+    String str = "TestOzoneFileSystemV1.testSeekOnFileLength";
+    byte[] strBytes = str.getBytes();
+    long numBlockAllocationsOrg =
+            cluster.getOzoneManager().getMetrics().getNumBlockAllocates();
+
+    try (FSDataOutputStream out1 = fs.create(file, FsPermission.getDefault(),
+            true, 8, (short) 3, 1, null)) {
+      for (int i = 0; i < 100000; i++) {
+        out1.write(strBytes);
+      }
+    }
+
+    try (FSDataInputStream stream = fs.open(file)) {
+      FileStatus fileStatus = fs.getFileStatus(file);
+      long blkSize = fileStatus.getBlockSize();
+      long fileLength = fileStatus.getLen();
+      Assert.assertTrue("Block allocation should happen",
+              fileLength > blkSize);
+
+      long newNumBlockAllocations =
+              cluster.getOzoneManager().getMetrics().getNumBlockAllocates();
+
+      Assert.assertTrue("Block allocation should happen",
+              (newNumBlockAllocations > numBlockAllocationsOrg));
+
+      stream.seek(fileLength);
+      assertEquals(-1, stream.read());
+    }
+  }
+
   public void testDeleteRoot() throws IOException {
     Path dir = new Path("/dir");
     fs.mkdirs(dir);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
index 212080b..2938714 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
@@ -407,31 +407,6 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
     }
   }
 
-  /**
-   * Cleanup files and directories.
-   *
-   * @throws IOException DB failure
-   */
-  protected void deleteRootDir() throws IOException {
-    Path root = new Path("/");
-    FileStatus[] fileStatuses = fs.listStatus(root);
-
-    rootItemCount = 0; // reset to zero
-
-    if (fileStatuses == null) {
-      return;
-    }
-
-    for (FileStatus fStatus : fileStatuses) {
-      fs.delete(fStatus.getPath(), true);
-    }
-
-    fileStatuses = fs.listStatus(root);
-    if (fileStatuses != null) {
-      Assert.assertEquals("Delete root failed!", 0, fileStatuses.length);
-    }
-  }
-
   @Override
   @Test
   @Ignore("TODO:HDDS-2939")
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index 97868d8..4702181 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.ozone.om.request.file.OMFileCreateRequest;
 import org.apache.hadoop.ozone.om.request.file.OMFileCreateRequestV1;
 import org.apache.hadoop.ozone.om.request.key.OMKeysDeleteRequest;
 import org.apache.hadoop.ozone.om.request.key.OMAllocateBlockRequest;
+import org.apache.hadoop.ozone.om.request.key.OMAllocateBlockRequestV1;
 import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequestV1;
 import org.apache.hadoop.ozone.om.request.key.OMKeyCreateRequest;
@@ -134,6 +135,9 @@ public final class OzoneManagerRatisUtils {
     case SetBucketProperty:
       return new OMBucketSetPropertyRequest(omRequest);
     case AllocateBlock:
+      if (omLayoutVersionV1) {
+        return new OMAllocateBlockRequestV1(omRequest);
+      }
       return new OMAllocateBlockRequest(omRequest);
     case CreateKey:
       return new OMKeyCreateRequest(omRequest);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
index 606e15b..e38908a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
@@ -139,6 +139,9 @@ public class OMFileCreateRequestV1 extends OMFileCreateRequest {
                 pathInfoV1.getLeafNodeName());
         dbFileInfo = OMFileRequest.getOmKeyInfoFromFileTable(false,
                 omMetadataManager, dbFileKey, keyName);
+        if (dbFileInfo != null) {
+          ozoneManager.getKeyManager().refresh(dbFileInfo);
+        }
       }
 
       // check if the file or directory already existed in OM
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index fc9bab0..aadc126 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -52,6 +52,8 @@ import org.slf4j.LoggerFactory;
 
 import javax.annotation.Nonnull;
 
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
 
 /**
  * Base class for file requests.
@@ -851,4 +853,54 @@ public final class OMFileRequest {
   public static boolean isImmediateChild(long parentId, long ancestorId) {
     return parentId == ancestorId;
   }
+
+  /**
+   * Get parent id for the user given path.
+   *
+   * @param bucketId       bucket id
+   * @param pathComponents file path elements
+   * @param keyName        user given key name
+   * @param omMetadataManager   om metadata manager
+   * @return lastKnownParentID
+   * @throws IOException DB failure or parent does not exist in DirectoryTable
+   */
+  public static long getParentID(long bucketId, Iterator<Path> pathComponents,
+      String keyName, OMMetadataManager omMetadataManager) throws IOException {
+
+    long lastKnownParentId = bucketId;
+
+    // If no sub-dirs then bucketID is the root/parent.
+    if(!pathComponents.hasNext()){
+      return bucketId;
+    }
+
+    OmDirectoryInfo omDirectoryInfo;
+    while (pathComponents.hasNext()) {
+      String nodeName = pathComponents.next().toString();
+      boolean reachedLastPathComponent = !pathComponents.hasNext();
+      String dbNodeName =
+              omMetadataManager.getOzonePathKey(lastKnownParentId, nodeName);
+
+      omDirectoryInfo = omMetadataManager.
+              getDirectoryTable().get(dbNodeName);
+      if (omDirectoryInfo != null) {
+        if (reachedLastPathComponent) {
+          throw new OMException("Can not create file: " + keyName +
+                  " as there is already directory in the given path",
+                  NOT_A_FILE);
+        }
+        lastKnownParentId = omDirectoryInfo.getObjectID();
+      } else {
+        // One of the sub-dirs doesn't exist in DB. The immediate parent must
+        // exist for committing the key; otherwise the operation fails.
+        if (!reachedLastPathComponent) {
+          throw new OMException("Failed to find parent directory of "
+                  + keyName + " in DirectoryTable", KEY_NOT_FOUND);
+        }
+        break;
+      }
+    }
+
+    return lastKnownParentId;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestV1.java
new file mode 100644
index 0000000..a6a2558
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestV1.java
@@ -0,0 +1,227 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.key;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.audit.AuditLogger;
+import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OMMetrics;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.key.OMAllocateBlockResponse;
+import org.apache.hadoop.ozone.om.response.key.OMAllocateBlockResponseV1;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AllocateBlockRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AllocateBlockResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.jetbrains.annotations.NotNull;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+
+/**
+ * Handles allocate block request layout version V1.
+ */
+public class OMAllocateBlockRequestV1 extends OMAllocateBlockRequest {
+
+  private static final Logger LOG =
+          LoggerFactory.getLogger(OMAllocateBlockRequestV1.class);
+
+  public OMAllocateBlockRequestV1(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+
+    AllocateBlockRequest allocateBlockRequest =
+            getOmRequest().getAllocateBlockRequest();
+
+    KeyArgs keyArgs =
+            allocateBlockRequest.getKeyArgs();
+
+    OzoneManagerProtocolProtos.KeyLocation blockLocation =
+            allocateBlockRequest.getKeyLocation();
+    Preconditions.checkNotNull(blockLocation);
+
+    String volumeName = keyArgs.getVolumeName();
+    String bucketName = keyArgs.getBucketName();
+    String keyName = keyArgs.getKeyName();
+    long clientID = allocateBlockRequest.getClientID();
+
+    OMMetrics omMetrics = ozoneManager.getMetrics();
+    omMetrics.incNumBlockAllocateCalls();
+
+    AuditLogger auditLogger = ozoneManager.getAuditLogger();
+
+    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
+    auditMap.put(OzoneConsts.CLIENT_ID, String.valueOf(clientID));
+
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+    String openKeyName = null;
+
+    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
+            getOmRequest());
+    OMClientResponse omClientResponse = null;
+
+    OmKeyInfo openKeyInfo = null;
+    IOException exception = null;
+    OmBucketInfo omBucketInfo = null;
+    boolean acquiredLock = false;
+
+    try {
+      keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
+      volumeName = keyArgs.getVolumeName();
+      bucketName = keyArgs.getBucketName();
+
+      // check Acl
+      checkKeyAclsInOpenKeyTable(ozoneManager, volumeName, bucketName, keyName,
+          IAccessAuthorizer.ACLType.WRITE, allocateBlockRequest.getClientID());
+
+      validateBucketAndVolume(omMetadataManager, volumeName,
+          bucketName);
+
+      // Here we don't acquire the bucket/volume lock because, for a single
+      // client, allocateBlock is called serially. With this approach it
+      // won't 'fail fast' on a race with a concurrent delete/rename op,
+      // assuming the request will fail later at the key commit operation.
+      openKeyName = getOpenKeyName(volumeName, bucketName, keyName, clientID,
+              ozoneManager);
+      openKeyInfo = getOpenKeyInfo(omMetadataManager, openKeyName, keyName);
+      if (openKeyInfo == null) {
+        throw new OMException("Open Key not found " + openKeyName,
+                KEY_NOT_FOUND);
+      }
+
+      List<OmKeyLocationInfo> newLocationList = Collections.singletonList(
+              OmKeyLocationInfo.getFromProtobuf(blockLocation));
+
+      acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
+              volumeName, bucketName);
+      omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
+      // check bucket and volume quota
+      long preAllocatedSpace = newLocationList.size()
+              * ozoneManager.getScmBlockSize()
+              * openKeyInfo.getFactor().getNumber();
+      checkBucketQuotaInBytes(omBucketInfo, preAllocatedSpace);
+      // Append new block
+      openKeyInfo.appendNewBlocks(newLocationList, false);
+
+      // Set modification time.
+      openKeyInfo.setModificationTime(keyArgs.getModificationTime());
+
+      // Set the UpdateID to current transactionLogIndex
+      openKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
+
+      // Add to cache.
+      addOpenTableCacheEntry(trxnLogIndex, omMetadataManager, openKeyName,
+              openKeyInfo);
+      omBucketInfo.incrUsedBytes(preAllocatedSpace);
+
+      omResponse.setAllocateBlockResponse(AllocateBlockResponse.newBuilder()
+              .setKeyLocation(blockLocation).build());
+      omClientResponse = getOmClientResponse(clientID, omResponse,
+              openKeyInfo, omBucketInfo.copyObject());
+      LOG.debug("Allocated block for Volume:{}, Bucket:{}, OpenKey:{}",
+              volumeName, bucketName, openKeyName);
+    } catch (IOException ex) {
+      omMetrics.incNumBlockAllocateCallFails();
+      exception = ex;
+      omClientResponse = new OMAllocateBlockResponse(createErrorOMResponse(
+              omResponse, exception));
+      LOG.error("Allocate Block failed. Volume:{}, Bucket:{}, OpenKey:{}. " +
+              "Exception:{}", volumeName, bucketName, openKeyName, exception);
+    } finally {
+      addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
+              omDoubleBufferHelper);
+      if (acquiredLock) {
+        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
+                bucketName);
+      }
+    }
+
+    auditLog(auditLogger, buildAuditMessage(OMAction.ALLOCATE_BLOCK, auditMap,
+            exception, getOmRequest().getUserInfo()));
+
+    return omClientResponse;
+  }
+
+  private OmKeyInfo getOpenKeyInfo(OMMetadataManager omMetadataManager,
+      String openKeyName, String keyName) throws IOException {
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    return OMFileRequest.getOmKeyInfoFromFileTable(true,
+            omMetadataManager, openKeyName, fileName);
+  }
+
+  private String getOpenKeyName(String volumeName, String bucketName,
+      String keyName, long clientID, OzoneManager ozoneManager)
+          throws IOException {
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketId = omBucketInfo.getObjectID();
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    Iterator<Path> pathComponents = Paths.get(keyName).iterator();
+    long parentID = OMFileRequest.getParentID(bucketId, pathComponents,
+            keyName, omMetadataManager);
+    return omMetadataManager.getOpenFileName(parentID, fileName,
+            clientID);
+  }
+
+  private void addOpenTableCacheEntry(long trxnLogIndex,
+      OMMetadataManager omMetadataManager, String openKeyName,
+      OmKeyInfo openKeyInfo) {
+    String fileName = openKeyInfo.getFileName();
+    OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager, openKeyName,
+            openKeyInfo, fileName, trxnLogIndex);
+  }
+
+  @NotNull
+  private OMClientResponse getOmClientResponse(long clientID,
+      OMResponse.Builder omResponse, OmKeyInfo openKeyInfo,
+      OmBucketInfo omBucketInfo) {
+    return new OMAllocateBlockResponseV1(omResponse.build(), openKeyInfo,
+            clientID, omBucketInfo);
+  }
+}
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java
index 8c47f7e..7d99119 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.ozone.om.OMMetrics;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
@@ -53,7 +52,6 @@ import java.util.List;
 import java.util.Map;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
 
 /**
@@ -130,8 +128,8 @@ public class OMKeyCommitRequestV1 extends OMKeyCommitRequest {
       String fileName = OzoneFSUtils.getFileName(keyName);
       omBucketInfo = omMetadataManager.getBucketTable().get(bucketKey);
       long bucketId = omBucketInfo.getObjectID();
-      long parentID = getParentID(bucketId, pathComponents, keyName,
-              omMetadataManager, ozoneManager);
+      long parentID = OMFileRequest.getParentID(bucketId, pathComponents,
+              keyName, omMetadataManager);
       String dbFileKey = omMetadataManager.getOzonePathKey(parentID, fileName);
       dbOpenFileKey = omMetadataManager.getOpenFileName(parentID, fileName,
               commitKeyRequest.getClientID());
@@ -197,74 +195,4 @@ public class OMKeyCommitRequestV1 extends OMKeyCommitRequest {
 
     return omClientResponse;
   }
-
-
-  /**
-   * Check for directory exists with same name, if it exists throw error.
-   *
-   * @param keyName                  key name
-   * @param ozoneManager             Ozone Manager
-   * @param reachedLastPathComponent true if the path component is a fileName
-   * @throws IOException if directory exists with same name
-   */
-  private void checkDirectoryAlreadyExists(String keyName,
-                                           OzoneManager ozoneManager,
-                                           boolean reachedLastPathComponent)
-          throws IOException {
-    // Reached last component, which would be a file. Returns its parentID.
-    if (reachedLastPathComponent && ozoneManager.getEnableFileSystemPaths()) {
-      throw new OMException("Can not create file: " + keyName +
-              " as there is already directory in the given path", NOT_A_FILE);
-    }
-  }
-
-  /**
-   * Get parent id for the user given path.
-   *
-   * @param bucketId          bucket id
-   * @param pathComponents    fie path elements
-   * @param keyName           user given key name
-   * @param omMetadataManager metadata manager
-   * @return lastKnownParentID
-   * @throws IOException DB failure or parent not exists in DirectoryTable
-   */
-  private long getParentID(long bucketId, Iterator<Path> pathComponents,
-                           String keyName, OMMetadataManager omMetadataManager,
-                           OzoneManager ozoneManager)
-          throws IOException {
-
-    long lastKnownParentId = bucketId;
-
-    // If no sub-dirs then bucketID is the root/parent.
-    if(!pathComponents.hasNext()){
-      return bucketId;
-    }
-
-    OmDirectoryInfo omDirectoryInfo;
-    while (pathComponents.hasNext()) {
-      String nodeName = pathComponents.next().toString();
-      boolean reachedLastPathComponent = !pathComponents.hasNext();
-      String dbNodeName =
-              omMetadataManager.getOzonePathKey(lastKnownParentId, nodeName);
-
-      omDirectoryInfo = omMetadataManager.
-              getDirectoryTable().get(dbNodeName);
-      if (omDirectoryInfo != null) {
-        checkDirectoryAlreadyExists(keyName, ozoneManager,
-                reachedLastPathComponent);
-        lastKnownParentId = omDirectoryInfo.getObjectID();
-      } else {
-        // One of the sub-dir doesn't exists in DB. Immediate parent should
-        // exists for committing the key, otherwise will fail the operation.
-        if (!reachedLastPathComponent) {
-          throw new OMException("Failed to commit key, as parent directory of "
-                  + keyName + " entry is not found in DirectoryTable",
-                  KEY_NOT_FOUND);
-        }
-        break;
-      }
-    }
-
-    return lastKnownParentId;
-  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java
index 4b20853..c97d702 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java
@@ -74,4 +74,16 @@ public class OMAllocateBlockResponse extends OMClientResponse {
         omMetadataManager.getBucketKey(omKeyInfo.getVolumeName(),
             omKeyInfo.getBucketName()), omBucketInfo);
   }
+
+  protected OmKeyInfo getOmKeyInfo() {
+    return omKeyInfo;
+  }
+
+  protected OmBucketInfo getOmBucketInfo() {
+    return omBucketInfo;
+  }
+
+  protected long getClientID() {
+    return clientID;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponseV1.java
similarity index 59%
copy from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java
copy to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponseV1.java
index 4b20853..ef8b639 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponseV1.java
@@ -18,60 +18,43 @@
 
 package org.apache.hadoop.ozone.om.response.key;
 
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 
-import java.io.IOException;
 import javax.annotation.Nonnull;
+import java.io.IOException;
 
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
 
 /**
- * Response for AllocateBlock request.
+ * Response for AllocateBlock request layout version V1.
  */
-@CleanupTableInfo(cleanupTables = {OPEN_KEY_TABLE})
-public class OMAllocateBlockResponse extends OMClientResponse {
+@CleanupTableInfo(cleanupTables = {OPEN_FILE_TABLE})
+public class OMAllocateBlockResponseV1 extends OMAllocateBlockResponse {
 
-  private OmKeyInfo omKeyInfo;
-  private long clientID;
-  private OmBucketInfo omBucketInfo;
-
-  public OMAllocateBlockResponse(@Nonnull OMResponse omResponse,
+  public OMAllocateBlockResponseV1(@Nonnull OMResponse omResponse,
       @Nonnull OmKeyInfo omKeyInfo, long clientID,
       @Nonnull OmBucketInfo omBucketInfo) {
-    super(omResponse);
-    this.omKeyInfo = omKeyInfo;
-    this.clientID = clientID;
-    this.omBucketInfo = omBucketInfo;
-  }
-
-  /**
-   * For when the request is not successful.
-   * For a successful request, the other constructor should be used.
-   */
-  public OMAllocateBlockResponse(@Nonnull OMResponse omResponse) {
-    super(omResponse);
-    checkStatusNotOK();
+    super(omResponse, omKeyInfo, clientID, omBucketInfo);
   }
 
   @Override
   public void addToDBBatch(OMMetadataManager omMetadataManager,
       BatchOperation batchOperation) throws IOException {
 
-    String openKey = omMetadataManager.getOpenKey(omKeyInfo.getVolumeName(),
-        omKeyInfo.getBucketName(), omKeyInfo.getKeyName(), clientID);
-    omMetadataManager.getOpenKeyTable().putWithBatch(batchOperation, openKey,
-        omKeyInfo);
+    OMFileRequest.addToOpenFileTable(omMetadataManager, batchOperation,
+            getOmKeyInfo(), getClientID());
 
     // update bucket usedBytes.
     omMetadataManager.getBucketTable().putWithBatch(batchOperation,
-        omMetadataManager.getBucketKey(omKeyInfo.getVolumeName(),
-            omKeyInfo.getBucketName()), omBucketInfo);
+            omMetadataManager.getBucketKey(getOmKeyInfo().getVolumeName(),
+                    getOmKeyInfo().getBucketName()), getOmBucketInfo());
   }
 }
+
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
index a963f88..0a76589 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
@@ -398,28 +398,6 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
   }
 
   /**
-   * Verify path in open key table. Also, it returns OMKeyInfo for the given
-   * key path.
-   *
-   * @param key      key name
-   * @param id       client id
-   * @param doAssert if true then do assertion, otherwise it just skip.
-   * @return om key info for the given key path.
-   * @throws Exception DB failure
-   */
-  protected OmKeyInfo verifyPathInOpenKeyTable(String key, long id,
-                                               boolean doAssert)
-          throws Exception {
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-            key, id);
-    OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
-    if (doAssert) {
-      Assert.assertNotNull("Failed to find key in OpenKeyTable", omKeyInfo);
-    }
-    return omKeyInfo;
-  }
-
-  /**
    * Gets OMFileCreateRequest reference.
    *
    * @param omRequest om request
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestV1.java
index 7ded386..046ac90 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestV1.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.util.StringUtils;
@@ -183,6 +184,10 @@ public class TestOMFileCreateRequestV1 extends TestOMFileCreateRequest {
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration config = super.getOzoneConfiguration();
     config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    // The omLayoutVersionV1 flag is normally set while invoking
+    // OzoneManager#start(), which is not invoked in this test. Hence the
+    // flag is set explicitly here to populate the prefix tables.
+    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
     return config;
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java
index 4b3d38f..9d26d0d 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java
@@ -24,6 +24,7 @@ import java.util.List;
 import java.util.UUID;
 
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -57,21 +58,19 @@ public class TestOMAllocateBlockRequest extends TestOMKeyRequest {
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
 
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
+    addKeyToOpenKeyTable(volumeName, bucketName);
 
     OMRequest modifiedOmRequest =
         doPreExecute(createAllocateBlockRequest());
 
     OMAllocateBlockRequest omAllocateBlockRequest =
-        new OMAllocateBlockRequest(modifiedOmRequest);
+            getOmAllocateBlockRequest(modifiedOmRequest);
 
     // Check before calling validateAndUpdateCache. As adding DB entry has
     // not added any blocks, so size should be zero.
 
-    OmKeyInfo omKeyInfo =
-        omMetadataManager.getOpenKeyTable().get(omMetadataManager.getOpenKey(
-            volumeName, bucketName, keyName, clientID));
+    OmKeyInfo omKeyInfo = verifyPathInOpenKeyTable(keyName, clientID,
+            true);
 
     List<OmKeyLocationInfo> omKeyLocationInfo =
         omKeyInfo.getLatestVersionLocations().getLocationList();
@@ -87,10 +86,8 @@ public class TestOMAllocateBlockRequest extends TestOMKeyRequest {
 
     // Check open table whether new block is added or not.
 
-    omKeyInfo =
-        omMetadataManager.getOpenKeyTable().get(omMetadataManager.getOpenKey(
-            volumeName, bucketName, keyName, clientID));
-
+    omKeyInfo = verifyPathInOpenKeyTable(keyName, clientID,
+            true);
 
     // Check modification time
     Assert.assertEquals(modifiedOmRequest.getAllocateBlockRequest()
@@ -119,6 +116,12 @@ public class TestOMAllocateBlockRequest extends TestOMKeyRequest {
 
   }
 
+  @NotNull
+  protected OMAllocateBlockRequest getOmAllocateBlockRequest(
+          OMRequest modifiedOmRequest) {
+    return new OMAllocateBlockRequest(modifiedOmRequest);
+  }
+
   @Test
   public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception {
 
@@ -126,7 +129,7 @@ public class TestOMAllocateBlockRequest extends TestOMKeyRequest {
         doPreExecute(createAllocateBlockRequest());
 
     OMAllocateBlockRequest omAllocateBlockRequest =
-        new OMAllocateBlockRequest(modifiedOmRequest);
+            getOmAllocateBlockRequest(modifiedOmRequest);
 
 
     OMClientResponse omAllocateBlockResponse =
@@ -145,7 +148,7 @@ public class TestOMAllocateBlockRequest extends TestOMKeyRequest {
         doPreExecute(createAllocateBlockRequest());
 
     OMAllocateBlockRequest omAllocateBlockRequest =
-        new OMAllocateBlockRequest(modifiedOmRequest);
+            getOmAllocateBlockRequest(modifiedOmRequest);
 
 
     // Added only volume to DB.
@@ -168,7 +171,7 @@ public class TestOMAllocateBlockRequest extends TestOMKeyRequest {
         doPreExecute(createAllocateBlockRequest());
 
     OMAllocateBlockRequest omAllocateBlockRequest =
-        new OMAllocateBlockRequest(modifiedOmRequest);
+            getOmAllocateBlockRequest(modifiedOmRequest);
 
     // Add volume, bucket entries to DB.
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
@@ -190,10 +193,11 @@ public class TestOMAllocateBlockRequest extends TestOMKeyRequest {
    * @return OMRequest - modified request returned from preExecute.
    * @throws Exception
    */
-  private OMRequest doPreExecute(OMRequest originalOMRequest) throws Exception {
+  protected OMRequest doPreExecute(OMRequest originalOMRequest)
+      throws Exception {
 
     OMAllocateBlockRequest omAllocateBlockRequest =
-        new OMAllocateBlockRequest(originalOMRequest);
+            getOmAllocateBlockRequest(originalOMRequest);
 
     OMRequest modifiedOmRequest =
         omAllocateBlockRequest.preExecute(ozoneManager);
@@ -228,7 +232,7 @@ public class TestOMAllocateBlockRequest extends TestOMKeyRequest {
   }
 
 
-  private OMRequest createAllocateBlockRequest() {
+  protected OMRequest createAllocateBlockRequest() {
 
     KeyArgs keyArgs = KeyArgs.newBuilder()
         .setVolumeName(volumeName).setBucketName(bucketName)
@@ -246,4 +250,12 @@ public class TestOMAllocateBlockRequest extends TestOMKeyRequest {
         .setAllocateBlockRequest(allocateBlockRequest).build();
 
   }
+
+  protected String addKeyToOpenKeyTable(String volumeName, String bucketName)
+          throws Exception {
+    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName,
+            keyName, clientID, replicationType, replicationFactor,
+            omMetadataManager);
+    return "";
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestV1.java
new file mode 100644
index 0000000..4e74979
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestV1.java
@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.apache.hadoop.ozone.om.request.key;
+
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Assert;
+
+/**
+ * Tests OMAllocateBlockRequest class layout version V1.
+ */
+public class TestOMAllocateBlockRequestV1 extends TestOMAllocateBlockRequest {
+
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    // The omLayoutVersionV1 flag is normally set while invoking
+    // OzoneManager#start(), which is not invoked in this test. Hence the
+    // flag is set explicitly here to populate the prefix tables.
+    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
+    return config;
+  }
+
+  protected String addKeyToOpenKeyTable(String volumeName, String bucketName)
+          throws Exception {
+    // need to initialize parentID
+    String parentDir = keyName;
+    String fileName = "file1";
+    keyName = parentDir + OzoneConsts.OM_KEY_PREFIX + fileName;
+
+    // add parentDir to dirTable
+    long parentID = TestOMRequestUtils.addParentsToDirTable(volumeName,
+            bucketName, parentDir, omMetadataManager);
+    long txnId = 50;
+    long objectId = parentID + 1;
+
+    OmKeyInfo omKeyInfoV1 =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId,
+                    Time.now());
+
+    // add key to openFileTable
+    TestOMRequestUtils.addFileToKeyTable(true, false,
+            fileName, omKeyInfoV1, clientID, txnLogId, omMetadataManager);
+
+    return omMetadataManager.getOzonePathKey(parentID, fileName);
+  }
+
+  @NotNull
+  protected OMAllocateBlockRequest getOmAllocateBlockRequest(
+          OzoneManagerProtocolProtos.OMRequest modifiedOmRequest) {
+    return new OMAllocateBlockRequestV1(modifiedOmRequest);
+  }
+
+  @Override
+  protected OmKeyInfo verifyPathInOpenKeyTable(String key, long id,
+      boolean doAssert) throws Exception {
+    long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName,
+            omMetadataManager);
+    String[] pathComponents = StringUtils.split(key, '/');
+    long parentId = bucketId;
+    for (int indx = 0; indx < pathComponents.length; indx++) {
+      String pathElement = pathComponents[indx];
+      // Reached last component, which is file name
+      if (indx == pathComponents.length - 1) {
+        String dbOpenFileName = omMetadataManager.getOpenFileName(
+                parentId, pathElement, id);
+        OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable()
+                .get(dbOpenFileName);
+        if (doAssert) {
+          Assert.assertNotNull("Invalid key!", omKeyInfo);
+        }
+        return omKeyInfo;
+      } else {
+        // directory
+        String dbKey = omMetadataManager.getOzonePathKey(parentId,
+                pathElement);
+        OmDirectoryInfo dirInfo =
+                omMetadataManager.getDirectoryTable().get(dbKey);
+        parentId = dirInfo.getObjectID();
+      }
+    }
+    if (doAssert) {
+      Assert.fail("Invalid key!");
+    }
+    return  null;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestV1.java
index f5168e1..ed1e2bd 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestV1.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.util.Time;
@@ -87,6 +88,10 @@ public class TestOMKeyCommitRequestV1 extends TestOMKeyCommitRequest {
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration config = super.getOzoneConfiguration();
     config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    // The omLayoutVersionV1 flag is normally set while invoking
+    // OzoneManager#start(), which is not invoked in this test. Hence the
+    // flag is set explicitly here to populate the prefix tables.
+    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
     return config;
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestV1.java
index dbba143..7527e78 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestV1.java
@@ -18,8 +18,11 @@
 
 package org.apache.hadoop.ozone.om.request.key;
 
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.util.Time;
@@ -54,4 +57,15 @@ public class TestOMKeyDeleteRequestV1 extends TestOMKeyDeleteRequest {
             fileName, omKeyInfo, -1, 50, omMetadataManager);
     return omKeyInfo.getPath();
   }
+
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    // The omLayoutVersionV1 flag is normally set while invoking
+    // OzoneManager#start(), which is not invoked in this test. Hence the
+    // flag is set explicitly here to populate the prefix tables.
+    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
+    return config;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
index 4bf66bb..33f58bb 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
@@ -27,11 +27,13 @@ import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.ozone.om.ResolvedBucket;
 import org.apache.hadoop.ozone.om.KeyManager;
 import org.apache.hadoop.ozone.om.KeyManagerImpl;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
 import org.jetbrains.annotations.NotNull;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.rules.TemporaryFolder;
@@ -179,6 +181,29 @@ public class TestOMKeyRequest {
     return new OzoneConfiguration();
   }
 
+
+  /**
+   * Verify path in open key table. Also, it returns OMKeyInfo for the given
+   * key path.
+   *
+   * @param key      key name
+   * @param id       client id
+   * @param doAssert if true then do assertion, otherwise it just skip.
+   * @return om key info for the given key path.
+   * @throws Exception DB failure
+   */
+  protected OmKeyInfo verifyPathInOpenKeyTable(String key, long id,
+                                               boolean doAssert)
+          throws Exception {
+    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
+            key, id);
+    OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
+    if (doAssert) {
+      Assert.assertNotNull("Failed to find key in OpenKeyTable", omKeyInfo);
+    }
+    return omKeyInfo;
+  }
+
   @After
   public void stop() {
     omMetrics.unRegister();
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java
index 19a1bb9..bc4345e 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse;
 import org.apache.hadoop.ozone.om.response.key.TestOMKeyCreateResponse;
@@ -68,6 +69,10 @@ public class TestOMFileCreateResponseV1 extends TestOMKeyCreateResponse {
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration config = super.getOzoneConfiguration();
     config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    // The omLayoutVersionV1 flag is normally set while invoking
+    // OzoneManager#start(), which is not invoked in this test. Hence the
+    // flag is set explicitly here to populate the prefix tables.
+    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
     return config;
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java
index 602ec99..33c16ae 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.om.response.key;
 
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -37,8 +38,7 @@ public class TestOMAllocateBlockResponse extends TestOMKeyResponse {
   @Test
   public void testAddToDBBatch() throws Exception {
 
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
+    OmKeyInfo omKeyInfo = createOmKeyInfo();
     OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
         .setVolumeName(volumeName).setBucketName(bucketName)
         .setCreationTime(Time.now()).build();
@@ -50,11 +50,9 @@ public class TestOMAllocateBlockResponse extends TestOMKeyResponse {
         .setCmdType(OzoneManagerProtocolProtos.Type.AllocateBlock)
         .build();
     OMAllocateBlockResponse omAllocateBlockResponse =
-        new OMAllocateBlockResponse(omResponse, omKeyInfo, clientID,
-            omBucketInfo);
+            getOmAllocateBlockResponse(omKeyInfo, omBucketInfo, omResponse);
 
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, clientID);
+    String openKey = getOpenKey();
 
     // Not adding key entry before to test whether commit is successful or not.
     Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
@@ -68,8 +66,7 @@ public class TestOMAllocateBlockResponse extends TestOMKeyResponse {
 
   @Test
   public void testAddToDBBatchWithErrorResponse() throws Exception {
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
+    OmKeyInfo omKeyInfo = createOmKeyInfo();
     OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
         .setVolumeName(volumeName).setBucketName(bucketName)
         .setCreationTime(Time.now()).build();
@@ -81,12 +78,10 @@ public class TestOMAllocateBlockResponse extends TestOMKeyResponse {
         .setCmdType(OzoneManagerProtocolProtos.Type.AllocateBlock)
         .build();
     OMAllocateBlockResponse omAllocateBlockResponse =
-        new OMAllocateBlockResponse(omResponse, omKeyInfo, clientID,
-            omBucketInfo);
+            getOmAllocateBlockResponse(omKeyInfo, omBucketInfo, omResponse);
 
     // Before calling addToDBBatch
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, clientID);
+    String openKey = getOpenKey();
     Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
 
     omAllocateBlockResponse.checkAndUpdateDB(omMetadataManager, batchOperation);
@@ -98,4 +93,22 @@ public class TestOMAllocateBlockResponse extends TestOMKeyResponse {
     Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
 
   }
+
+  protected OmKeyInfo createOmKeyInfo() throws Exception {
+    return TestOMRequestUtils.createOmKeyInfo(volumeName,
+            bucketName, keyName, replicationType, replicationFactor);
+  }
+
+  protected String getOpenKey() throws Exception {
+    return omMetadataManager.getOpenKey(volumeName, bucketName,
+            keyName, clientID);
+  }
+
+  @NotNull
+  protected OMAllocateBlockResponse getOmAllocateBlockResponse(
+          OmKeyInfo omKeyInfo, OmBucketInfo omBucketInfo,
+          OMResponse omResponse) {
+    return new OMAllocateBlockResponse(omResponse, omKeyInfo, clientID,
+            omBucketInfo);
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseV1.java
similarity index 53%
copy from hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
copy to hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseV1.java
index 369faa9..e105a37 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseV1.java
@@ -20,75 +20,54 @@ package org.apache.hadoop.ozone.om.response.key;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.apache.hadoop.util.Time;
 import org.jetbrains.annotations.NotNull;
-import org.junit.Assert;
 
 /**
- * Tests OMKeyCommitResponse layout version V1.
+ * Tests OMAllocateBlockResponse layout version V1.
  */
-public class TestOMKeyCommitResponseV1 extends TestOMKeyCommitResponse {
+public class TestOMAllocateBlockResponseV1
+        extends TestOMAllocateBlockResponse {
 
-  @NotNull
-  protected OMKeyCommitResponse getOmKeyCommitResponse(
-          OmVolumeArgs omVolumeArgs, OmKeyInfo omKeyInfo,
-          OzoneManagerProtocolProtos.OMResponse omResponse, String openKey,
-          String ozoneKey) {
-    Assert.assertNotNull(omBucketInfo);
-    return new OMKeyCommitResponseV1(
-            omResponse, omKeyInfo, ozoneKey, openKey, omVolumeArgs,
-            omBucketInfo);
-  }
+  // logical ID, which really doesn't exist in dirTable
+  private long parentID = 10;
+  private String fileName = "file1";
 
-  @NotNull
-  @Override
-  protected OmKeyInfo getOmKeyInfo() {
-    Assert.assertNotNull(omBucketInfo);
-    return TestOMRequestUtils.createOmKeyInfo(volumeName,
-            omBucketInfo.getBucketName(), keyName, replicationType,
-            replicationFactor,
-            omBucketInfo.getObjectID() + 1,
-            omBucketInfo.getObjectID(), 100, Time.now());
-  }
+  protected OmKeyInfo createOmKeyInfo() throws Exception {
+    // need to initialize parentID
+    String parentDir = keyName;
+    keyName = parentDir + OzoneConsts.OM_KEY_PREFIX + fileName;
 
-  @NotNull
-  @Override
-  protected void addKeyToOpenKeyTable() throws Exception {
-    Assert.assertNotNull(omBucketInfo);
-    long parentID = omBucketInfo.getObjectID();
-    long objectId = parentID + 10;
+    long txnId = 50;
+    long objectId = parentID + 1;
 
     OmKeyInfo omKeyInfoV1 =
             TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
                     HddsProtos.ReplicationType.RATIS,
-                    HddsProtos.ReplicationFactor.ONE, objectId, parentID, 100,
+                    HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId,
                     Time.now());
-
-    String fileName = OzoneFSUtils.getFileName(keyName);
-    TestOMRequestUtils.addFileToKeyTable(true, false,
-            fileName, omKeyInfoV1, clientID, txnLogId, omMetadataManager);
+    return omKeyInfoV1;
   }
 
-  @NotNull
-  @Override
-  protected String getOpenKeyName() {
-    Assert.assertNotNull(omBucketInfo);
+  protected String getOpenKey() throws Exception {
     return omMetadataManager.getOpenFileName(
-            omBucketInfo.getObjectID(), keyName, clientID);
+            parentID, fileName, clientID);
   }
 
   @NotNull
-  @Override
-  protected String getOzoneKey() {
-    Assert.assertNotNull(omBucketInfo);
-    return omMetadataManager.getOzonePathKey(omBucketInfo.getObjectID(),
-            keyName);
+  protected OMAllocateBlockResponse getOmAllocateBlockResponse(
+          OmKeyInfo omKeyInfo, OmVolumeArgs omVolumeArgs,
+          OmBucketInfo omBucketInfo, OMResponse omResponse) {
+    return new OMAllocateBlockResponseV1(omResponse, omKeyInfo, clientID,
+            omBucketInfo);
   }
 
   @NotNull
@@ -96,6 +75,11 @@ public class TestOMKeyCommitResponseV1 extends TestOMKeyCommitResponse {
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration config = super.getOzoneConfiguration();
     config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    // The omLayoutVersionV1 flag is normally set by OzoneManager#start(),
+    // which is not invoked in this test. Hence the flag is set explicitly
+    // here so that the prefix tables get populated.
+    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
     return config;
   }
+
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
index 369faa9..1e59ce8 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.util.Time;
@@ -96,6 +97,10 @@ public class TestOMKeyCommitResponseV1 extends TestOMKeyCommitResponse {
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration config = super.getOzoneConfiguration();
     config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    // The omLayoutVersionV1 flag is normally set by OzoneManager#start(),
+    // which is not invoked in this test. Hence the flag is set explicitly
+    // here so that the prefix tables get populated.
+    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
     return config;
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseV1.java
index 3cfec38..d35c79e 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseV1.java
@@ -18,11 +18,15 @@
 
 package org.apache.hadoop.ozone.om.response.key;
 
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
 
 /**
@@ -67,4 +71,16 @@ public class TestOMKeyDeleteResponseV1 extends TestOMKeyDeleteResponse {
             getOmBucketInfo().getObjectID() + 1,
             getOmBucketInfo().getObjectID(), 100, Time.now());
   }
+
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    // The omLayoutVersionV1 flag is normally set by OzoneManager#start(),
+    // which is not invoked in this test. Hence the flag is set explicitly
+    // here so that the prefix tables get populated.
+    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
+    return config;
+  }
 }

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 12/29: HDDS-4720. RenameKey : add unit test to verify bucket#renameKey (#1847)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 93716f0d3a09de9ece73369ed30f352271bd926b
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Wed Jan 27 22:44:09 2021 +0530

    HDDS-4720. RenameKey : add unit test to verify bucket#renameKey (#1847)
---
 .../apache/hadoop/ozone/om/TestObjectStoreV1.java  | 112 +++++++++++++++++++++
 1 file changed, 112 insertions(+)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
index b88bbc3..d343e2c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.ozone.TestDataUtil;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneKey;
 import org.apache.hadoop.ozone.client.OzoneKeyDetails;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.KeyOutputStream;
@@ -52,8 +53,12 @@ import java.io.IOException;
 import java.util.HashMap;
 import java.util.UUID;
 
+import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
+import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_ALREADY_EXISTS;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
@@ -264,6 +269,113 @@ public class TestObjectStoreV1 {
             dirPathC.getObjectID(), true);
   }
 
+  @Test
+  public void testRenameKey() throws IOException {
+    String fromKeyName = UUID.randomUUID().toString();
+    String value = "sample value";
+    OzoneClient client = cluster.getClient();
+
+    ObjectStore objectStore = client.getObjectStore();
+    OzoneVolume volume = objectStore.getVolume(volumeName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+    createTestKey(bucket, fromKeyName, value);
+
+    // Rename to empty string should fail.
+    String toKeyName = "";
+    try {
+      bucket.renameKey(fromKeyName, toKeyName);
+      fail("Rename to empty string should fail!");
+    } catch (OMException ome) {
+      Assert.assertEquals(OMException.ResultCodes.INVALID_KEY_NAME,
+              ome.getResult());
+    }
+
+    toKeyName = UUID.randomUUID().toString();
+    bucket.renameKey(fromKeyName, toKeyName);
+
+    // Lookup for old key should fail.
+    try {
+      bucket.getKey(fromKeyName);
+      fail("Lookup for old from key name should fail!");
+    } catch (OMException ome) {
+      Assert.assertEquals(KEY_NOT_FOUND, ome.getResult());
+    }
+
+    OzoneKey key = bucket.getKey(toKeyName);
+    Assert.assertEquals(toKeyName, key.getName());
+  }
+
+  @Test
+  public void testKeyRenameWithSubDirs() throws Exception {
+    String keyName1 = "dir1/dir2/file1";
+    String keyName2 = "dir1/dir2/file2";
+
+    String newKeyName1 = "dir1/key1";
+    String newKeyName2 = "dir1/key2";
+
+    String value = "sample value";
+    OzoneClient client = cluster.getClient();
+    ObjectStore objectStore = client.getObjectStore();
+    OzoneVolume volume = objectStore.getVolume(volumeName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+    createTestKey(bucket, keyName1, value);
+    createTestKey(bucket, keyName2, value);
+
+    bucket.renameKey(keyName1, newKeyName1);
+    bucket.renameKey(keyName2, newKeyName2);
+
+    // new key should exist
+    Assert.assertEquals(newKeyName1, bucket.getKey(newKeyName1).getName());
+    Assert.assertEquals(newKeyName2, bucket.getKey(newKeyName2).getName());
+
+    // old key should not exist
+    assertKeyRenamedEx(bucket, keyName1);
+    assertKeyRenamedEx(bucket, keyName2);
+  }
+
+  @Test
+  public void testRenameToAnExistingKey() throws Exception {
+    String keyName1 = "dir1/dir2/file1";
+    String keyName2 = "dir1/dir2/file2";
+
+    String value = "sample value";
+    OzoneClient client = cluster.getClient();
+    ObjectStore objectStore = client.getObjectStore();
+    OzoneVolume volume = objectStore.getVolume(volumeName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+    createTestKey(bucket, keyName1, value);
+    createTestKey(bucket, keyName2, value);
+
+    try {
+      bucket.renameKey(keyName1, keyName2);
+      fail("Should throw exception as destin key already exists!");
+    } catch (OMException e) {
+      Assert.assertEquals(KEY_ALREADY_EXISTS, e.getResult());
+    }
+  }
+
+  private void assertKeyRenamedEx(OzoneBucket bucket, String keyName)
+      throws Exception {
+    OMException oe = null;
+    try {
+      bucket.getKey(keyName);
+    } catch (OMException e) {
+      oe = e;
+    }
+    Assert.assertEquals(KEY_NOT_FOUND, oe.getResult());
+  }
+
+  private void createTestKey(OzoneBucket bucket, String keyName,
+      String keyValue) throws IOException {
+    OzoneOutputStream out = bucket.createKey(keyName,
+            keyValue.getBytes().length, STAND_ALONE,
+            ONE, new HashMap<>());
+    out.write(keyValue.getBytes());
+    out.close();
+    OzoneKey key = bucket.getKey(keyName);
+    Assert.assertEquals(keyName, key.getName());
+  }
+
   private OmDirectoryInfo getDirInfo(String parentKey) throws Exception {
     OMMetadataManager omMetadataManager =
             cluster.getOzoneManager().getMetadataManager();

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 26/29: HDDS-4917.[FSO]Implement ACL requests for new layout (#2024)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 31687bf71b7fed0f6c383fcc36109dabf0e54da0
Author: Sadanand Shenoy <sa...@gmail.com>
AuthorDate: Mon Mar 15 21:40:23 2021 +0530

    HDDS-4917.[FSO]Implement ACL requests for new layout (#2024)
---
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |   8 +-
 .../om/ratis/utils/OzoneManagerRatisUtils.java     |  12 ++
 .../om/request/key/acl/OMKeyAclRequestV1.java      | 165 +++++++++++++++++++++
 .../om/request/key/acl/OMKeyAddAclRequestV1.java   | 149 +++++++++++++++++++
 .../request/key/acl/OMKeyRemoveAclRequestV1.java   | 153 +++++++++++++++++++
 .../om/request/key/acl/OMKeySetAclRequestV1.java   | 146 ++++++++++++++++++
 .../om/response/key/acl/OMKeyAclResponse.java      |   3 +
 .../om/response/key/acl/OMKeyAclResponseV1.java    |  75 ++++++++++
 .../ozone/om/request/key/TestOMKeyAclRequest.java  |  81 ++++++----
 .../om/request/key/TestOMKeyAclRequestV1.java      |  81 ++++++++++
 10 files changed, 845 insertions(+), 28 deletions(-)

diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index f45a96b..b2ff866 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -1698,12 +1698,16 @@ public class KeyManagerImpl implements KeyManager {
     String volume = obj.getVolumeName();
     String bucket = obj.getBucketName();
     String keyName = obj.getKeyName();
-
+    OmKeyInfo keyInfo;
     metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volume, bucket);
     try {
       validateBucket(volume, bucket);
       String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName);
-      OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey);
+      if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
+        keyInfo = getOmKeyInfoV1(volume, bucket, keyName);
+      } else {
+        keyInfo = getOmKeyInfo(volume, bucket, keyName);
+      }
       if (keyInfo == null) {
         throw new OMException("Key not found. Key:" + objectKey, KEY_NOT_FOUND);
       }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index 52305253..b3e6c4f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -50,8 +50,11 @@ import org.apache.hadoop.ozone.om.request.key.OMKeyRenameRequestV1;
 import org.apache.hadoop.ozone.om.request.key.OMKeysRenameRequest;
 import org.apache.hadoop.ozone.om.request.key.OMTrashRecoverRequest;
 import org.apache.hadoop.ozone.om.request.key.acl.OMKeyAddAclRequest;
+import org.apache.hadoop.ozone.om.request.key.acl.OMKeyAddAclRequestV1;
 import org.apache.hadoop.ozone.om.request.key.acl.OMKeyRemoveAclRequest;
+import org.apache.hadoop.ozone.om.request.key.acl.OMKeyRemoveAclRequestV1;
 import org.apache.hadoop.ozone.om.request.key.acl.OMKeySetAclRequest;
+import org.apache.hadoop.ozone.om.request.key.acl.OMKeySetAclRequestV1;
 import org.apache.hadoop.ozone.om.request.key.acl.prefix.OMPrefixAddAclRequest;
 import org.apache.hadoop.ozone.om.request.key.acl.prefix.OMPrefixRemoveAclRequest;
 import org.apache.hadoop.ozone.om.request.key.acl.prefix.OMPrefixSetAclRequest;
@@ -230,6 +233,9 @@ public final class OzoneManagerRatisUtils {
       } else if (ObjectType.BUCKET == type) {
         return new OMBucketAddAclRequest(omRequest);
       } else if (ObjectType.KEY == type) {
+        if (isBucketFSOptimized()){
+          return new OMKeyAddAclRequestV1(omRequest);
+        }
         return new OMKeyAddAclRequest(omRequest);
       } else {
         return new OMPrefixAddAclRequest(omRequest);
@@ -241,6 +247,9 @@ public final class OzoneManagerRatisUtils {
       } else if (ObjectType.BUCKET == type) {
         return new OMBucketRemoveAclRequest(omRequest);
       } else if (ObjectType.KEY == type) {
+        if (isBucketFSOptimized()){
+          return new OMKeyRemoveAclRequestV1(omRequest);
+        }
         return new OMKeyRemoveAclRequest(omRequest);
       } else {
         return new OMPrefixRemoveAclRequest(omRequest);
@@ -252,6 +261,9 @@ public final class OzoneManagerRatisUtils {
       } else if (ObjectType.BUCKET == type) {
         return new OMBucketSetAclRequest(omRequest);
       } else if (ObjectType.KEY == type) {
+        if (isBucketFSOptimized()){
+          return new OMKeySetAclRequestV1(omRequest);
+        }
         return new OMKeySetAclRequest(omRequest);
       } else {
         return new OMPrefixSetAclRequest(omRequest);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequestV1.java
new file mode 100644
index 0000000..3dfb29e
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequestV1.java
@@ -0,0 +1,165 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.request.key.acl;
+
+import com.google.common.base.Optional;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.request.util.ObjectParser;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.key.acl.OMKeyAclResponseV1;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+
+
+/**
+ * Handles key ACL requests layout version V1.
+ */
+public abstract class OMKeyAclRequestV1 extends OMKeyAclRequest {
+
+  public OMKeyAclRequestV1(OzoneManagerProtocolProtos.OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+    OmKeyInfo omKeyInfo = null;
+
+    OzoneManagerProtocolProtos.OMResponse.Builder omResponse = onInit();
+    OMClientResponse omClientResponse = null;
+    IOException exception = null;
+
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+    boolean lockAcquired = false;
+    String volume = null;
+    String bucket = null;
+    String key = null;
+    boolean operationResult = false;
+    Result result = null;
+    try {
+      ObjectParser objectParser = new ObjectParser(getPath(),
+          OzoneManagerProtocolProtos.OzoneObj.ObjectType.KEY);
+
+      volume = objectParser.getVolume();
+      bucket = objectParser.getBucket();
+      key = objectParser.getKey();
+
+      // check Acl
+      if (ozoneManager.getAclsEnabled()) {
+        checkAcls(ozoneManager, OzoneObj.ResourceType.VOLUME,
+            OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE_ACL,
+            volume, bucket, key);
+      }
+      lockAcquired = omMetadataManager.getLock()
+          .acquireWriteLock(BUCKET_LOCK, volume, bucket);
+      OzoneFileStatus keyStatus = OMFileRequest
+          .getOMKeyInfoIfExists(omMetadataManager, volume, bucket, key, 0);
+      if (keyStatus == null) {
+        throw new OMException("Key not found. Key:" + key, KEY_NOT_FOUND);
+      }
+      omKeyInfo = keyStatus.getKeyInfo();
+      String dbKey = omKeyInfo.getPath();
+      boolean isDirectory = keyStatus.isDirectory();
+      operationResult = apply(omKeyInfo, trxnLogIndex);
+      omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
+
+      // Update the modification time when updating ACLs of Key.
+      long modificationTime = omKeyInfo.getModificationTime();
+      if (getOmRequest().getAddAclRequest().hasObj() && operationResult) {
+        modificationTime =
+            getOmRequest().getAddAclRequest().getModificationTime();
+      } else if (getOmRequest().getSetAclRequest().hasObj()) {
+        modificationTime =
+            getOmRequest().getSetAclRequest().getModificationTime();
+      } else if (getOmRequest().getRemoveAclRequest().hasObj()
+          && operationResult) {
+        modificationTime =
+            getOmRequest().getRemoveAclRequest().getModificationTime();
+      }
+      omKeyInfo.setModificationTime(modificationTime);
+
+      // update cache.
+      if (isDirectory) {
+        Table<String, OmDirectoryInfo> dirTable =
+            omMetadataManager.getDirectoryTable();
+        dirTable.addCacheEntry(new CacheKey<>(dbKey),
+            new CacheValue<>(Optional.of(OMFileRequest.
+                getDirectoryInfo(omKeyInfo)), trxnLogIndex));
+      } else {
+        omMetadataManager.getKeyTable().addCacheEntry(new CacheKey<>(dbKey),
+            new CacheValue<>(Optional.of(omKeyInfo), trxnLogIndex));
+      }
+      omClientResponse =
+          onSuccess(omResponse, omKeyInfo, operationResult, isDirectory);
+      result = Result.SUCCESS;
+    } catch (IOException ex) {
+      result = Result.FAILURE;
+      exception = ex;
+      omClientResponse = onFailure(omResponse, ex);
+    } finally {
+      addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
+          omDoubleBufferHelper);
+      if (lockAcquired) {
+        omMetadataManager.getLock()
+            .releaseWriteLock(BUCKET_LOCK, volume, bucket);
+      }
+    }
+
+    OzoneObj obj = getObject();
+    Map<String, String> auditMap = obj.toAuditMap();
+    onComplete(result, operationResult, exception, trxnLogIndex,
+        ozoneManager.getAuditLogger(), auditMap);
+
+    return omClientResponse;
+  }
+
+  /**
+   * Get the om client response on failure case with lock.
+   *
+   * @param omResponse
+   * @param exception
+   * @return OMClientResponse
+   */
+  @Override OMClientResponse onFailure(
+      OzoneManagerProtocolProtos.OMResponse.Builder omResponse,
+      IOException exception) {
+    return new OMKeyAclResponseV1(createErrorOMResponse(omResponse, exception));
+  }
+
+  abstract OMClientResponse onSuccess(
+      OzoneManagerProtocolProtos.OMResponse.Builder omResponse,
+      OmKeyInfo omKeyInfo, boolean operationResult, boolean isDirectory);
+
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequestV1.java
new file mode 100644
index 0000000..dbe0289
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequestV1.java
@@ -0,0 +1,149 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.request.key.acl;
+
+import com.google.common.collect.Lists;
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.audit.AuditLogger;
+import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.key.acl.OMKeyAclResponse;
+import org.apache.hadoop.ozone.om.response.key.acl.OMKeyAclResponseV1;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
+import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Handle add Acl request for key for layout version V1.
+ */
+public class OMKeyAddAclRequestV1 extends OMKeyAclRequestV1 {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OMKeyAddAclRequestV1.class);
+
+  @Override public OzoneManagerProtocolProtos.OMRequest preExecute(
+      OzoneManager ozoneManager) throws IOException {
+    long modificationTime = Time.now();
+    OzoneManagerProtocolProtos.AddAclRequest.Builder addAclRequestBuilder =
+        getOmRequest().getAddAclRequest().toBuilder()
+            .setModificationTime(modificationTime);
+
+    return getOmRequest().toBuilder().setAddAclRequest(addAclRequestBuilder)
+        .setUserInfo(getUserInfo()).build();
+  }
+
+  private String path;
+  private List<OzoneAcl> ozoneAcls;
+  private OzoneObj obj;
+
+  public OMKeyAddAclRequestV1(OzoneManagerProtocolProtos.OMRequest omRequest) {
+    super(omRequest);
+    OzoneManagerProtocolProtos.AddAclRequest addAclRequest =
+        getOmRequest().getAddAclRequest();
+    obj = OzoneObjInfo.fromProtobuf(addAclRequest.getObj());
+    path = obj.getPath();
+    ozoneAcls =
+        Lists.newArrayList(OzoneAcl.fromProtobuf(addAclRequest.getAcl()));
+  }
+
+  @Override String getPath() {
+    return path;
+  }
+
+  @Override OzoneObj getObject() {
+    return obj;
+  }
+
+  @Override OzoneManagerProtocolProtos.OMResponse.Builder onInit() {
+    return OmResponseUtil.getOMResponseBuilder(getOmRequest());
+  }
+
+  @Override OMClientResponse onSuccess(
+      OzoneManagerProtocolProtos.OMResponse.Builder omResponse,
+      OmKeyInfo omKeyInfo, boolean operationResult) {
+    omResponse.setSuccess(operationResult);
+    omResponse.setAddAclResponse(
+        OzoneManagerProtocolProtos.AddAclResponse.newBuilder()
+            .setResponse(operationResult));
+    return new OMKeyAclResponse(omResponse.build(), omKeyInfo);
+  }
+
+  @Override void onComplete(Result result, boolean operationResult,
+      IOException exception, long trxnLogIndex, AuditLogger auditLogger,
+      Map<String, String> auditMap) {
+    switch (result) {
+    case SUCCESS:
+      if (LOG.isDebugEnabled()) {
+        if (operationResult) {
+          LOG.debug("Add acl: {} to path: {} success!", ozoneAcls, path);
+        } else {
+          LOG.debug("Acl {} already exists in path {}", ozoneAcls, path);
+        }
+      }
+      break;
+    case FAILURE:
+      LOG.error("Add acl {} to path {} failed!", ozoneAcls, path, exception);
+      break;
+    default:
+      LOG.error("Unrecognized Result for OMKeyAddAclRequest: {}",
+          getOmRequest());
+    }
+
+    if (ozoneAcls != null) {
+      auditMap.put(OzoneConsts.ACL, ozoneAcls.toString());
+    }
+    auditLog(auditLogger,
+        buildAuditMessage(OMAction.ADD_ACL, auditMap, exception,
+            getOmRequest().getUserInfo()));
+  }
+
+  @Override boolean apply(OmKeyInfo omKeyInfo, long trxnLogIndex) {
+    // No need to check not null here, this will be never called with null.
+    return omKeyInfo.addAcl(ozoneAcls.get(0));
+  }
+
+  @Override
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+    ozoneManager.getMetrics().incNumAddAcl();
+    return super.validateAndUpdateCache(ozoneManager, trxnLogIndex,
+        omDoubleBufferHelper);
+  }
+
+  @Override OMClientResponse onSuccess(
+      OzoneManagerProtocolProtos.OMResponse.Builder omResponse,
+      OmKeyInfo omKeyInfo, boolean operationResult, boolean isDirectory) {
+    omResponse.setSuccess(operationResult);
+    omResponse.setAddAclResponse(
+        OzoneManagerProtocolProtos.AddAclResponse.newBuilder()
+            .setResponse(operationResult));
+    return new OMKeyAclResponseV1(omResponse.build(), omKeyInfo, isDirectory);
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequestV1.java
new file mode 100644
index 0000000..2d377d1
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequestV1.java
@@ -0,0 +1,153 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.request.key.acl;
+
+import com.google.common.collect.Lists;
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.audit.AuditLogger;
+import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.key.acl.OMKeyAclResponse;
+import org.apache.hadoop.ozone.om.response.key.acl.OMKeyAclResponseV1;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
+import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Handle remove Acl request for a key, for layout version V1
+ * (prefix/file-system-optimized metadata layout).
+ */
+public class OMKeyRemoveAclRequestV1 extends OMKeyAclRequestV1 {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OMKeyRemoveAclRequestV1.class);
+
+  /** Stamps the current time as modification time on the incoming request. */
+  @Override public OzoneManagerProtocolProtos.OMRequest preExecute(
+      OzoneManager ozoneManager) throws IOException {
+    long modificationTime = Time.now();
+    OzoneManagerProtocolProtos.RemoveAclRequest.Builder
+        removeAclRequestBuilder =
+        getOmRequest().getRemoveAclRequest().toBuilder()
+            .setModificationTime(modificationTime);
+
+    return getOmRequest().toBuilder()
+        .setRemoveAclRequest(removeAclRequestBuilder).setUserInfo(getUserInfo())
+        .build();
+  }
+
+  // Path of the target object, derived from the OzoneObj in the request.
+  private String path;
+  // Single-element list holding the ACL to remove (proto carries one acl).
+  private List<OzoneAcl> ozoneAcls;
+  // The ozone object (volume/bucket/key) this request targets.
+  private OzoneObj obj;
+
+  /** Extracts object, path and ACL from the protobuf request. */
+  public OMKeyRemoveAclRequestV1(
+      OzoneManagerProtocolProtos.OMRequest omRequest) {
+    super(omRequest);
+    OzoneManagerProtocolProtos.RemoveAclRequest removeAclRequest =
+        getOmRequest().getRemoveAclRequest();
+    obj = OzoneObjInfo.fromProtobuf(removeAclRequest.getObj());
+    path = obj.getPath();
+    ozoneAcls =
+        Lists.newArrayList(OzoneAcl.fromProtobuf(removeAclRequest.getAcl()));
+  }
+
+  /** @return path of the object whose ACL is being removed. */
+  @Override String getPath() {
+    return path;
+  }
+
+  /** @return the ozone object this request targets. */
+  @Override OzoneObj getObject() {
+    return obj;
+  }
+
+  /** Creates the base OMResponse builder for this request. */
+  @Override OMClientResponse onInit() {
+    return OmResponseUtil.getOMResponseBuilder(getOmRequest());
+  }
+
+  /** Builds a RemoveAclResponse wrapping the updated key info (flat layout). */
+  @Override OMClientResponse onSuccess(
+      OzoneManagerProtocolProtos.OMResponse.Builder omResponse,
+      OmKeyInfo omKeyInfo, boolean operationResult) {
+    omResponse.setSuccess(operationResult);
+    omResponse.setRemoveAclResponse(
+        OzoneManagerProtocolProtos.RemoveAclResponse.newBuilder()
+            .setResponse(operationResult));
+    return new OMKeyAclResponse(omResponse.build(), omKeyInfo);
+  }
+
+  /** Logs the outcome and emits the REMOVE_ACL audit entry. */
+  @Override void onComplete(Result result, boolean operationResult,
+      IOException exception, long trxnLogIndex, AuditLogger auditLogger,
+      Map<String, String> auditMap) {
+    switch (result) {
+    case SUCCESS:
+      if (LOG.isDebugEnabled()) {
+        if (operationResult) {
+          LOG.debug("Remove acl: {} to path: {} success!", ozoneAcls, path);
+        } else {
+          LOG.debug("Acl {} not removed from path {} as it does not exist",
+              ozoneAcls, path);
+        }
+      }
+      break;
+    case FAILURE:
+      LOG.error("Remove acl {} to path {} failed!", ozoneAcls, path, exception);
+      break;
+    default:
+      LOG.error("Unrecognized Result for OMKeyRemoveAclRequest: {}",
+          getOmRequest());
+    }
+
+    if (ozoneAcls != null) {
+      auditMap.put(OzoneConsts.ACL, ozoneAcls.toString());
+    }
+    auditLog(auditLogger,
+        buildAuditMessage(OMAction.REMOVE_ACL, auditMap, exception,
+            getOmRequest().getUserInfo()));
+  }
+
+  /** Applies the ACL removal to the in-memory key info. */
+  @Override boolean apply(OmKeyInfo omKeyInfo, long trxnLogIndex) {
+    // No need to check not null here, this will be never called with null.
+    return omKeyInfo.removeAcl(ozoneAcls.get(0));
+  }
+
+  /** Bumps the remove-ACL metric, then delegates to the shared ACL flow. */
+  @Override
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+    ozoneManager.getMetrics().incNumRemoveAcl();
+    return super.validateAndUpdateCache(ozoneManager, trxnLogIndex,
+        omDoubleBufferHelper);
+  }
+
+  /**
+   * Builds the V1 (prefix layout) success response; isDirectory selects
+   * whether the response later writes to the directory or file table.
+   */
+  @Override OMClientResponse onSuccess(
+      OzoneManagerProtocolProtos.OMResponse.Builder omResponse,
+      OmKeyInfo omKeyInfo, boolean operationResult, boolean isDirectory) {
+    omResponse.setSuccess(operationResult);
+    omResponse.setRemoveAclResponse(
+        OzoneManagerProtocolProtos.RemoveAclResponse.newBuilder()
+            .setResponse(operationResult));
+    return new OMKeyAclResponseV1(omResponse.build(), omKeyInfo, isDirectory);
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequestV1.java
new file mode 100644
index 0000000..6b6811e
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequestV1.java
@@ -0,0 +1,146 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.request.key.acl;
+
+import com.google.common.collect.Lists;
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.audit.AuditLogger;
+import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.key.acl.OMKeyAclResponse;
+import org.apache.hadoop.ozone.om.response.key.acl.OMKeyAclResponseV1;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
+import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Handle set Acl request for a key, for layout version V1
+ * (prefix/file-system-optimized metadata layout).
+ */
+public class OMKeySetAclRequestV1 extends OMKeyAclRequestV1 {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OMKeySetAclRequestV1.class);
+
+  /** Stamps the current time as modification time on the incoming request. */
+  @Override public OzoneManagerProtocolProtos.OMRequest preExecute(
+      OzoneManager ozoneManager) throws IOException {
+    long modificationTime = Time.now();
+    OzoneManagerProtocolProtos.SetAclRequest.Builder setAclRequestBuilder =
+        getOmRequest().getSetAclRequest().toBuilder()
+            .setModificationTime(modificationTime);
+
+    return getOmRequest().toBuilder().setSetAclRequest(setAclRequestBuilder)
+        .setUserInfo(getUserInfo()).build();
+  }
+
+  // Path of the target object, derived from the OzoneObj in the request.
+  private String path;
+  // Full replacement ACL list taken from the protobuf request.
+  private List<OzoneAcl> ozoneAcls;
+  // The ozone object (volume/bucket/key) this request targets.
+  private OzoneObj obj;
+
+  /** Extracts object, path and ACL list from the protobuf request. */
+  public OMKeySetAclRequestV1(OzoneManagerProtocolProtos.OMRequest omRequest) {
+    super(omRequest);
+    OzoneManagerProtocolProtos.SetAclRequest setAclRequest =
+        getOmRequest().getSetAclRequest();
+    obj = OzoneObjInfo.fromProtobuf(setAclRequest.getObj());
+    path = obj.getPath();
+    ozoneAcls = Lists
+        .newArrayList(OzoneAclUtil.fromProtobuf(setAclRequest.getAclList()));
+  }
+
+  /** @return path of the object whose ACLs are being set. */
+  @Override String getPath() {
+    return path;
+  }
+
+  /** @return the ozone object this request targets. */
+  @Override OzoneObj getObject() {
+    return obj;
+  }
+
+  /** Creates the base OMResponse builder for this request. */
+  @Override OzoneManagerProtocolProtos.OMResponse.Builder onInit() {
+    return OmResponseUtil.getOMResponseBuilder(getOmRequest());
+  }
+
+  /** Builds a SetAclResponse wrapping the updated key info (flat layout). */
+  @Override OMClientResponse onSuccess(
+      OzoneManagerProtocolProtos.OMResponse.Builder omResponse,
+      OmKeyInfo omKeyInfo, boolean operationResult) {
+    omResponse.setSuccess(operationResult);
+    omResponse.setSetAclResponse(
+        OzoneManagerProtocolProtos.SetAclResponse.newBuilder()
+            .setResponse(operationResult));
+    return new OMKeyAclResponse(omResponse.build(), omKeyInfo);
+  }
+
+  /** Logs the outcome and emits the SET_ACL audit entry. */
+  @Override void onComplete(Result result, boolean operationResult,
+      IOException exception, long trxnLogIndex, AuditLogger auditLogger,
+      Map<String, String> auditMap) {
+    switch (result) {
+    case SUCCESS:
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Set acl: {} to path: {} success!", ozoneAcls, path);
+      }
+      break;
+    case FAILURE:
+      LOG.error("Set acl {} to path {} failed!", ozoneAcls, path, exception);
+      break;
+    default:
+      LOG.error("Unrecognized Result for OMKeySetAclRequest: {}",
+          getOmRequest());
+    }
+
+    if (ozoneAcls != null) {
+      auditMap.put(OzoneConsts.ACL, ozoneAcls.toString());
+    }
+    auditLog(auditLogger,
+        buildAuditMessage(OMAction.SET_ACL, auditMap, exception,
+            getOmRequest().getUserInfo()));
+  }
+
+  /** Replaces the key's ACL list with the requested one, in memory. */
+  @Override boolean apply(OmKeyInfo omKeyInfo, long trxnLogIndex) {
+    // No need to check not null here, this will be never called with null.
+    return omKeyInfo.setAcls(ozoneAcls);
+  }
+
+  /** Bumps the set-ACL metric, then delegates to the shared ACL flow. */
+  @Override
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+    ozoneManager.getMetrics().incNumSetAcl();
+    return super.validateAndUpdateCache(ozoneManager, trxnLogIndex,
+        omDoubleBufferHelper);
+  }
+
+  /**
+   * Builds the V1 (prefix layout) success response; isDirectory selects
+   * whether the response later writes to the directory or file table.
+   */
+  @Override OMClientResponse onSuccess(
+      OzoneManagerProtocolProtos.OMResponse.Builder omResponse,
+      OmKeyInfo omKeyInfo, boolean operationResult, boolean isDirectory) {
+    omResponse.setSuccess(operationResult);
+    omResponse.setSetAclResponse(
+        OzoneManagerProtocolProtos.SetAclResponse.newBuilder()
+            .setResponse(operationResult));
+    return new OMKeyAclResponseV1(omResponse.build(), omKeyInfo, isDirectory);
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponse.java
index 2bbeae0..299c063 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponse.java
@@ -64,5 +64,8 @@ public class OMKeyAclResponse extends OMClientResponse {
         omKeyInfo);
   }
 
+  /** @return the key info carried by this response (for subclass DB writes). */
+  public OmKeyInfo getOmKeyInfo() {
+    return omKeyInfo;
+  }
 }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponseV1.java
new file mode 100644
index 0000000..046a024
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponseV1.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.response.key.acl;
+
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.jetbrains.annotations.NotNull;
+
+import java.io.IOException;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE;
+
+/**
+ * Response for key acl request, for layout version V1
+ * (prefix/file-system-optimized metadata layout).
+ */
+@CleanupTableInfo(cleanupTables = { FILE_TABLE, DIRECTORY_TABLE })
+public class OMKeyAclResponseV1 extends OMKeyAclResponse {
+
+  // True when the ACL operation targeted a directory entry rather than a file.
+  private boolean isDirectory;
+
+  /** For a successful request; carries the updated key/directory info. */
+  public OMKeyAclResponseV1(
+      @NotNull OzoneManagerProtocolProtos.OMResponse omResponse,
+      @NotNull OmKeyInfo omKeyInfo, boolean isDirectory) {
+    super(omResponse, omKeyInfo);
+    this.isDirectory = isDirectory;
+  }
+
+  /**
+   * For when the request is not successful.
+   * For a successful request, the other constructor should be used.
+   *
+   * @param omResponse
+   */
+  public OMKeyAclResponseV1(
+      @NotNull OzoneManagerProtocolProtos.OMResponse omResponse) {
+    super(omResponse);
+  }
+
+  /** Persists the updated entry into the directory or file table. */
+  @Override public void addToDBBatch(OMMetadataManager omMetadataManager,
+      BatchOperation batchOperation) throws IOException {
+
+    // In the prefix layout the DB key is built from the parent object id and
+    // the leaf file name, not from the full volume/bucket/key path.
+    String ozoneDbKey = omMetadataManager
+        .getOzonePathKey(getOmKeyInfo().getParentObjectID(),
+            getOmKeyInfo().getFileName());
+    if (isDirectory) {
+      OmDirectoryInfo dirInfo = OMFileRequest.getDirectoryInfo(getOmKeyInfo());
+      omMetadataManager.getDirectoryTable()
+          .putWithBatch(batchOperation, ozoneDbKey, dirInfo);
+    } else {
+      omMetadataManager.getKeyTable()
+          .putWithBatch(batchOperation, ozoneDbKey, getOmKeyInfo());
+    }
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java
index 470cf60..0e28e4b 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java
@@ -20,7 +20,9 @@ package org.apache.hadoop.ozone.om.request.key;
 import java.util.List;
 import java.util.UUID;
 import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.request.key.acl.OMKeyAclRequest;
 import org.apache.hadoop.ozone.om.request.key.acl.OMKeyAddAclRequest;
 import org.apache.hadoop.ozone.om.request.key.acl.OMKeyRemoveAclRequest;
 import org.apache.hadoop.ozone.om.request.key.acl.OMKeySetAclRequest;
@@ -46,16 +48,18 @@ public class TestOMKeyAclRequest extends TestOMKeyRequest {
     // Manually add volume, bucket and key to DB
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
-    TestOMRequestUtils.addKeyToTable(false, false, volumeName, bucketName,
-        keyName, clientID, replicationType, replicationFactor, 1L,
-        omMetadataManager);
+    String ozoneKey = addKeyToTable();
+
+    OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
+
+    // As we added manually to key table.
+    Assert.assertNotNull(omKeyInfo);
 
     OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy[ACCESS]");
 
     // Create KeyAddAcl request
     OMRequest originalRequest = createAddAclkeyRequest(acl);
-    OMKeyAddAclRequest omKeyAddAclRequest = new OMKeyAddAclRequest(
-        originalRequest);
+    OMKeyAclRequest omKeyAddAclRequest = getOmKeyAddAclRequest(originalRequest);
     OMRequest preExecuteRequest = omKeyAddAclRequest.preExecute(ozoneManager);
 
     // When preExecute() of adding acl,
@@ -68,7 +72,7 @@ public class TestOMKeyAclRequest extends TestOMKeyRequest {
 
     // Execute original request
     OMClientResponse omClientResponse = omKeyAddAclRequest
-        .validateAndUpdateCache(ozoneManager, 2,
+        .validateAndUpdateCache(ozoneManager, 100L,
             ozoneManagerDoubleBufferHelper);
     Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
         omClientResponse.getOMResponse().getStatus());
@@ -79,19 +83,22 @@ public class TestOMKeyAclRequest extends TestOMKeyRequest {
   public void testKeyRemoveAclRequest() throws Exception {
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
-    TestOMRequestUtils.addKeyToTable(false, false, volumeName, bucketName,
-        keyName, clientID, replicationType, replicationFactor, 1L,
-        omMetadataManager);
+    String ozoneKey = addKeyToTable();
+
+    OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
+
+    // As we added manually to key table.
+    Assert.assertNotNull(omKeyInfo);
 
     OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy[ACCESS]");
 
     // Add acl.
     OMRequest addAclRequest = createAddAclkeyRequest(acl);
-    OMKeyAddAclRequest omKeyAddAclRequest =
-        new OMKeyAddAclRequest(addAclRequest);
+    OMKeyAclRequest omKeyAddAclRequest =
+        getOmKeyAddAclRequest(addAclRequest);
     omKeyAddAclRequest.preExecute(ozoneManager);
     OMClientResponse omClientAddAclResponse = omKeyAddAclRequest
-        .validateAndUpdateCache(ozoneManager, 1,
+        .validateAndUpdateCache(ozoneManager, 100L,
             ozoneManagerDoubleBufferHelper);
     OMResponse omAddAclResponse = omClientAddAclResponse.getOMResponse();
     Assert.assertNotNull(omAddAclResponse.getAddAclResponse());
@@ -99,8 +106,6 @@ public class TestOMKeyAclRequest extends TestOMKeyRequest {
         omAddAclResponse.getStatus());
 
     // Verify result of adding acl.
-    String ozoneKey = omMetadataManager
-        .getOzoneKey(volumeName, bucketName, keyName);
     List<OzoneAcl> keyAcls = omMetadataManager.getKeyTable().get(ozoneKey)
         .getAcls();
     Assert.assertEquals(1, keyAcls.size());
@@ -108,8 +113,8 @@ public class TestOMKeyAclRequest extends TestOMKeyRequest {
 
     // Remove acl.
     OMRequest removeAclRequest = createRemoveAclKeyRequest(acl);
-    OMKeyRemoveAclRequest omKeyRemoveAclRequest =
-        new OMKeyRemoveAclRequest(removeAclRequest);
+    OMKeyAclRequest omKeyRemoveAclRequest =
+        getOmKeyRemoveAclRequest(removeAclRequest);
     OMRequest preExecuteRequest = omKeyRemoveAclRequest
         .preExecute(ozoneManager);
 
@@ -122,7 +127,7 @@ public class TestOMKeyAclRequest extends TestOMKeyRequest {
     Assert.assertTrue(newModTime > originModTime);
 
     OMClientResponse omClientRemoveAclResponse = omKeyRemoveAclRequest
-        .validateAndUpdateCache(ozoneManager, 2,
+        .validateAndUpdateCache(ozoneManager, 100L,
             ozoneManagerDoubleBufferHelper);
     OMResponse omRemoveAclResponse = omClientRemoveAclResponse.getOMResponse();
     Assert.assertNotNull(omRemoveAclResponse.getRemoveAclResponse());
@@ -139,15 +144,18 @@ public class TestOMKeyAclRequest extends TestOMKeyRequest {
   public void testKeySetAclRequest() throws Exception {
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
-    TestOMRequestUtils.addKeyToTable(false, false, volumeName, bucketName,
-        keyName, clientID, replicationType, replicationFactor, 1L,
-        omMetadataManager);
+    String ozoneKey = addKeyToTable();
+
+    OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
+
+    // As we added manually to key table.
+    Assert.assertNotNull(omKeyInfo);
 
     OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy[ACCESS]");
 
     OMRequest setAclRequest = createSetAclKeyRequest(acl);
-    OMKeySetAclRequest omKeySetAclRequest =
-        new OMKeySetAclRequest(setAclRequest);
+    OMKeyAclRequest omKeySetAclRequest =
+        getOmKeySetAclRequest(setAclRequest);
     OMRequest preExecuteRequest = omKeySetAclRequest.preExecute(ozoneManager);
 
     // When preExecute() of setting acl,
@@ -159,7 +167,7 @@ public class TestOMKeyAclRequest extends TestOMKeyRequest {
     Assert.assertTrue(newModTime > originModTime);
 
     OMClientResponse omClientResponse = omKeySetAclRequest
-        .validateAndUpdateCache(ozoneManager, 1,
+        .validateAndUpdateCache(ozoneManager, 100L,
             ozoneManagerDoubleBufferHelper);
     OMResponse omSetAclResponse = omClientResponse.getOMResponse();
     Assert.assertNotNull(omSetAclResponse.getSetAclResponse());
@@ -167,8 +175,6 @@ public class TestOMKeyAclRequest extends TestOMKeyRequest {
         omSetAclResponse.getStatus());
 
     // Verify result of setting acl.
-    String ozoneKey = omMetadataManager
-        .getOzoneKey(volumeName, bucketName, keyName);
     List<OzoneAcl> newAcls = omMetadataManager.getKeyTable().get(ozoneKey)
         .getAcls();
     Assert.assertEquals(newAcls.get(0), acl);
@@ -177,7 +183,7 @@ public class TestOMKeyAclRequest extends TestOMKeyRequest {
   /**
    * Create OMRequest which encapsulates OMKeyAddAclRequest.
    */
-  private OMRequest createAddAclkeyRequest(OzoneAcl acl) {
+  protected OMRequest createAddAclkeyRequest(OzoneAcl acl) {
     OzoneObj obj = OzoneObjInfo.Builder.newBuilder()
         .setBucketName(bucketName)
         .setVolumeName(volumeName)
@@ -233,4 +239,27 @@ public class TestOMKeyAclRequest extends TestOMKeyRequest {
         .setSetAclRequest(setAclRequest)
         .build();
   }
+
+  /**
+   * Adds a key to the key table using the flat (v0) layout and returns the
+   * DB key to look it up; layout-V1 tests override this hook.
+   */
+  protected String addKeyToTable() throws Exception {
+    TestOMRequestUtils.addKeyToTable(false, false, volumeName, bucketName,
+        keyName, clientID, replicationType, replicationFactor, 1L,
+        omMetadataManager);
+
+    return omMetadataManager.getOzoneKey(volumeName, bucketName,
+        keyName);
+  }
+
+  /** Factory for the add-ACL request under test; overridden for V1 layout. */
+  protected OMKeyAclRequest getOmKeyAddAclRequest(OMRequest originalRequest) {
+    return new OMKeyAddAclRequest(
+        originalRequest);
+  }
+
+  /** Factory for the remove-ACL request under test; overridden for V1. */
+  protected OMKeyAclRequest getOmKeyRemoveAclRequest(
+      OMRequest removeAclRequest) {
+    return new OMKeyRemoveAclRequest(removeAclRequest);
+  }
+
+  /** Factory for the set-ACL request under test; overridden for V1. */
+  protected OMKeyAclRequest getOmKeySetAclRequest(OMRequest setAclRequest) {
+    return new OMKeySetAclRequest(setAclRequest);
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestV1.java
new file mode 100644
index 0000000..12e745c
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestV1.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.request.key;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.request.key.acl.OMKeyAclRequest;
+import org.apache.hadoop.ozone.om.request.key.acl.OMKeyAddAclRequestV1;
+import org.apache.hadoop.ozone.om.request.key.acl.OMKeyRemoveAclRequestV1;
+import org.apache.hadoop.ozone.om.request.key.acl.OMKeySetAclRequestV1;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.util.Time;
+
+/**
+ * Test Key ACL requests for layout version V1.
+ */
+public class TestOMKeyAclRequestV1 extends TestOMKeyAclRequest {
+
+  /**
+   * Adds a file key (with its parent directory chain) to the prefix-layout
+   * tables and returns the DB key used to look it up.
+   */
+  @Override
+  protected String addKeyToTable() throws Exception {
+    String parentDir = "c/d/e";
+    String fileName = "file1";
+    String key = parentDir + "/" + fileName;
+    keyName = key; // updated key name
+
+    // Create parent dirs for the path
+    long parentId = TestOMRequestUtils
+        .addParentsToDirTable(volumeName, bucketName, parentDir,
+            omMetadataManager);
+
+    OmKeyInfo omKeyInfo = TestOMRequestUtils
+        .createOmKeyInfo(volumeName, bucketName, key,
+            HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE,
+            parentId + 1, parentId, 100, Time.now());
+    TestOMRequestUtils
+        .addFileToKeyTable(false, false, fileName, omKeyInfo, -1, 50,
+            omMetadataManager);
+    return omKeyInfo.getPath();
+  }
+
+  @Override protected OMKeyAclRequest getOmKeyAddAclRequest(
+      OzoneManagerProtocolProtos.OMRequest originalRequest) {
+    return new OMKeyAddAclRequestV1(originalRequest);
+  }
+
+  @Override protected OMKeyAclRequest getOmKeyRemoveAclRequest(
+      OzoneManagerProtocolProtos.OMRequest removeAclRequest) {
+    return new OMKeyRemoveAclRequestV1(removeAclRequest);
+  }
+
+  @Override protected OMKeyAclRequest getOmKeySetAclRequest(
+      OzoneManagerProtocolProtos.OMRequest setAclRequest) {
+    return new OMKeySetAclRequestV1(setAclRequest);
+  }
+
+  @Override protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    // omLayoutVersionV1 flag will be set while invoking OzoneManager#start()
+    // and it's not invoked in this test. Hence explicitly enable the
+    // FS-optimized (prefix) layout so the V1 request/response paths are used.
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
+    return config;
+  }
+}

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 25/29: HDDS-4790. Add a tool to parse entries in the prefix format (#1891)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 188861feb0e4e7c76b30195594c09ec0b4eb7679
Author: Mukul Kumar Singh <ms...@apache.org>
AuthorDate: Mon Mar 15 16:23:51 2021 +0530

    HDDS-4790. Add a tool to parse entries in the prefix format (#1891)
---
 .../hdds/scm/pipeline/SCMPipelineManager.java      |   6 +-
 .../hadoop/ozone/om/helpers/OmDirectoryInfo.java   |   4 +-
 .../apache/hadoop/ozone/om/helpers/OmKeyInfo.java  |  25 +--
 .../ozone/om/helpers/WithParentObjectId.java       |  55 +++++
 .../fs/ozone/TestOzoneFileSystemPrefixParser.java  | 180 ++++++++++++++++
 .../apache/hadoop/ozone/debug/PrefixParser.java    | 233 +++++++++++++++++++++
 6 files changed, 474 insertions(+), 29 deletions(-)

diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
index b22feab..3487b12 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
@@ -710,8 +710,10 @@ public class SCMPipelineManager implements
     pipelineFactory.shutdown();
     lock.writeLock().lock();
     try {
-      pipelineStore.close();
-      pipelineStore = null;
+      if (pipelineStore != null) {
+        pipelineStore.close();
+        pipelineStore = null;
+      }
     } catch (Exception ex) {
       LOG.error("Pipeline  store close failed", ex);
     } finally {
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java
index 4c82047..3d5d6a5 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java
@@ -28,9 +28,7 @@ import java.util.*;
  * in the user given path and a pointer to its parent directory element in the
  * path. Also, it stores directory node related metdata details.
  */
-public class OmDirectoryInfo extends WithObjectID {
-  private long parentObjectID; // pointer to parent directory
-
+public class OmDirectoryInfo extends WithParentObjectId {
   private String name; // directory name
 
   private long creationTime;
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
index be68d9b..dd67cc1 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
@@ -42,7 +42,7 @@ import com.google.common.base.Preconditions;
  * This is returned from OM to client, and client use class to talk to
  * datanode. Also, this is the metadata written to om.db on server side.
  */
-public final class OmKeyInfo extends WithObjectID {
+public final class OmKeyInfo extends WithParentObjectId {
   private final String volumeName;
   private final String bucketName;
   // name of key client specified
@@ -56,29 +56,6 @@ public final class OmKeyInfo extends WithObjectID {
   private FileEncryptionInfo encInfo;
 
   /**
-   * A pointer to parent directory used for path traversal. ParentID will be
-   * used only when the key is created into a FileSystemOptimized(FSO) bucket.
-   * <p>
-   * For example, if a key "a/b/key1" created into a FSOBucket then each
-   * path component will be assigned an ObjectId and linked to its parent path
-   * component using parent's objectID.
-   * <p>
-   * Say, Bucket's ObjectID = 512, which is the parent for its immediate child
-   * element.
-   * <p>
-   * ------------------------------------------|
-   * PathComponent |   ObjectID   |   ParentID |
-   * ------------------------------------------|
-   *      a        |     1024     |     512    |
-   * ------------------------------------------|
-   *      b        |     1025     |     1024   |
-   * ------------------------------------------|
-   *     key1      |     1026     |     1025   |
-   * ------------------------------------------|
-   */
-  private long parentObjectID;
-
-  /**
    * Represents leaf node name. This also will be used when the keyName is
    * created on a FileSystemOptimized(FSO) bucket. For example, the user given
    * keyName is "a/b/key1" then the fileName stores "key1".
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithParentObjectId.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithParentObjectId.java
new file mode 100644
index 0000000..79a135a
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithParentObjectId.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.helpers;
+
+/**
+ * Object ID with additional parent ID field.
+ */
+public class WithParentObjectId extends WithObjectID {
+  /**
+   * Object ID with additional parent ID field.
+   *
+   * A pointer to parent directory used for path traversal. ParentID will be
+   * used only when the key is created into a FileSystemOptimized(FSO) bucket.
+   * <p>
+   * For example, if a key "a/b/key1" created into a FSOBucket then each
+   * path component will be assigned an ObjectId and linked to its parent path
+   * component using parent's objectID.
+   * <p>
+   * Say, Bucket's ObjectID = 512, which is the parent for its immediate child
+   * element.
+   * <p>
+   * ------------------------------------------|
+   * PathComponent |   ObjectID   |   ParentID |
+   * ------------------------------------------|
+   *      a        |     1024     |     512    |
+   * ------------------------------------------|
+   *      b        |     1025     |     1024   |
+   * ------------------------------------------|
+   *     key1      |     1026     |     1025   |
+   * ------------------------------------------|
+   */
+  @SuppressWarnings("visibilitymodifier")
+  protected long parentObjectID;
+
+  public long getParentObjectID() {
+    return parentObjectID;
+  }
+
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java
new file mode 100644
index 0000000..3f18fae
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.TestDataUtil;
+import org.apache.hadoop.ozone.debug.PrefixParser;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMStorage;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.Assert;
+
+import java.io.IOException;
+import java.net.URI;
+
+/**
+ * Test Ozone Prefix Parser.
+ */
+public class TestOzoneFileSystemPrefixParser {
+
+  private MiniOzoneCluster cluster = null;
+
+  private FileSystem fs;
+
+  private String volumeName;
+
+  private String bucketName;
+
+  private OzoneConfiguration configuration;
+
+  @Before
+  public void init() throws Exception {
+    volumeName = RandomStringUtils.randomAlphabetic(10).toLowerCase();
+    bucketName = RandomStringUtils.randomAlphabetic(10).toLowerCase();
+
+    configuration = new OzoneConfiguration();
+
+    TestOMRequestUtils.configureFSOptimizedPaths(configuration,
+        true, OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1);
+
+    cluster = MiniOzoneCluster.newBuilder(configuration)
+        .setNumDatanodes(3)
+        .build();
+    cluster.waitForClusterToBeReady();
+
+    // create a volume and a bucket to be used by OzoneFileSystem
+    TestDataUtil.createVolumeAndBucket(cluster, volumeName, bucketName);
+
+    String rootPath = String
+        .format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, bucketName,
+            volumeName);
+    fs = FileSystem.get(new URI(rootPath + "/test.txt"), configuration);
+  }
+
+  @After
+  public void teardown() throws IOException {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+    IOUtils.closeQuietly(fs);
+  }
+
+  @Test
+  public void testPrefixParseDir() throws Exception {
+    Path dir = new Path("/a/b/c/d/e");
+    fs.mkdirs(dir);
+    Path file = new Path("/a/b/c/file1");
+    FSDataOutputStream os = fs.create(file);
+    os.close();
+
+    cluster.stop();
+    PrefixParser parser = new PrefixParser();
+
+    parser.parse(volumeName, bucketName,
+        OMStorage.getOmDbDir(configuration).getPath(),
+        dir.getParent().getParent().toString());
+
+    assertPrefixStats(parser, 1, 1, 3, 0, 1, 1);
+  }
+
+  @Test
+  public void testPrefixParseFile() throws Exception {
+    Path dir = new Path("/a/b/c/d/e");
+    fs.mkdirs(dir);
+    Path file = new Path("/a/b/file1");
+    FSDataOutputStream os = fs.create(file);
+    os.close();
+
+    cluster.stop();
+    PrefixParser parser = new PrefixParser();
+
+    parser.parse(volumeName, bucketName,
+        OMStorage.getOmDbDir(configuration).getPath(),
+        file.toString());
+
+    assertPrefixStats(parser, 1, 1, 2, 1, 1, 1);
+  }
+
+  private void assertPrefixStats(PrefixParser parser, int volumeCount,
+      int bucketCount, int intermediateDirCount, int nonExistentDirCount,
+      int fileCount, int dirCount) {
+    Assert.assertEquals(volumeCount,
+        parser.getParserStats(PrefixParser.Types.VOLUME));
+    Assert.assertEquals(bucketCount,
+        parser.getParserStats(PrefixParser.Types.BUCKET));
+    Assert.assertEquals(intermediateDirCount,
+        parser.getParserStats(PrefixParser.Types.INTERMEDIATE_DIRECTORY));
+    Assert.assertEquals(nonExistentDirCount,
+        parser.getParserStats(PrefixParser.Types.NON_EXISTENT_DIRECTORY));
+    Assert.assertEquals(fileCount,
+        parser.getParserStats(PrefixParser.Types.FILE));
+    Assert.assertEquals(dirCount,
+        parser.getParserStats(PrefixParser.Types.DIRECTORY));
+  }
+
+  @Test
+  public void testPrefixParseWithInvalidPaths() throws Exception {
+    Path dir = new Path("/a/b/c/d/e");
+    fs.mkdirs(dir);
+    Path file = new Path("/a/b/file1");
+    FSDataOutputStream os = fs.create(file);
+    os.close();
+
+    cluster.stop();
+
+    PrefixParser invalidVolumeParser = new PrefixParser();
+    String invalidVolumeName =
+        RandomStringUtils.randomAlphabetic(10).toLowerCase();
+    invalidVolumeParser.parse(invalidVolumeName, bucketName,
+        OMStorage.getOmDbDir(configuration).getPath(),
+        file.toString());
+    assertPrefixStats(invalidVolumeParser, 0, 0, 0, 0, 0, 0);
+
+    PrefixParser invalidBucketParser = new PrefixParser();
+    String invalidBucketName =
+        RandomStringUtils.randomAlphabetic(10).toLowerCase();
+    invalidBucketParser.parse(volumeName, invalidBucketName,
+        OMStorage.getOmDbDir(configuration).getPath(),
+        file.toString());
+    assertPrefixStats(invalidBucketParser, 1, 0, 0, 0, 0, 0);
+
+
+    Path invalidIntermediateDir = new Path(file.getParent(), "xyz");
+    PrefixParser invalidIntermediateDirParser = new PrefixParser();
+    invalidIntermediateDirParser.parse(volumeName, bucketName,
+        OMStorage.getOmDbDir(configuration).getPath(),
+        invalidIntermediateDir.toString());
+
+    assertPrefixStats(invalidIntermediateDirParser, 1, 1, 2, 1, 1, 1);
+
+  }
+
+
+}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java
new file mode 100644
index 0000000..4c257dc
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java
@@ -0,0 +1,233 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.debug;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.Callable;
+
+import java.nio.file.Path;
+import org.apache.hadoop.hdds.cli.SubcommandWithParent;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.helpers.*;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
+import org.kohsuke.MetaInfServices;
+import picocli.CommandLine;
+import picocli.CommandLine.Model.CommandSpec;
+import picocli.CommandLine.Spec;
+
+/**
+ * Tool that parses OM db file for prefix table.
+ */
+@CommandLine.Command(
+    name = "prefix",
+    description = "Parse prefix contents")
+@MetaInfServices(SubcommandWithParent.class)
+public class PrefixParser implements Callable<Void>, SubcommandWithParent {
+
+  public enum Types {
+    VOLUME,
+    BUCKET,
+    FILE,
+    DIRECTORY,
+    INTERMEDIATE_DIRECTORY,
+    NON_EXISTENT_DIRECTORY,
+  }
+
+  private final int[] parserStats = new int[Types.values().length];
+
+  @Spec
+  private CommandSpec spec;
+
+  @CommandLine.Option(names = {"--db"},
+      required = true,
+      description = "Database File Path")
+  private String dbPath;
+
+  @CommandLine.Option(names = {"--path"},
+      required = true,
+      description = "prefixFile Path")
+  private String filePath;
+
+  @CommandLine.Option(names = {"--bucket"},
+      required = true,
+      description = "bucket name")
+  private String bucket;
+
+  @CommandLine.Option(names = {"--volume"},
+      required = true,
+      description = "volume name")
+  private String volume;
+
+  public String getDbPath() {
+    return dbPath;
+  }
+
+  public void setDbPath(String dbPath) {
+    this.dbPath = dbPath;
+  }
+
+  @Override
+  public Class<?> getParentType() {
+    return OzoneDebug.class;
+  }
+
+  @Override
+  public Void call() throws Exception {
+    parse(volume, bucket, dbPath, filePath);
+    return null;
+  }
+
+  public static void main(String[] args) throws Exception {
+    new PrefixParser().call();
+  }
+
+  public void parse(String vol, String buck, String db,
+                    String file) throws Exception {
+    if (!Files.exists(Paths.get(db))) {
+      System.out.println("DB path not exist:" + db);
+      return;
+    }
+
+    System.out.println("FilePath is:" + file);
+    System.out.println("Db Path is:" + db);
+
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.set(OMConfigKeys.OZONE_OM_DB_DIRS, db);
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
+
+    OmMetadataManagerImpl metadataManager =
+        new OmMetadataManagerImpl(conf);
+    metadataManager.start(conf);
+
+    org.apache.hadoop.fs.Path effectivePath =
+        new org.apache.hadoop.fs.Path("/");
+
+    Path p = Paths.get(file);
+
+    String volumeKey = metadataManager.getVolumeKey(vol);
+    if (!metadataManager.getVolumeTable().isExist(volumeKey)) {
+      System.out.println("Invalid Volume:" + vol);
+      metadataManager.stop();
+      return;
+    }
+
+    parserStats[Types.VOLUME.ordinal()]++;
+    // First get the info about the bucket
+    String bucketKey = metadataManager.getBucketKey(vol, buck);
+    OmBucketInfo info = metadataManager.getBucketTable().get(bucketKey);
+    if (info == null) {
+      System.out.println("Invalid Bucket:" + buck);
+      metadataManager.stop();
+      return;
+    }
+
+    long lastObjectId = info.getObjectID();
+    WithParentObjectId objectBucketId = new WithParentObjectId();
+    objectBucketId.setObjectID(lastObjectId);
+    dumpInfo(Types.BUCKET, effectivePath, objectBucketId, bucketKey);
+
+    Iterator<Path> pathIterator =  p.iterator();
+    while(pathIterator.hasNext()) {
+      Path elem = pathIterator.next();
+      String path =
+          metadataManager.getOzonePathKey(lastObjectId, elem.toString());
+      OmDirectoryInfo directoryInfo =
+          metadataManager.getDirectoryTable().get(path);
+
+      org.apache.hadoop.fs.Path tmpPath =
+          getEffectivePath(effectivePath, elem.toString());
+      if (directoryInfo == null) {
+        System.out.println("Given path contains a non-existent directory at:" +
+            tmpPath);
+        System.out.println("Dumping files and dirs at level:" +
+            tmpPath.getParent());
+        System.out.println();
+        parserStats[Types.NON_EXISTENT_DIRECTORY.ordinal()]++;
+        break;
+      }
+
+      effectivePath = tmpPath;
+
+      dumpInfo(Types.INTERMEDIATE_DIRECTORY, effectivePath,
+          directoryInfo, path);
+      lastObjectId = directoryInfo.getObjectID();
+    }
+
+    // at the last level, now parse both file and dir table
+    dumpTableInfo(Types.DIRECTORY, effectivePath,
+        metadataManager.getDirectoryTable(), lastObjectId);
+
+    dumpTableInfo(Types.FILE, effectivePath,
+        metadataManager.getKeyTable(), lastObjectId);
+    metadataManager.stop();
+  }
+
+  private void dumpTableInfo(Types type,
+      org.apache.hadoop.fs.Path effectivePath,
+      Table<String, ? extends WithParentObjectId> table, long lastObjectId)
+      throws IOException {
+    MetadataKeyFilters.KeyPrefixFilter filter = getPrefixFilter(lastObjectId);
+
+    List<? extends KeyValue
+        <String, ? extends WithParentObjectId>> infoList =
+        table.getRangeKVs(null, 1000, filter);
+
+    for (KeyValue<String, ? extends WithParentObjectId> info :infoList) {
+      Path key = Paths.get(info.getKey());
+      dumpInfo(type, getEffectivePath(effectivePath,
+          key.getName(1).toString()), info.getValue(), info.getKey());
+    }
+  }
+
+  private org.apache.hadoop.fs.Path getEffectivePath(
+      org.apache.hadoop.fs.Path currentPath, String name) {
+    return new org.apache.hadoop.fs.Path(currentPath, name);
+  }
+
+  private void dumpInfo(Types level, org.apache.hadoop.fs.Path effectivePath,
+                        WithParentObjectId id,  String key) {
+    parserStats[level.ordinal()]++;
+    System.out.println("Type:" + level);
+    System.out.println("Path: " + effectivePath);
+    System.out.println("DB Path: " + key);
+    System.out.println("Object Id: " + id.getObjectID());
+    System.out.println("Parent object Id: " + id.getParentObjectID());
+    System.out.println();
+
+  }
+
+  private static MetadataKeyFilters.KeyPrefixFilter getPrefixFilter(long id) {
+    return (new MetadataKeyFilters.KeyPrefixFilter())
+        .addFilter(Long.toString(id));
+  }
+
+  public int getParserStats(Types type) {
+    return parserStats[type.ordinal()];
+  }
+}

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 04/29: HDDS-4357: Rename : make rename an atomic ops by updating key path entry in dir/file table (#1557)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 623da7bbc12eaa93db56805fe062f15cad54b185
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Thu Nov 19 21:23:45 2020 +0530

    HDDS-4357: Rename : make rename an atomic ops by updating key path entry in dir/file table (#1557)
    
    HDDS-4357: Rename : make rename an atomic ops by updating key path entry in dir/file table
---
 .../org/apache/hadoop/ozone/om/OMConfigKeys.java   |   2 +
 .../apache/hadoop/ozone/om/helpers/OmKeyInfo.java  |   4 +
 .../hadoop/ozone/om/helpers/OzoneFSUtils.java      |  43 +++
 .../hadoop/fs/ozone/TestOzoneFileSystem.java       | 278 ++++++++++++++++++++
 .../hadoop/fs/ozone/TestOzoneFileSystemV1.java     | 173 +++++++++++-
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  27 +-
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |   2 +
 .../om/ratis/utils/OzoneManagerRatisUtils.java     |   4 +
 .../om/request/bucket/OMBucketCreateRequest.java   |  24 ++
 .../ozone/om/request/file/OMFileRequest.java       |  94 +++++++
 .../ozone/om/request/key/OMKeyRenameRequestV1.java | 292 +++++++++++++++++++++
 .../ozone/om/response/key/OMKeyRenameResponse.java |  11 +
 ...ameResponse.java => OMKeyRenameResponseV1.java} |  68 +++--
 .../fs/ozone/BasicOzoneClientAdapterImpl.java      |   5 +
 .../hadoop/fs/ozone/BasicOzoneFileSystem.java      |  13 +
 .../ozone/BasicRootedOzoneClientAdapterImpl.java   |   7 +
 .../apache/hadoop/fs/ozone/OzoneClientAdapter.java |   1 +
 17 files changed, 1001 insertions(+), 47 deletions(-)

diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index 34a1064..71344f9 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -251,4 +251,6 @@ public final class OMConfigKeys {
   public static final String OZONE_OM_LAYOUT_VERSION =
           "ozone.om.layout.version";
   public static final String OZONE_OM_LAYOUT_VERSION_DEFAULT = "V0";
+
+  public static final String OZONE_OM_LAYOUT_VERSION_V1 = "V1";
 }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
index b6fb404..d097714 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
@@ -286,6 +286,10 @@ public final class OmKeyInfo extends WithObjectID {
     return OzoneAclUtil.setAcl(acls, newAcls);
   }
 
+  public void setParentObjectID(long parentObjectID) {
+    this.parentObjectID = parentObjectID;
+  }
+
   /**
    * Builder of OmKeyInfo.
    */
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
index 63bfd8f..e9d4cf9 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
@@ -162,4 +162,47 @@ public final class OzoneFSUtils {
 
     return parentPath.equals(childParent);
   }
+
+  /**
+   * The function returns parent directory from the given absolute path. For
+   * example, the given key path '/a/b/c/d/e/file1' then it returns parent
+   * directory name as 'e'.
+   *
+   * @param keyName key name
+   * @return parent directory. If not found then return keyName itself.
+   */
+  public static String getParentDir(@Nonnull String keyName) {
+    java.nio.file.Path fileName = Paths.get(keyName).getParent();
+    if (fileName != null) {
+      return fileName.toString();
+    }
+    // failed to find a parent directory.
+    return keyName;
+  }
+
+  /**
+   * This function appends the given file name to the given key name path.
+   *
+   * @param keyName key name
+   * @param fileName  file name
+   * @return full path
+   */
+  public static String appendFileNameToKeyPath(String keyName,
+                                               String fileName) {
+    StringBuilder newToKeyName = new StringBuilder(keyName);
+    newToKeyName.append(OZONE_URI_DELIMITER);
+    newToKeyName.append(fileName);
+    return newToKeyName.toString();
+  }
+
+  /**
+   * Returns the number of path components in the given keyName.
+   *
+   * @param keyName keyname
+   * @return path components count
+   */
+  public static int getFileCount(String keyName) {
+    java.nio.file.Path keyPath = Paths.get(keyName);
+    return keyPath.getNameCount();
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
index 05b8d8b..2c1e643 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
@@ -630,6 +630,16 @@ public class TestOzoneFileSystem {
       stream.seek(fileLength);
       assertEquals(-1, stream.read());
     }
+
+    // non-existent file
+    Path fileNotExists = new Path("/file_notexist");
+    try {
+      fs.open(fileNotExists);
+      Assert.fail("Should throw FILE_NOT_FOUND error as file doesn't exist!");
+    } catch (FileNotFoundException fnfe) {
+      Assert.assertTrue("Expected FILE_NOT_FOUND error",
+              fnfe.getMessage().contains("FILE_NOT_FOUND"));
+    }
   }
 
   @Test
@@ -664,9 +674,276 @@ public class TestOzoneFileSystem {
         interimPath.getName(), fileStatus.getPath().getName());
   }
 
+  /**
+   * Case-1) fromKeyName should exist, otw throws exception.
+   */
+  @Test
+  public void testRenameWithNonExistentSource() throws Exception {
+    final String root = "/root";
+    final String dir1 = root + "/dir1";
+    final String dir2 = root + "/dir2";
+    final Path source = new Path(fs.getUri().toString() + dir1);
+    final Path destin = new Path(fs.getUri().toString() + dir2);
+
+    // creates destin
+    fs.mkdirs(destin);
+    LOG.info("Created destin dir: {}", destin);
+
+    LOG.info("Rename op-> source:{} to destin:{}}", source, destin);
+    assertFalse("Expected to fail rename as src doesn't exist",
+            fs.rename(source, destin));
+  }
+
+  /**
+   * Case-2) Cannot rename a directory to its own subdirectory.
+   */
+  @Test
+  public void testRenameDirToItsOwnSubDir() throws Exception {
+    final String root = "/root";
+    final String dir1 = root + "/dir1";
+    final Path dir1Path = new Path(fs.getUri().toString() + dir1);
+    // Add a sub-dir1 to the directory to be moved.
+    final Path subDir1 = new Path(dir1Path, "sub_dir1");
+    fs.mkdirs(subDir1);
+    LOG.info("Created dir1 {}", subDir1);
+
+    final Path sourceRoot = new Path(fs.getUri().toString() + root);
+    LOG.info("Rename op-> source:{} to destin:{}", sourceRoot, subDir1);
+    try {
+      fs.rename(sourceRoot, subDir1);
+      Assert.fail("Should throw exception : Cannot rename a directory to" +
+              " its own subdirectory");
+    } catch (IllegalArgumentException iae) {
+      // expected
+    }
+  }
+
+  /**
+   * Case-3) If src == destin then check source and destin of same type.
+   */
+  @Test
+  public void testRenameSourceAndDestinAreSame() throws Exception {
+    final String root = "/root";
+    final String dir1 = root + "/dir1";
+    final String dir2 = dir1 + "/dir2";
+    final Path dir2Path = new Path(fs.getUri().toString() + dir2);
+    fs.mkdirs(dir2Path);
+
+    // File rename
+    Path file1 = new Path(fs.getUri().toString() + dir2 + "/file1");
+    ContractTestUtils.touch(fs, file1);
+
+    assertTrue(fs.rename(file1, file1));
+    assertTrue(fs.rename(dir2Path, dir2Path));
+  }
+
+  /**
+   * Case-4) Rename from /a, to /b.
+   * <p>
+   * Expected Result: After rename the directory structure will be /b/a.
+   */
+  @Test
+  public void testRenameToExistingDir() throws Exception {
+    // created /a
+    final Path aSourcePath = new Path(fs.getUri().toString() + "/a");
+    fs.mkdirs(aSourcePath);
+
+    // created /b
+    final Path bDestinPath = new Path(fs.getUri().toString() + "/b");
+    fs.mkdirs(bDestinPath);
+
+    // Add a sub-directory '/a/c' to '/a'. This is to verify that after
+    // rename sub-directory also be moved.
+    final Path acPath = new Path(fs.getUri().toString() + "/a/c");
+    fs.mkdirs(acPath);
+
+    // Rename from /a to /b.
+    assertTrue("Rename failed", fs.rename(aSourcePath, bDestinPath));
+
+    final Path baPath = new Path(fs.getUri().toString() + "/b/a");
+    final Path bacPath = new Path(fs.getUri().toString() + "/b/a/c");
+    assertTrue("Rename failed", fs.exists(baPath));
+    assertTrue("Rename failed", fs.exists(bacPath));
+  }
+
+  /**
+   * Case-5) If new destin '/dst/source' exists then throws exception.
+   * If destination is a directory then rename source as sub-path of it.
+   * <p>
+   * For example: rename /a to /b will lead to /b/a. This new path should
+   * not exist.
+   */
+  @Test
+  public void testRenameToNewSubDirShouldNotExist() throws Exception {
+    // Case-5.a) Rename directory from /a to /b.
+    // created /a
+    final Path aSourcePath = new Path(fs.getUri().toString() + "/a");
+    fs.mkdirs(aSourcePath);
+
+    // created /b
+    final Path bDestinPath = new Path(fs.getUri().toString() + "/b");
+    fs.mkdirs(bDestinPath);
+
+    // Add a sub-directory '/b/a' to '/b'. This is to verify that rename
+    // throws exception as new destin /b/a already exists.
+    final Path baPath = new Path(fs.getUri().toString() + "/b/a");
+    fs.mkdirs(baPath);
+
+    Assert.assertFalse("New destin sub-path /b/a already exists",
+            fs.rename(aSourcePath, bDestinPath));
+
+    // Case-5.b) Rename file from /a/b/c/file1 to /a.
+    // Should be failed since /a/file1 exists.
+    final Path abcPath = new Path(fs.getUri().toString() + "/a/b/c");
+    fs.mkdirs(abcPath);
+    Path abcFile1 = new Path(abcPath, "/file1");
+    ContractTestUtils.touch(fs, abcFile1);
+
+    final Path aFile1 = new Path(fs.getUri().toString() + "/a/file1");
+    ContractTestUtils.touch(fs, aFile1);
+
+    final Path aDestinPath = new Path(fs.getUri().toString() + "/a");
+
+    Assert.assertFalse("New destin sub-path /b/a already exists",
+            fs.rename(abcFile1, aDestinPath));
+  }
+
+  /**
+   * Case-6) Rename directory to an existed file, should be failed.
+   */
+  @Test
+  public void testRenameDirToFile() throws Exception {
+    final String root = "/root";
+    Path rootPath = new Path(fs.getUri().toString() + root);
+    fs.mkdirs(rootPath);
+
+    Path file1Destin = new Path(fs.getUri().toString() + root + "/file1");
+    ContractTestUtils.touch(fs, file1Destin);
+    Path abcRootPath = new Path(fs.getUri().toString() + "/a/b/c");
+    fs.mkdirs(abcRootPath);
+    Assert.assertFalse("key already exists /root_dir/file1",
+            fs.rename(abcRootPath, file1Destin));
+  }
+
+  /**
+   * Rename file to a non-existent destin file.
+   */
+  @Test
+  public void testRenameFile() throws Exception {
+    final String root = "/root";
+    Path rootPath = new Path(fs.getUri().toString() + root);
+    fs.mkdirs(rootPath);
+
+    Path file1Source = new Path(fs.getUri().toString() + root
+            + "/file1_Copy");
+    ContractTestUtils.touch(fs, file1Source);
+    Path file1Destin = new Path(fs.getUri().toString() + root + "/file1");
+    assertTrue("Renamed failed", fs.rename(file1Source, file1Destin));
+    assertTrue("Renamed failed: /root/file1", fs.exists(file1Destin));
+
+    /**
+     * Reading several times, this is to verify that OmKeyInfo#keyName cached
+     * entry is not modified. While reading back, OmKeyInfo#keyName will be
+     * prepared and assigned to fullkeyPath name.
+     */
+    for (int i = 0; i < 10; i++) {
+      FileStatus[] fStatus = fs.listStatus(rootPath);
+      assertEquals("Renamed failed", 1, fStatus.length);
+      assertEquals("Wrong path name!", file1Destin, fStatus[0].getPath());
+    }
+  }
+
+  /**
+   * Rename file to an existed directory.
+   */
+  @Test
+  public void testRenameFileToDir() throws Exception {
+    final String root = "/root";
+    Path rootPath = new Path(fs.getUri().toString() + root);
+    fs.mkdirs(rootPath);
+
+    Path file1Destin = new Path(fs.getUri().toString() + root + "/file1");
+    ContractTestUtils.touch(fs, file1Destin);
+    Path abcRootPath = new Path(fs.getUri().toString() + "/a/b/c");
+    fs.mkdirs(abcRootPath);
+    assertTrue("Renamed failed", fs.rename(file1Destin, abcRootPath));
+    assertTrue("Renamed filed: /a/b/c/file1", fs.exists(new Path(abcRootPath,
+            "file1")));
+  }
+
+
+  /**
+   * Fails if the (a) parent of dst does not exist or (b) parent is a file.
+   */
+  @Test
+  public void testRenameDestinationParentDoesntExist() throws Exception {
+    final String root = "/root_dir";
+    final String dir1 = root + "/dir1";
+    final String dir2 = dir1 + "/dir2";
+    final Path dir2SourcePath = new Path(fs.getUri().toString() + dir2);
+    fs.mkdirs(dir2SourcePath);
+
+    // (a) parent of dst does not exist.  /root_dir/b/c
+    final Path destinPath = new Path(fs.getUri().toString() + root + "/b/c");
+    try {
+      fs.rename(dir2SourcePath, destinPath);
+      Assert.fail("Should fail as parent of dst does not exist!");
+    } catch (FileNotFoundException fnfe) {
+      // expected
+    }
+
+    // (b) parent of dst is a file. /root_dir/file1/c
+    Path filePath = new Path(fs.getUri().toString() + root + "/file1");
+    ContractTestUtils.touch(fs, filePath);
+
+    Path newDestinPath = new Path(filePath, "c");
+    try {
+      fs.rename(dir2SourcePath, newDestinPath);
+      Assert.fail("Should fail as parent of dst is a file!");
+    } catch (IOException ioe) {
+      // expected
+    }
+  }
+
+  /**
+   * Rename to the source's parent directory, it will succeed.
+   * 1. Rename from /root_dir/dir1/dir2 to /root_dir.
+   * Expected result : /root_dir/dir2
+   * <p>
+   * 2. Rename from /root_dir/dir1/file1 to /root_dir.
+   * Expected result : /root_dir/file1.
+   */
+  @Test
+  public void testRenameToParentDir() throws Exception {
+    final String root = "/root_dir";
+    final String dir1 = root + "/dir1";
+    final String dir2 = dir1 + "/dir2";
+    final Path dir2SourcePath = new Path(fs.getUri().toString() + dir2);
+    fs.mkdirs(dir2SourcePath);
+    final Path destRootPath = new Path(fs.getUri().toString() + root);
+
+    Path file1Source = new Path(fs.getUri().toString() + dir1 + "/file2");
+    ContractTestUtils.touch(fs, file1Source);
+
+    // rename source directory to its parent directory(destination).
+    assertTrue("Rename failed", fs.rename(dir2SourcePath, destRootPath));
+    final Path expectedPathAfterRename =
+            new Path(fs.getUri().toString() + root + "/dir2");
+    assertTrue("Rename failed",
+            fs.exists(expectedPathAfterRename));
+
+    // rename source file to its parent directory(destination).
+    assertTrue("Rename failed", fs.rename(file1Source, destRootPath));
+    final Path expectedFilePathAfterRename =
+            new Path(fs.getUri().toString() + root + "/file2");
+    assertTrue("Rename failed",
+            fs.exists(expectedFilePathAfterRename));
+  }
+
   @Test
   public void testRenameDir() throws Exception {
     final String dir = "/root_dir/dir1";
+    Path rootDir = new Path(fs.getUri().toString() +  "/root_dir");
     final Path source = new Path(fs.getUri().toString() + dir);
     final Path dest = new Path(source.toString() + ".renamed");
     // Add a sub-dir to the directory to be moved.
@@ -685,6 +962,7 @@ public class TestOzoneFileSystem {
     LambdaTestUtils.intercept(IllegalArgumentException.class, "Wrong FS",
         () -> fs.rename(new Path(fs.getUri().toString() + "fake" + dir), dest));
   }
+
   private OzoneKeyDetails getKey(Path keyPath, boolean isDirectory)
       throws IOException {
     String key = o3fs.pathToKey(keyPath);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
index 6868040..e2b7887 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.Trash;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.TableIterator;
@@ -39,6 +40,7 @@ import org.apache.hadoop.ozone.TestDataUtil;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.junit.Assert;
@@ -329,12 +331,136 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
   }
 
   /**
+   * Case-1) fromKeyName should exist, otw throws exception.
+   */
+  @Test
+  public void testRenameWithNonExistentSource() throws Exception {
+    final String root = "/root";
+    final String dir1 = root + "/dir1";
+    final String dir2 = root + "/dir2";
+    final Path source = new Path(fs.getUri().toString() + dir1);
+    final Path destin = new Path(fs.getUri().toString() + dir2);
+
+    // creates destin
+    fs.mkdirs(destin);
+    LOG.info("Created destin dir: {}", destin);
+
+    LOG.info("Rename op-> source:{} to destin:{}", source, destin);
+    try {
+      fs.rename(source, destin);
+      Assert.fail("Should throw exception : Source doesn't exist!");
+    } catch (OMException ome) {
+      // expected
+      assertEquals(ome.getResult(), OMException.ResultCodes.KEY_NOT_FOUND);
+    }
+  }
+
+  /**
+   * Case-2) Cannot rename a directory to its own subdirectory.
+   */
+  @Test
+  public void testRenameDirToItsOwnSubDir() throws Exception {
+    final String root = "/root";
+    final String dir1 = root + "/dir1";
+    final Path dir1Path = new Path(fs.getUri().toString() + dir1);
+    // Add a sub-dir1 to the directory to be moved.
+    final Path subDir1 = new Path(dir1Path, "sub_dir1");
+    fs.mkdirs(subDir1);
+    LOG.info("Created dir1 {}", subDir1);
+
+    final Path sourceRoot = new Path(fs.getUri().toString() + root);
+    LOG.info("Rename op-> source:{} to destin:{}", sourceRoot, subDir1);
+    try {
+      fs.rename(sourceRoot, subDir1);
+      Assert.fail("Should throw exception : Cannot rename a directory to" +
+              " its own subdirectory");
+    } catch (OMException ome) {
+      // expected
+      assertEquals(ome.getResult(), OMException.ResultCodes.KEY_RENAME_ERROR);
+    }
+  }
+
+  /**
+   * Case-5) If new destin '/dst/source' exists then throws exception.
+   * If destination is a directory then rename source as sub-path of it.
+   * <p>
+   * For example: rename /a to /b will lead to /b/a. This new path should
+   * not exist.
+   */
+  @Test
+  public void testRenameToNewSubDirShouldNotExist() throws Exception {
+    // Case-5.a) Rename directory from /a to /b.
+    // created /a
+    final Path aSourcePath = new Path(fs.getUri().toString() + "/a");
+    fs.mkdirs(aSourcePath);
+
+    // created /b
+    final Path bDestinPath = new Path(fs.getUri().toString() + "/b");
+    fs.mkdirs(bDestinPath);
+
+    // Add a sub-directory '/b/a' to '/b'. This is to verify that rename
+    // throws exception as new destin /b/a already exists.
+    final Path baPath = new Path(fs.getUri().toString() + "/b/a");
+    fs.mkdirs(baPath);
+
+    try {
+      fs.rename(aSourcePath, bDestinPath);
+      Assert.fail("Should fail as new destination dir exists!");
+    } catch (OMException ome) {
+      // expected as new sub-path /b/a already exists.
+      assertEquals(ome.getResult(), OMException.ResultCodes.KEY_ALREADY_EXISTS);
+    }
+
+    // Case-5.b) Rename file from /a/b/c/file1 to /a.
+    // Should be failed since /a/file1 exists.
+    final Path abcPath = new Path(fs.getUri().toString() + "/a/b/c");
+    fs.mkdirs(abcPath);
+    Path abcFile1 = new Path(abcPath, "/file1");
+    ContractTestUtils.touch(fs, abcFile1);
+
+    final Path aFile1 = new Path(fs.getUri().toString() + "/a/file1");
+    ContractTestUtils.touch(fs, aFile1);
+
+    final Path aDestinPath = new Path(fs.getUri().toString() + "/a");
+
+    try {
+      fs.rename(abcFile1, aDestinPath);
+      Assert.fail("Should fail as new destination file exists!");
+    } catch (OMException ome) {
+      // expected as the destination file /a/file1 already exists.
+      assertEquals(ome.getResult(), OMException.ResultCodes.KEY_ALREADY_EXISTS);
+    }
+  }
+
+  /**
+   * Case-6) Rename directory to an existed file, should be failed.
+   */
+  @Test
+  public void testRenameDirToFile() throws Exception {
+    final String root = "/root";
+    Path rootPath = new Path(fs.getUri().toString() + root);
+    fs.mkdirs(rootPath);
+
+    Path file1Destin = new Path(fs.getUri().toString() + root + "/file1");
+    ContractTestUtils.touch(fs, file1Destin);
+    Path abcRootPath = new Path(fs.getUri().toString() + "/a/b/c");
+    fs.mkdirs(abcRootPath);
+    try {
+      fs.rename(abcRootPath, file1Destin);
+      Assert.fail("Should fail as destination key /root/file1 already exists");
+    } catch (OMException ome) {
+      // expected
+      assertEquals(ome.getResult(), OMException.ResultCodes.KEY_ALREADY_EXISTS);
+    }
+  }
+
+  /**
    * Cleanup keyTable and directoryTable explicitly as FS delete operation
    * is not yet supported.
    *
    * @throws IOException DB failure
    */
-  protected void tableCleanup() throws IOException {
+  private void tableCleanup() throws IOException {
     OMMetadataManager metadataMgr = cluster.getOzoneManager()
             .getMetadataManager();
     TableIterator<String, ? extends
@@ -382,8 +508,8 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
       keyTableIterator.next();
     }
 
-    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmDirectoryInfo>>>
-            keyCacheIterator = metadataMgr.getDirectoryTable().cacheIterator();
+    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>>
+            keyCacheIterator = metadataMgr.getKeyTable().cacheIterator();
     while(keyCacheIterator.hasNext()){
       keyCacheIterator.next();
       keyCacheIterator.remove();
@@ -401,6 +527,41 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
     rootItemCount = 0;
   }
 
+  /**
+   * Fails if the (a) parent of dst does not exist or (b) parent is a file.
+   */
+  @Test
+  public void testRenameDestinationParentDoesntExist() throws Exception {
+    final String root = "/root_dir";
+    final String dir1 = root + "/dir1";
+    final String dir2 = dir1 + "/dir2";
+    final Path dir2SourcePath = new Path(fs.getUri().toString() + dir2);
+    fs.mkdirs(dir2SourcePath);
+
+    // (a) parent of dst does not exist.  /root_dir/b/c
+    final Path destinPath = new Path(fs.getUri().toString() + root + "/b/c");
+    try {
+      fs.rename(dir2SourcePath, destinPath);
+      Assert.fail("Should fail as parent of dst does not exist!");
+    } catch (OMException ome) {
+      // expected
+      assertEquals(ome.getResult(), OMException.ResultCodes.KEY_RENAME_ERROR);
+    }
+
+    // (b) parent of dst is a file. /root_dir/file1/c
+    Path filePath = new Path(fs.getUri().toString() + root + "/file1");
+    ContractTestUtils.touch(fs, filePath);
+
+    Path newDestinPath = new Path(filePath, "c");
+    try {
+      fs.rename(dir2SourcePath, newDestinPath);
+      Assert.fail("Should fail as parent of dst is a file!");
+    } catch (OMException ome) {
+      // expected
+      assertEquals(ome.getResult(), OMException.ResultCodes.KEY_RENAME_ERROR);
+    }
+  }
+
   @Override
   @Test
   @Ignore("TODO:HDDS-2939")
@@ -412,4 +573,10 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
   @Ignore("TODO:HDDS-2939")
   public void testRenameToTrashEnabled() throws Exception {
   }
+
+  @Override
+  @Test
+  @Ignore("TODO:HDDS-2939")
+  public void testListStatusWithIntermediateDir() throws Exception {
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 4a4327d..8aaca5f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -2048,10 +2048,16 @@ public class KeyManagerImpl implements KeyManager {
     String volumeName = args.getVolumeName();
     String bucketName = args.getBucketName();
     String keyName = args.getKeyName();
-    OzoneFileStatus fileStatus = getOzoneFileStatus(volumeName, bucketName,
-            keyName, args.getRefreshPipeline(), args.getSortDatanodes(),
-            clientAddress);
-      //if key is not of type file or if key is not found we throw an exception
+    OzoneFileStatus fileStatus;
+    if (OzoneManagerRatisUtils.isOmLayoutVersionV1()) {
+      fileStatus = getOzoneFileStatusV1(volumeName, bucketName, keyName,
+              args.getSortDatanodes(), clientAddress, false);
+    } else {
+      fileStatus = getOzoneFileStatus(volumeName, bucketName,
+              keyName, args.getRefreshPipeline(), args.getSortDatanodes(),
+              clientAddress);
+    }
+    //if key is not of type file or if key is not found we throw an exception
     if (fileStatus.isFile()) {
       // add block token for read.
       addBlockToken4Read(fileStatus.getKeyInfo());
@@ -2534,13 +2540,18 @@ public class KeyManagerImpl implements KeyManager {
         continue;
       }
 
-      cacheOmKeyInfo.setFileName(cacheOmKeyInfo.getKeyName());
+      // make OmKeyInfo local copy to reset keyname to "fullKeyPath".
+      // In DB keyName stores only the leaf node but the list
+      // returning to the user should have full path.
+      OmKeyInfo omKeyInfo = cacheOmKeyInfo.copyObject();
+
+      omKeyInfo.setFileName(omKeyInfo.getKeyName());
       String fullKeyPath = OMFileRequest.getAbsolutePath(prefixKeyPath,
-              cacheOmKeyInfo.getKeyName());
-      cacheOmKeyInfo.setKeyName(fullKeyPath);
+              omKeyInfo.getKeyName());
+      omKeyInfo.setKeyName(fullKeyPath);
 
       countEntries = addKeyInfoToFileStatusList(fileStatusList, prefixKeyInDB,
-              seekKeyInDB, startKey, countEntries, cacheKey, cacheOmKeyInfo,
+              seekKeyInDB, startKey, countEntries, cacheKey, omKeyInfo,
               false);
     }
     return countEntries;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 780321a..302b6e7 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -3676,6 +3676,8 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
     boolean omLayoutVersionV1 =
             StringUtils.equalsIgnoreCase(version, "V1");
     OzoneManagerRatisUtils.setOmLayoutVersionV1(omLayoutVersionV1);
+    LOG.info("Configured {}={} and enabled:{} optimized OM FS operations",
+            OZONE_OM_LAYOUT_VERSION, version, omLayoutVersionV1);
   }
 
   /**
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index 2d98362..6142d87 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.ozone.om.request.key.OMKeyCreateRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyDeleteRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyPurgeRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyRenameRequest;
+import org.apache.hadoop.ozone.om.request.key.OMKeyRenameRequestV1;
 import org.apache.hadoop.ozone.om.request.key.OMKeysRenameRequest;
 import org.apache.hadoop.ozone.om.request.key.OMTrashRecoverRequest;
 import org.apache.hadoop.ozone.om.request.key.acl.OMKeyAddAclRequest;
@@ -145,6 +146,9 @@ public final class OzoneManagerRatisUtils {
     case DeleteKeys:
       return new OMKeysDeleteRequest(omRequest);
     case RenameKey:
+      if (omLayoutVersionV1) {
+        return new OMKeyRenameRequestV1(omRequest);
+      }
       return new OMKeyRenameRequest(omRequest);
     case RenameKeys:
       return new OMKeysRenameRequest(omRequest);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
index 3f81f40..2fb27c5 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
@@ -20,13 +20,17 @@ package org.apache.hadoop.ozone.om.request.bucket;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
 import com.google.common.base.Optional;
 
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
@@ -154,6 +158,9 @@ public class OMBucketCreateRequest extends OMClientRequest {
         getOmRequest());
     OmBucketInfo omBucketInfo = OmBucketInfo.getFromProtobuf(bucketInfo);
 
+    // Add layout version V1 to bucket info
+    addLayoutVersionToBucket(ozoneManager, omBucketInfo);
+
     AuditLogger auditLogger = ozoneManager.getAuditLogger();
     OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
 
@@ -357,4 +364,21 @@ public class OMBucketCreateRequest extends OMClientRequest {
 
   }
 
+  private void addLayoutVersionToBucket(OzoneManager ozoneManager,
+                                        OmBucketInfo omBucketInfo) {
+    Map<String, String> metadata = omBucketInfo.getMetadata();
+    if (metadata == null) {
+      metadata = new HashMap<>();
+    }
+    OzoneConfiguration configuration = ozoneManager.getConfiguration();
+    // TODO: Many unit test cases have a null config, so a simple null
+    //  check is done for now. Revisit later to avoid massive test changes.
+    if (configuration != null) {
+      String layOutVersion = configuration
+              .get(OMConfigKeys.OZONE_OM_LAYOUT_VERSION,
+                      OMConfigKeys.OZONE_OM_LAYOUT_VERSION_DEFAULT);
+      metadata.put(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, layOutVersion);
+      omBucketInfo.setMetadata(metadata);
+    }
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index 5225d82..e7b43d6 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
@@ -665,4 +666,97 @@ public final class OMFileRequest {
     }
     return prefixName.concat(OzoneConsts.OZONE_URI_DELIMITER).concat(fileName);
   }
+
+  /**
+   * Build DirectoryInfo from OmKeyInfo.
+   *
+   * @param keyInfo omKeyInfo
+   * @return omDirectoryInfo object
+   */
+  public static OmDirectoryInfo getDirectoryInfo(OmKeyInfo keyInfo){
+    OmDirectoryInfo.Builder builder = new OmDirectoryInfo.Builder();
+    builder.setParentObjectID(keyInfo.getParentObjectID());
+    builder.setAcls(keyInfo.getAcls());
+    builder.addAllMetadata(keyInfo.getMetadata());
+    builder.setCreationTime(keyInfo.getCreationTime());
+    builder.setModificationTime(keyInfo.getModificationTime());
+    builder.setObjectID(keyInfo.getObjectID());
+    builder.setUpdateID(keyInfo.getUpdateID());
+    builder.setName(OzoneFSUtils.getFileName(keyInfo.getKeyName()));
+    return builder.build();
+  }
+
+  /**
+   * Verify that the given toKey directory is a sub directory of fromKey
+   * directory.
+   * <p>
+   * For example, special case of renaming a directory to its own
+   * sub-directory is not allowed.
+   *
+   * @param fromKeyName source path
+   * @param toKeyName   destination path
+   * @param isDir       true represents a directory type otw a file type
+   * @throws OMException if the dest dir is a sub-dir of source dir.
+   */
+  public static void verifyToDirIsASubDirOfFromDirectory(String fromKeyName,
+      String toKeyName, boolean isDir) throws OMException {
+    if (!isDir) {
+      return;
+    }
+    Path dstParent = Paths.get(toKeyName).getParent();
+    while (dstParent != null) {
+      if (Paths.get(fromKeyName).equals(dstParent)) {
+        throw new OMException("Cannot rename a directory to its own " +
+                "subdirectory", OMException.ResultCodes.KEY_RENAME_ERROR);
+        // TODO: Existing rename throws java.lang.IllegalArgumentException.
+        //       Should we throw same exception ?
+      }
+      dstParent = dstParent.getParent();
+    }
+  }
+
+  /**
+   * Verify parent exists for the destination path and return destination
+   * path parent Id.
+   * <p>
+   * Check whether dst parent dir exists or not. If the parent exists, then the
+   * source can be renamed to dst path.
+   *
+   * @param volumeName  volume name
+   * @param bucketName  bucket name
+   * @param toKeyName   destination path
+   * @param fromKeyName source path
+   * @param metaMgr     metadata manager
+   * @throws IOException if the destination parent dir doesn't exists.
+   */
+  public static long getToKeyNameParentId(String volumeName,
+      String bucketName, String toKeyName, String fromKeyName,
+      OMMetadataManager metaMgr) throws IOException {
+
+    int totalDirsCount = OzoneFSUtils.getFileCount(toKeyName);
+    // skip parent is root '/'
+    if (totalDirsCount <= 1) {
+      String bucketKey = metaMgr.getBucketKey(volumeName, bucketName);
+      OmBucketInfo omBucketInfo =
+              metaMgr.getBucketTable().get(bucketKey);
+      return omBucketInfo.getObjectID();
+    }
+
+    String toKeyParentDir = OzoneFSUtils.getParentDir(toKeyName);
+
+    OzoneFileStatus toKeyParentDirStatus = getOMKeyInfoIfExists(metaMgr,
+            volumeName, bucketName, toKeyParentDir, 0);
+    // check if the immediate parent exists
+    if (toKeyParentDirStatus == null) {
+      throw new OMException(String.format(
+              "Failed to rename %s to %s, %s doesn't exist", fromKeyName,
+              toKeyName, toKeyParentDir),
+              OMException.ResultCodes.KEY_RENAME_ERROR);
+    } else if (toKeyParentDirStatus.isFile()){
+      throw new OMException(String.format(
+              "Failed to rename %s to %s, %s is a file", fromKeyName, toKeyName,
+              toKeyParentDir), OMException.ResultCodes.KEY_RENAME_ERROR);
+    }
+    return toKeyParentDirStatus.getKeyInfo().getObjectID();
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestV1.java
new file mode 100644
index 0000000..74e53fe
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestV1.java
@@ -0,0 +1,292 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.key;
+
+import com.google.common.base.Optional;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.audit.AuditLogger;
+import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OMMetrics;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.key.OMKeyRenameResponse;
+import org.apache.hadoop.ozone.om.response.key.OMKeyRenameResponseV1;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.*;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+
+/**
+ * Handles rename key request layout version V1.
+ */
+public class OMKeyRenameRequestV1 extends OMKeyRenameRequest {
+
+  private static final Logger LOG =
+          LoggerFactory.getLogger(OMKeyRenameRequestV1.class);
+
+  public OMKeyRenameRequestV1(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override
+  @SuppressWarnings("methodlength")
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+
+    RenameKeyRequest renameKeyRequest = getOmRequest().getRenameKeyRequest();
+    KeyArgs keyArgs = renameKeyRequest.getKeyArgs();
+    Map<String, String> auditMap = buildAuditMap(keyArgs, renameKeyRequest);
+
+    String volumeName = keyArgs.getVolumeName();
+    String bucketName = keyArgs.getBucketName();
+    String fromKeyName = keyArgs.getKeyName();
+    String toKeyName = renameKeyRequest.getToKeyName();
+
+    OMMetrics omMetrics = ozoneManager.getMetrics();
+    omMetrics.incNumKeyRenames();
+
+    AuditLogger auditLogger = ozoneManager.getAuditLogger();
+
+    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
+            getOmRequest());
+
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+    boolean acquiredLock = false;
+    OMClientResponse omClientResponse = null;
+    IOException exception = null;
+    OmKeyInfo fromKeyValue;
+    String fromKey = null;
+    Result result;
+    try {
+      if (toKeyName.length() == 0 || fromKeyName.length() == 0) {
+        throw new OMException("Key name is empty",
+                OMException.ResultCodes.INVALID_KEY_NAME);
+      }
+
+      keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
+      volumeName = keyArgs.getVolumeName();
+      bucketName = keyArgs.getBucketName();
+
+      // check Acls to see if user has access to perform delete operation on
+      // old key and create operation on new key
+      checkKeyAcls(ozoneManager, volumeName, bucketName, fromKeyName,
+              IAccessAuthorizer.ACLType.DELETE, OzoneObj.ResourceType.KEY);
+      checkKeyAcls(ozoneManager, volumeName, bucketName, toKeyName,
+              IAccessAuthorizer.ACLType.CREATE, OzoneObj.ResourceType.KEY);
+
+      acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
+              volumeName, bucketName);
+
+      // Validate bucket and volume exists or not.
+      validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
+
+      // Check if fromKey exists
+      OzoneFileStatus fromKeyFileStatus =
+              OMFileRequest.getOMKeyInfoIfExists(omMetadataManager, volumeName,
+                      bucketName, fromKeyName, 0);
+      // case-1) fromKeyName should exist, otw throws exception
+      if (fromKeyFileStatus == null) {
+        // TODO: Add support for renaming open key
+        throw new OMException("Key not found " + fromKey, KEY_NOT_FOUND);
+      }
+
+      // source existed
+      fromKeyValue = fromKeyFileStatus.getKeyInfo();
+      boolean isRenameDirectory = fromKeyFileStatus.isDirectory();
+
+      // case-2) Cannot rename a directory to its own subdirectory
+      OMFileRequest.verifyToDirIsASubDirOfFromDirectory(fromKeyName,
+              toKeyName, fromKeyFileStatus.isDirectory());
+
+      OzoneFileStatus toKeyFileStatus =
+              OMFileRequest.getOMKeyInfoIfExists(omMetadataManager,
+                      volumeName, bucketName, toKeyName, 0);
+
+      // Check if toKey exists.
+      if(toKeyFileStatus != null) {
+        // Destination exists and following are different cases:
+        OmKeyInfo toKeyValue = toKeyFileStatus.getKeyInfo();
+
+        if (fromKeyValue.getKeyName().equals(toKeyValue.getKeyName())) {
+          // case-3) If src == destin then check source and destin of same type
+          // (a) If dst is a file then return true.
+          // (b) Otherwise throws exception.
+          // TODO: Discuss do we need to throw exception for file as well.
+          if (toKeyFileStatus.isFile()) {
+            result = Result.SUCCESS;
+          } else {
+            throw new OMException("Key already exists " + toKeyName,
+                    OMException.ResultCodes.KEY_ALREADY_EXISTS);
+          }
+        } else if (toKeyFileStatus.isDirectory()) {
+          // case-4) If dst is a directory then rename source as sub-path of it
+          // For example: rename /source to /dst will lead to /dst/source
+          String fromFileName = OzoneFSUtils.getFileName(fromKeyName);
+          String newToKeyName = OzoneFSUtils.appendFileNameToKeyPath(toKeyName,
+                  fromFileName);
+          OzoneFileStatus newToOzoneFileStatus =
+                  OMFileRequest.getOMKeyInfoIfExists(omMetadataManager,
+                          volumeName, bucketName, newToKeyName, 0);
+
+          if (newToOzoneFileStatus != null) {
+            // case-5) If new destin '/dst/source' exists then throws exception
+            throw new OMException(String.format(
+                    "Failed to rename %s to %s, file already exists or not " +
+                            "empty!", fromKeyName, newToKeyName),
+                    OMException.ResultCodes.KEY_ALREADY_EXISTS);
+          }
+
+          omClientResponse = renameKey(toKeyValue.getObjectID(), trxnLogIndex,
+                  fromKeyValue, isRenameDirectory, newToKeyName,
+                  keyArgs.getModificationTime(), omResponse, ozoneManager);
+          result = Result.SUCCESS;
+        } else {
+          // case-6) If destination is a file type and if exists then throws
+          // key already exists exception.
+          throw new OMException("Failed to rename, key already exists "
+                  + toKeyName, OMException.ResultCodes.KEY_ALREADY_EXISTS);
+        }
+      } else {
+        // Destination doesn't exist and the cases are:
+        // case-7) Check whether dst parent dir exists or not. If parent
+        // doesn't exist then throw exception, otw the source can be renamed to
+        // destination path.
+        long toKeyParentId = OMFileRequest.getToKeyNameParentId(volumeName,
+                bucketName, toKeyName, fromKeyName, omMetadataManager);
+
+        omClientResponse = renameKey(toKeyParentId, trxnLogIndex,
+                fromKeyValue, isRenameDirectory, toKeyName,
+                keyArgs.getModificationTime(), omResponse, ozoneManager);
+
+        result = Result.SUCCESS;
+      }
+    } catch (IOException ex) {
+      result = Result.FAILURE;
+      exception = ex;
+      omClientResponse = new OMKeyRenameResponse(createErrorOMResponse(
+              omResponse, exception));
+    } finally {
+      addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
+              omDoubleBufferHelper);
+      if (acquiredLock) {
+        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
+                bucketName);
+      }
+    }
+
+    auditLog(auditLogger, buildAuditMessage(OMAction.RENAME_KEY, auditMap,
+            exception, getOmRequest().getUserInfo()));
+
+    switch (result) {
+    case SUCCESS:
+      LOG.debug("Rename Key is successfully completed for volume:{} bucket:{}" +
+                      " fromKey:{} toKey:{}. ", volumeName, bucketName,
+              fromKeyName, toKeyName);
+      break;
+    case FAILURE:
+      ozoneManager.getMetrics().incNumKeyRenameFails();
+      LOG.error("Rename key failed for volume:{} bucket:{} fromKey:{} " +
+                      "toKey:{}. Key: {} not found.", volumeName, bucketName,
+              fromKeyName, toKeyName, fromKeyName);
+      break;
+    default:
+      LOG.error("Unrecognized Result for OMKeyRenameRequest: {}",
+              renameKeyRequest);
+    }
+    return omClientResponse;
+  }
+
+  @SuppressWarnings("parameternumber")
+  private OMClientResponse renameKey(long toKeyParentId,
+      long trxnLogIndex, OmKeyInfo fromKeyValue, boolean isRenameDirectory,
+      String toKeyName, long modificationTime, OMResponse.Builder omResponse,
+      OzoneManager ozoneManager) {
+
+    String dbFromKey = fromKeyValue.getPath();
+    String toKeyFileName = OzoneFSUtils.getFileName(toKeyName);
+
+    fromKeyValue.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
+    // Set toFileName
+    fromKeyValue.setKeyName(toKeyFileName);
+    fromKeyValue.setFileName(toKeyFileName);
+    // Set toKeyObjectId
+    fromKeyValue.setParentObjectID(toKeyParentId);
+    //Set modification time
+    fromKeyValue.setModificationTime(modificationTime);
+
+    // destination dbKeyName
+    String dbToKey = fromKeyValue.getPath();
+
+    // Add to cache.
+    // dbFromKey should be deleted, dbToKey should be added with newly updated
+    // omKeyInfo.
+    // Add from_key and to_key details into cache.
+    OMMetadataManager metadataMgr = ozoneManager.getMetadataManager();
+    if (isRenameDirectory) {
+      Table<String, OmDirectoryInfo> dirTable = metadataMgr.getDirectoryTable();
+      dirTable.addCacheEntry(new CacheKey<>(dbFromKey),
+              new CacheValue<>(Optional.absent(), trxnLogIndex));
+
+      dirTable.addCacheEntry(new CacheKey<>(dbToKey),
+              new CacheValue<>(Optional.of(OMFileRequest.
+                              getDirectoryInfo(fromKeyValue)), trxnLogIndex));
+    } else {
+      Table<String, OmKeyInfo> keyTable = metadataMgr.getKeyTable();
+
+      keyTable.addCacheEntry(new CacheKey<>(dbFromKey),
+              new CacheValue<>(Optional.absent(), trxnLogIndex));
+
+      keyTable.addCacheEntry(new CacheKey<>(dbToKey),
+              new CacheValue<>(Optional.of(fromKeyValue), trxnLogIndex));
+    }
+
+    OMClientResponse omClientResponse = new OMKeyRenameResponseV1(omResponse
+            .setRenameKeyResponse(RenameKeyResponse.newBuilder()).build(),
+            dbFromKey, dbToKey, fromKeyValue, isRenameDirectory);
+    return omClientResponse;
+  }
+
+  private Map<String, String> buildAuditMap(
+          KeyArgs keyArgs, RenameKeyRequest renameKeyRequest) {
+    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
+    auditMap.remove(OzoneConsts.KEY);
+    auditMap.put(OzoneConsts.SRC_KEY, keyArgs.getKeyName());
+    auditMap.put(OzoneConsts.DST_KEY, renameKeyRequest.getToKeyName());
+    return auditMap;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java
index 7470b37..3b7edf1 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java
@@ -70,4 +70,15 @@ public class OMKeyRenameResponse extends OMClientResponse {
         renameKeyInfo);
   }
 
+  public OmKeyInfo getRenameKeyInfo() {
+    return renameKeyInfo;
+  }
+
+  public String getFromKeyName() {
+    return fromKeyName;
+  }
+
+  public String getToKeyName() {
+    return toKeyName;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponseV1.java
similarity index 50%
copy from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java
copy to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponseV1.java
index 7470b37..7a9b159 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponseV1.java
@@ -18,56 +18,52 @@
 
 package org.apache.hadoop.ozone.om.response.key;
 
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 
-import java.io.IOException;
 import javax.annotation.Nonnull;
+import java.io.IOException;
 
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.*;
 
 /**
- * Response for RenameKey request.
+ * Response for RenameKey request layout version V1.
  */
-@CleanupTableInfo(cleanupTables = {KEY_TABLE})
-public class OMKeyRenameResponse extends OMClientResponse {
-
-  private String fromKeyName;
-  private String toKeyName;
-  private OmKeyInfo renameKeyInfo;
+@CleanupTableInfo(cleanupTables = {FILE_TABLE, DIRECTORY_TABLE})
+public class OMKeyRenameResponseV1 extends OMKeyRenameResponse {
 
-  public OMKeyRenameResponse(@Nonnull OMResponse omResponse,
-      String fromKeyName, String toKeyName, @Nonnull OmKeyInfo renameKeyInfo) {
-    super(omResponse);
-    this.fromKeyName = fromKeyName;
-    this.toKeyName = toKeyName;
-    this.renameKeyInfo = renameKeyInfo;
-  }
+  private boolean isRenameDirectory;
 
-  /**
-   * For when the request is not successful.
-   * For a successful request, the other constructor should be used.
-   */
-  public OMKeyRenameResponse(@Nonnull OMResponse omResponse) {
-    super(omResponse);
-    checkStatusNotOK();
+  public OMKeyRenameResponseV1(@Nonnull OMResponse omResponse,
+      String fromKeyName, String toKeyName, @Nonnull OmKeyInfo renameKeyInfo,
+      boolean isRenameDirectory) {
+    super(omResponse, fromKeyName, toKeyName, renameKeyInfo);
+    this.isRenameDirectory = isRenameDirectory;
   }
 
   @Override
   public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
-    String volumeName = renameKeyInfo.getVolumeName();
-    String bucketName = renameKeyInfo.getBucketName();
-    omMetadataManager.getKeyTable().deleteWithBatch(batchOperation,
-        omMetadataManager.getOzoneKey(volumeName, bucketName, fromKeyName));
-    omMetadataManager.getKeyTable().putWithBatch(batchOperation,
-        omMetadataManager.getOzoneKey(volumeName, bucketName, toKeyName),
-        renameKeyInfo);
-  }
+                           BatchOperation batchOperation) throws IOException {
+
+    if (isRenameDirectory) {
+      omMetadataManager.getDirectoryTable().deleteWithBatch(batchOperation,
+              getFromKeyName());
 
+      OmDirectoryInfo renameDirInfo =
+              OMFileRequest.getDirectoryInfo(getRenameKeyInfo());
+      omMetadataManager.getDirectoryTable().putWithBatch(batchOperation,
+              getToKeyName(), renameDirInfo);
+
+    } else {
+      omMetadataManager.getKeyTable().deleteWithBatch(batchOperation,
+              getFromKeyName());
+      omMetadataManager.getKeyTable().putWithBatch(batchOperation,
+              getToKeyName(), getRenameKeyInfo());
+    }
+  }
 }
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
index 6784dea..56c7e10 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.OzoneKey;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
@@ -523,4 +524,8 @@ public class BasicOzoneClientAdapterImpl implements OzoneClientAdapter {
     return blockLocations;
   }
 
+  @Override
+  public String getBucketLayoutVersion() {
+    return bucket.getMetadata().get(OMConfigKeys.OZONE_OM_LAYOUT_VERSION);
+  }
 }
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index e5695df..8a466a9 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.conf.StorageUnit;
 import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -314,6 +315,7 @@ public class BasicOzoneFileSystem extends FileSystem {
 
     String srcPath = src.toUri().getPath();
     String dstPath = dst.toUri().getPath();
+    // TODO: Discuss do we need to throw exception.
     if (srcPath.equals(dstPath)) {
       return true;
     }
@@ -325,6 +327,12 @@ public class BasicOzoneFileSystem extends FileSystem {
       return false;
     }
 
+    String layOutVersion = adapter.getBucketLayoutVersion();
+    if (layOutVersion != null &&
+            OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1.equals(layOutVersion)) {
+      return renameV1(srcPath, dstPath);
+    }
+
     // Check if the source exists
     FileStatus srcStatus;
     try {
@@ -405,6 +413,11 @@ public class BasicOzoneFileSystem extends FileSystem {
     return result;
   }
 
+  private boolean renameV1(String srcPath, String dstPath) throws IOException {
+    adapter.renameKey(srcPath, dstPath);
+    return true;
+  }
+
   /**
    * Intercept rename to trash calls from TrashPolicyDefault.
    */
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
index 74f5557..4ea08f2 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.ozone.client.OzoneKey;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
@@ -1031,4 +1032,10 @@ public class BasicRootedOzoneClientAdapterImpl
         null, null, null, new BlockLocation[0]
     );
   }
+
+  @Override
+  public String getBucketLayoutVersion() {
+    // TODO: Need to refine this part.
+    return OMConfigKeys.OZONE_OM_LAYOUT_VERSION_DEFAULT;
+  }
 }
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
index b9e2881..5b65a0e 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
@@ -75,4 +75,5 @@ public interface OzoneClientAdapter {
   FileStatusAdapter getFileStatus(String key, URI uri,
       Path qualifiedPath, String userName) throws IOException;
 
+  String getBucketLayoutVersion();
 }

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 14/29: HDDS-4771. [FSO]S3MultiPart: Implement InitiateMultiPartUpload (#1877)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit f6abf23a8d0184273c4d16c126206370ab3896b8
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Fri Feb 5 11:16:15 2021 +0530

    HDDS-4771. [FSO]S3MultiPart: Implement InitiateMultiPartUpload (#1877)
---
 .../apache/hadoop/ozone/om/helpers/OmKeyInfo.java  |  32 ++-
 .../ozone/om/helpers/OmMultipartKeyInfo.java       |  63 ++++-
 .../rpc/TestOzoneClientMultipartUploadV1.java      | 182 ++++++++++++++
 .../src/main/proto/OmClientProtocol.proto          |   1 +
 .../apache/hadoop/ozone/om/OMMetadataManager.java  |  10 +
 .../hadoop/ozone/om/OmMetadataManagerImpl.java     |  26 +-
 .../hadoop/ozone/om/codec/OMDBDefinition.java      |  11 +-
 .../om/ratis/utils/OzoneManagerRatisUtils.java     |   4 +
 .../ozone/om/request/file/OMFileRequest.java       |  26 ++
 .../S3InitiateMultipartUploadRequestV1.java        | 271 +++++++++++++++++++++
 .../S3MultipartUploadCompleteRequest.java          |   5 +-
 .../S3InitiateMultipartUploadResponseV1.java       |  80 ++++++
 .../TestS3InitiateMultipartUploadRequestV1.java    | 186 ++++++++++++++
 .../s3/multipart/TestS3MultipartRequest.java       |  29 +++
 .../TestS3InitiateMultipartUploadResponse.java     |   2 +-
 .../TestS3InitiateMultipartUploadResponseV1.java   |  86 +++++++
 .../s3/multipart/TestS3MultipartResponse.java      |  46 ++++
 17 files changed, 1048 insertions(+), 12 deletions(-)

diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
index d097714..be68d9b 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
@@ -54,8 +54,36 @@ public final class OmKeyInfo extends WithObjectID {
   private HddsProtos.ReplicationType type;
   private HddsProtos.ReplicationFactor factor;
   private FileEncryptionInfo encInfo;
-  private String fileName; // leaf node name
-  private long parentObjectID; // pointer to parent directory
+
+  /**
+   * A pointer to parent directory used for path traversal. ParentID will be
+   * used only when the key is created into a FileSystemOptimized(FSO) bucket.
+   * <p>
+   * For example, if a key "a/b/key1" created into a FSOBucket then each
+   * path component will be assigned an ObjectId and linked to its parent path
+   * component using parent's objectID.
+   * <p>
+   * Say, Bucket's ObjectID = 512, which is the parent for its immediate child
+   * element.
+   * <p>
+   * ------------------------------------------|
+   * PathComponent |   ObjectID   |   ParentID |
+   * ------------------------------------------|
+   *      a        |     1024     |     512    |
+   * ------------------------------------------|
+   *      b        |     1025     |     1024   |
+   * ------------------------------------------|
+   *     key1      |     1026     |     1025   |
+   * ------------------------------------------|
+   */
+  private long parentObjectID;
+
+  /**
+   * Represents leaf node name. This also will be used when the keyName is
+   * created on a FileSystemOptimized(FSO) bucket. For example, the user given
+   * keyName is "a/b/key1" then the fileName stores "key1".
+   */
+  private String fileName;
 
   /**
    * ACL Information.
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java
index df8751c..be37f93 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java
@@ -37,6 +37,30 @@ public class OmMultipartKeyInfo extends WithObjectID {
   private TreeMap<Integer, PartKeyInfo> partKeyInfoList;
 
   /**
+   * A pointer to parent directory used for path traversal. ParentID will be
+   * used only when the multipart key is created into a FileSystemOptimized(FSO)
+   * bucket.
+   * <p>
+   * For example, if a key "a/b/multiKey1" created into a FSOBucket then each
+   * path component will be assigned an ObjectId and linked to its parent path
+   * component using parent's objectID.
+   * <p>
+   * Say, Bucket's ObjectID = 512, which is the parent for its immediate child
+   * element.
+   * <p>
+   * ------------------------------------------|
+   * PathComponent |   ObjectID   |   ParentID |
+   * ------------------------------------------|
+   *      a        |     1024     |     512    |
+   * ------------------------------------------|
+   *      b        |     1025     |     1024   |
+   * ------------------------------------------|
+   *   multiKey1   |     1026     |     1025   |
+   * ------------------------------------------|
+   */
+  private long parentID;
+
+  /**
    * Construct OmMultipartKeyInfo object which holds multipart upload
    * information for a key.
    */
@@ -53,6 +77,29 @@ public class OmMultipartKeyInfo extends WithObjectID {
   }
 
   /**
+   * Construct OmMultipartKeyInfo object which holds multipart upload
+   * information for a key.
+   */
+  @SuppressWarnings("parameternumber")
+  public OmMultipartKeyInfo(String id, long creationTime,
+      ReplicationType replicationType, ReplicationFactor replicationFactor,
+      Map<Integer, PartKeyInfo> list, long objectID, long updateID,
+      long parentObjId) {
+    this(id, creationTime, replicationType, replicationFactor, list, objectID,
+            updateID);
+    this.parentID = parentObjId;
+  }
+
+  /**
+   * Returns parentID.
+   *
+   * @return long
+   */
+  public long getParentID() {
+    return parentID;
+  }
+
+  /**
    * Returns the uploadID for this multi part upload of a key.
    * @return uploadID
    */
@@ -95,6 +142,7 @@ public class OmMultipartKeyInfo extends WithObjectID {
     private TreeMap<Integer, PartKeyInfo> partKeyInfoList;
     private long objectID;
     private long updateID;
+    private long parentID;
 
     public Builder() {
       this.partKeyInfoList = new TreeMap<>();
@@ -144,9 +192,14 @@ public class OmMultipartKeyInfo extends WithObjectID {
       return this;
     }
 
+    public Builder setParentID(long parentObjId) {
+      this.parentID = parentObjId;
+      return this;
+    }
+
     public OmMultipartKeyInfo build() {
       return new OmMultipartKeyInfo(uploadID, creationTime, replicationType,
-          replicationFactor, partKeyInfoList, objectID, updateID);
+          replicationFactor, partKeyInfoList, objectID, updateID, parentID);
     }
   }
 
@@ -163,7 +216,7 @@ public class OmMultipartKeyInfo extends WithObjectID {
     return new OmMultipartKeyInfo(multipartKeyInfo.getUploadID(),
         multipartKeyInfo.getCreationTime(), multipartKeyInfo.getType(),
         multipartKeyInfo.getFactor(), list, multipartKeyInfo.getObjectID(),
-        multipartKeyInfo.getUpdateID());
+        multipartKeyInfo.getUpdateID(), multipartKeyInfo.getParentID());
   }
 
   /**
@@ -177,7 +230,8 @@ public class OmMultipartKeyInfo extends WithObjectID {
         .setType(replicationType)
         .setFactor(replicationFactor)
         .setObjectID(objectID)
-        .setUpdateID(updateID);
+        .setUpdateID(updateID)
+        .setParentID(parentID);
     partKeyInfoList.forEach((key, value) -> builder.addPartKeyInfoList(value));
     return builder.build();
   }
@@ -205,7 +259,8 @@ public class OmMultipartKeyInfo extends WithObjectID {
     // For partKeyInfoList we can do shallow copy here, as the PartKeyInfo is
     // immutable here.
     return new OmMultipartKeyInfo(uploadID, creationTime, replicationType,
-        replicationFactor, new TreeMap<>(partKeyInfoList), objectID, updateID);
+        replicationFactor, new TreeMap<>(partKeyInfoList), objectID, updateID,
+        parentID);
   }
 
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java
new file mode 100644
index 0000000..93e5826
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java
@@ -0,0 +1,182 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.client.rpc;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
+
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+import java.io.IOException;
+import java.util.UUID;
+
+import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
+import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
+
+/**
+ * This test verifies all the S3 multipart client apis - layout version V1.
+ */
+public class TestOzoneClientMultipartUploadV1 {
+
+  private static ObjectStore store = null;
+  private static MiniOzoneCluster cluster = null;
+  private static OzoneClient ozClient = null;
+
+  private static String scmId = UUID.randomUUID().toString();
+
+  /**
+   * Set a timeout for each test.
+   */
+  @Rule
+  public Timeout timeout = new Timeout(300000);
+
+  /**
+   * Create a MiniOzoneCluster for testing.
+   * <p>
+   * Ozone is made active by setting OZONE_ENABLED = true
+   *
+   * @throws IOException
+   */
+  @BeforeClass
+  public static void init() throws Exception {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    TestOMRequestUtils.configureFSOptimizedPaths(conf,
+            true, OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1);
+    startCluster(conf);
+  }
+
+  /**
+   * Close OzoneClient and shutdown MiniOzoneCluster.
+   */
+  @AfterClass
+  public static void shutdown() throws IOException {
+    shutdownCluster();
+  }
+
+
+  /**
+   * Create a MiniOzoneCluster for testing.
+   * @param conf Configurations to start the cluster.
+   * @throws Exception
+   */
+  static void startCluster(OzoneConfiguration conf) throws Exception {
+    cluster = MiniOzoneCluster.newBuilder(conf)
+            .setNumDatanodes(3)
+            .setTotalPipelineNumLimit(10)
+            .setScmId(scmId)
+            .build();
+    cluster.waitForClusterToBeReady();
+    ozClient = OzoneClientFactory.getRpcClient(conf);
+    store = ozClient.getObjectStore();
+  }
+
+  /**
+   * Close OzoneClient and shutdown MiniOzoneCluster.
+   */
+  static void shutdownCluster() throws IOException {
+    if(ozClient != null) {
+      ozClient.close();
+    }
+
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testInitiateMultipartUploadWithReplicationInformationSet() throws
+          IOException {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+    OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
+            STAND_ALONE, ONE);
+
+    assertNotNull(multipartInfo);
+    String uploadID = multipartInfo.getUploadID();
+    Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
+    Assert.assertEquals(bucketName, multipartInfo.getBucketName());
+    Assert.assertEquals(keyName, multipartInfo.getKeyName());
+    assertNotNull(multipartInfo.getUploadID());
+
+    // Call initiate multipart upload for the same key again, this should
+    // generate a new uploadID.
+    multipartInfo = bucket.initiateMultipartUpload(keyName,
+            STAND_ALONE, ONE);
+
+    assertNotNull(multipartInfo);
+    Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
+    Assert.assertEquals(bucketName, multipartInfo.getBucketName());
+    Assert.assertEquals(keyName, multipartInfo.getKeyName());
+    assertNotEquals(multipartInfo.getUploadID(), uploadID);
+    assertNotNull(multipartInfo.getUploadID());
+  }
+
+  @Test
+  public void testInitiateMultipartUploadWithDefaultReplication() throws
+          IOException {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+    OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName);
+
+    assertNotNull(multipartInfo);
+    String uploadID = multipartInfo.getUploadID();
+    Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
+    Assert.assertEquals(bucketName, multipartInfo.getBucketName());
+    Assert.assertEquals(keyName, multipartInfo.getKeyName());
+    assertNotNull(multipartInfo.getUploadID());
+
+    // Call initiate multipart upload for the same key again, this should
+    // generate a new uploadID.
+    multipartInfo = bucket.initiateMultipartUpload(keyName);
+
+    assertNotNull(multipartInfo);
+    Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
+    Assert.assertEquals(bucketName, multipartInfo.getBucketName());
+    Assert.assertEquals(keyName, multipartInfo.getKeyName());
+    assertNotEquals(multipartInfo.getUploadID(), uploadID);
+    assertNotNull(multipartInfo.getUploadID());
+  }
+
+}
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index d6b5dbd..2bae4e5 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -1101,6 +1101,7 @@ message MultipartKeyInfo {
     repeated PartKeyInfo partKeyInfoList = 5;
     optional uint64 objectID = 6;
     optional uint64 updateID = 7;
+    optional uint64 parentID = 8;
 }
 
 message PartKeyInfo {
diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
index 7ff684b..4c66040 100644
--- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
+++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
@@ -426,4 +426,14 @@ public interface OMMetadataManager extends DBStoreHAManager {
    * @return DB directory key as String.
    */
   String getOpenFileName(long parentObjectId, String fileName, long id);
+
+  /**
+   * Returns the DB key name of a multipart upload key in OM metadata store.
+   *
+   * @param parentObjectId - parent object Id
+   * @param fileName       - file name
+   * @param uploadId       - the upload id for this key
+   * @return bytes of DB key.
+   */
+  String getMultipartKey(long parentObjectId, String fileName, String uploadId);
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index 4a55108..4e4f91b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -131,9 +131,11 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
    * |----------------------------------------------------------------------|
    * |  directoryTable    | parentId/directoryName -> DirectoryInfo         |
    * |----------------------------------------------------------------------|
-   * |  fileTable         | parentId/fileName -> KeyInfo                |
+   * |  fileTable         | parentId/fileName -> KeyInfo                    |
    * |----------------------------------------------------------------------|
-   * |  openFileTable     | parentId/fileName/id -> KeyInfo                   |
+   * |  openFileTable     | parentId/fileName/id -> KeyInfo                 |
+   * |----------------------------------------------------------------------|
+   * |  multipartFileInfoTable | parentId/fileName/uploadId ->...           |
    * |----------------------------------------------------------------------|
    * |  transactionInfoTable | #TRANSACTIONINFO -> OMTransactionInfo        |
    * |----------------------------------------------------------------------|
@@ -152,6 +154,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
   public static final String DIRECTORY_TABLE = "directoryTable";
   public static final String FILE_TABLE = "fileTable";
   public static final String OPEN_FILE_TABLE = "openFileTable";
+  public static final String MULTIPARTFILEINFO_TABLE = "multipartFileInfoTable";
   public static final String TRANSACTION_INFO_TABLE =
       "transactionInfoTable";
 
@@ -176,6 +179,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
   private Table transactionInfoTable;
   private boolean isRatisEnabled;
   private boolean ignorePipelineinKey;
+  private Table<String, OmMultipartKeyInfo> multipartFileInfoTable;
 
   // Epoch is used to generate the objectIDs. The most significant 2 bits of
   // objectIDs is set to this epoch. For clusters before HDDS-4315 there is
@@ -271,6 +275,9 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
 
   @Override
   public Table<String, OmMultipartKeyInfo> getMultipartInfoTable() {
+    if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
+      return multipartFileInfoTable;
+    }
     return multipartInfoTable;
   }
 
@@ -364,6 +371,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
         .addTable(DIRECTORY_TABLE)
         .addTable(FILE_TABLE)
         .addTable(OPEN_FILE_TABLE)
+        .addTable(MULTIPARTFILEINFO_TABLE)
         .addTable(TRANSACTION_INFO_TABLE)
         .addCodec(OzoneTokenIdentifier.class, new TokenIdentifierCodec())
         .addCodec(OmKeyInfo.class, new OmKeyInfoCodec(true))
@@ -442,6 +450,10 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
             OmKeyInfo.class);
     checkTableStatus(openFileTable, OPEN_FILE_TABLE);
 
+    multipartFileInfoTable = this.store.getTable(MULTIPARTFILEINFO_TABLE,
+            String.class, OmMultipartKeyInfo.class);
+    checkTableStatus(multipartFileInfoTable, MULTIPARTFILEINFO_TABLE);
+
     transactionInfoTable = this.store.getTable(TRANSACTION_INFO_TABLE,
         String.class, TransactionInfo.class);
     checkTableStatus(transactionInfoTable, TRANSACTION_INFO_TABLE);
@@ -1224,4 +1236,14 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
     openKey.append(OM_KEY_PREFIX).append(id);
     return openKey.toString();
   }
+
+  @Override
+  public String getMultipartKey(long parentID, String fileName,
+                                String uploadId) {
+    StringBuilder openKey = new StringBuilder();
+    openKey.append(parentID);
+    openKey.append(OM_KEY_PREFIX).append(fileName);
+    openKey.append(OM_KEY_PREFIX).append(uploadId);
+    return openKey.toString();
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
index a968047..77b9e04 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
@@ -171,6 +171,15 @@ public class OMDBDefinition implements DBDefinition {
                   OmKeyInfo.class,
                   new OmKeyInfoCodec(true));
 
+  public static final DBColumnFamilyDefinition<String, OmMultipartKeyInfo>
+          MULTIPART_FILEINFO_TABLE =
+          new DBColumnFamilyDefinition<>(
+                  OmMetadataManagerImpl.MULTIPARTFILEINFO_TABLE,
+                  String.class,
+                  new StringCodec(),
+                  OmMultipartKeyInfo.class,
+                  new OmMultipartKeyInfoCodec());
+
   @Override
   public String getName() {
     return OzoneConsts.OM_DB_NAME;
@@ -187,7 +196,7 @@ public class OMDBDefinition implements DBDefinition {
         VOLUME_TABLE, OPEN_KEY_TABLE, KEY_TABLE,
         BUCKET_TABLE, MULTIPART_INFO_TABLE, PREFIX_TABLE, DTOKEN_TABLE,
         S3_SECRET_TABLE, TRANSACTION_INFO_TABLE, DIRECTORY_TABLE,
-        FILE_TABLE, OPEN_FILE_TABLE};
+        FILE_TABLE, OPEN_FILE_TABLE, MULTIPART_FILEINFO_TABLE};
   }
 }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index 93e49f0..08fc09f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.ozone.om.request.key.acl.prefix.OMPrefixAddAclRequest;
 import org.apache.hadoop.ozone.om.request.key.acl.prefix.OMPrefixRemoveAclRequest;
 import org.apache.hadoop.ozone.om.request.key.acl.prefix.OMPrefixSetAclRequest;
 import org.apache.hadoop.ozone.om.request.s3.multipart.S3InitiateMultipartUploadRequest;
+import org.apache.hadoop.ozone.om.request.s3.multipart.S3InitiateMultipartUploadRequestV1;
 import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadAbortRequest;
 import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCommitPartRequest;
 import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCompleteRequest;
@@ -178,6 +179,9 @@ public final class OzoneManagerRatisUtils {
     case PurgeKeys:
       return new OMKeyPurgeRequest(omRequest);
     case InitiateMultiPartUpload:
+      if (isBucketFSOptimized()) {
+        return new S3InitiateMultipartUploadRequestV1(omRequest);
+      }
       return new S3InitiateMultipartUploadRequest(omRequest);
     case CommitMultiPartUpload:
       return new S3MultipartUploadCommitPartRequest(omRequest);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index aadc126..7f2d2c5 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -456,6 +456,7 @@ public final class OMFileRequest {
       // For example, the user given key path is '/a/b/c/d/e/file1', then in DB
       // keyName field stores only the leaf node name, which is 'file1'.
       omFileInfo.setKeyName(fileName);
+      omFileInfo.setFileName(fileName);
       keyInfoOptional = Optional.of(omFileInfo);
     }
 
@@ -481,6 +482,7 @@ public final class OMFileRequest {
     // For example, the user given key path is '/a/b/c/d/e/file1', then in DB
     // keyName field stores only the leaf node name, which is 'file1'.
     omFileInfo.setKeyName(fileName);
+    omFileInfo.setFileName(fileName);
 
     omMetadataManager.getKeyTable().addCacheEntry(
             new CacheKey<>(dbFileKey),
@@ -511,6 +513,30 @@ public final class OMFileRequest {
   }
 
   /**
+   * Adding multipart omKeyInfo to open file table.
+   *
+   * @param omMetadataMgr OM Metadata Manager
+   * @param batchOp       batch of db operations
+   * @param omFileInfo    omKeyInfo
+   * @param uploadID      uploadID
+   * @return multipartFileKey
+   * @throws IOException DB failure
+   */
+  public static String addToOpenFileTable(OMMetadataManager omMetadataMgr,
+      BatchOperation batchOp, OmKeyInfo omFileInfo, String uploadID)
+          throws IOException {
+
+    String multipartFileKey = omMetadataMgr.getMultipartKey(
+            omFileInfo.getParentObjectID(), omFileInfo.getFileName(),
+            uploadID);
+
+    omMetadataMgr.getOpenKeyTable().putWithBatch(batchOp, multipartFileKey,
+            omFileInfo);
+
+    return multipartFileKey;
+  }
+
+  /**
    * Adding omKeyInfo to file table.
    *
    * @param omMetadataMgr
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestV1.java
new file mode 100644
index 0000000..3507090
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestV1.java
@@ -0,0 +1,271 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.s3.multipart;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestV1;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.s3.multipart.S3InitiateMultipartUploadResponse;
+import org.apache.hadoop.ozone.om.response.s3.multipart.S3InitiateMultipartUploadResponseV1;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_SUPPORTED_OPERATION;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS;
+
+/**
+ * Handles the initiate multipart upload request for the prefix-based
+ * (FS-optimized, layout V1) bucket format.
+ */
+public class S3InitiateMultipartUploadRequestV1
+        extends S3InitiateMultipartUploadRequest {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(S3InitiateMultipartUploadRequestV1.class);
+
+  public S3InitiateMultipartUploadRequestV1(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override
+  @SuppressWarnings("methodlength")
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long transactionLogIndex,
+      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
+    MultipartInfoInitiateRequest multipartInfoInitiateRequest =
+        getOmRequest().getInitiateMultiPartUploadRequest();
+
+    KeyArgs keyArgs =
+        multipartInfoInitiateRequest.getKeyArgs();
+
+    Preconditions.checkNotNull(keyArgs.getMultipartUploadID());
+
+    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
+
+    String volumeName = keyArgs.getVolumeName();
+    String bucketName = keyArgs.getBucketName();
+    final String requestedVolume = volumeName;
+    final String requestedBucket = bucketName;
+    String keyName = keyArgs.getKeyName();
+
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+
+    ozoneManager.getMetrics().incNumInitiateMultipartUploads();
+    boolean acquiredBucketLock = false;
+    IOException exception = null;
+    OmMultipartKeyInfo multipartKeyInfo = null;
+    OmKeyInfo omKeyInfo = null;
+    List<OmDirectoryInfo> missingParentInfos;
+    Result result = null;
+
+    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
+        getOmRequest());
+    OMClientResponse omClientResponse = null;
+    try {
+      keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
+      volumeName = keyArgs.getVolumeName();
+      bucketName = keyArgs.getBucketName();
+
+      // TODO to support S3 ACL later.
+      acquiredBucketLock =
+          omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
+              volumeName, bucketName);
+
+      validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
+
+      // If KMS is configured and TDE is enabled on bucket, throw MPU not
+      // supported.
+      if (ozoneManager.getKmsProvider() != null) {
+        if (omMetadataManager.getBucketTable().get(
+            omMetadataManager.getBucketKey(volumeName, bucketName))
+            .getEncryptionKeyInfo() != null) {
+          throw new OMException("MultipartUpload is not yet supported on " +
+              "encrypted buckets", NOT_SUPPORTED_OPERATION);
+        }
+      }
+
+      OMFileRequest.OMPathInfoV1 pathInfoV1 =
+              OMFileRequest.verifyDirectoryKeysInPath(omMetadataManager,
+                      volumeName, bucketName, keyName, Paths.get(keyName));
+
+      // check if the directory already existed in OM
+      checkDirectoryResult(keyName, pathInfoV1.getDirectoryResult());
+
+      // add all missing parents to dir table
+      missingParentInfos =
+              OMDirectoryCreateRequestV1.getAllMissingParentDirInfo(
+                      ozoneManager, keyArgs, pathInfoV1, transactionLogIndex);
+
+      // We are adding uploadId to the key because, if multiple users try to
+      // perform multipart upload on the same key, each will try to upload;
+      // whoever finally commits the key is whose key we see in Ozone. If we
+      // didn't add the id and used the same key /volume/bucket/key, then when
+      // multiple users tried to upload the key we would update the key's
+      // parts from multiple users onto the same key, and the key output
+      // could be a mix of parts from multiple users.
+
+      // So, if multipart upload is initiated multiple times on the same key,
+      // we store multiple entries in the openKey Table.
+      // We checked AWS S3: each time a multipart upload is run, a new
+      // uploadId is returned. Also, even if a key already exists when the
+      // initiate multipart upload request is received, it still returns a
+      // multipart upload id for the key.
+
+      String multipartKey = omMetadataManager.getMultipartKey(
+              pathInfoV1.getLastKnownParentId(), pathInfoV1.getLeafNodeName(),
+              keyArgs.getMultipartUploadID());
+
+      // Even if this key already exists in the KeyTable, it would be taken
+      // care of in the final complete multipart upload. AWS S3 behavior is
+      // also like this: even when a key exists in a bucket, a user can still
+      // initiate MPU.
+
+      multipartKeyInfo = new OmMultipartKeyInfo.Builder()
+          .setUploadID(keyArgs.getMultipartUploadID())
+          .setCreationTime(keyArgs.getModificationTime())
+          .setReplicationType(keyArgs.getType())
+          .setReplicationFactor(keyArgs.getFactor())
+          .setObjectID(pathInfoV1.getLeafNodeObjectId())
+          .setUpdateID(transactionLogIndex)
+          .setParentID(pathInfoV1.getLastKnownParentId())
+          .build();
+
+      omKeyInfo = new OmKeyInfo.Builder()
+          .setVolumeName(volumeName)
+          .setBucketName(bucketName)
+          .setKeyName(keyArgs.getKeyName())
+          .setCreationTime(keyArgs.getModificationTime())
+          .setModificationTime(keyArgs.getModificationTime())
+          .setReplicationType(keyArgs.getType())
+          .setReplicationFactor(keyArgs.getFactor())
+          .setOmKeyLocationInfos(Collections.singletonList(
+              new OmKeyLocationInfoGroup(0, new ArrayList<>())))
+          .setAcls(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList()))
+          .setObjectID(pathInfoV1.getLeafNodeObjectId())
+          .setUpdateID(transactionLogIndex)
+          .setParentObjectID(pathInfoV1.getLastKnownParentId())
+          .build();
+
+      // Add cache entries for the prefix directories.
+      // Skip adding for the file key itself, until Key Commit.
+      OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager,
+              Optional.absent(), Optional.of(missingParentInfos),
+              transactionLogIndex);
+
+      OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager,
+              multipartKey, omKeyInfo, pathInfoV1.getLeafNodeName(),
+              transactionLogIndex);
+
+      // Add to cache
+      omMetadataManager.getMultipartInfoTable().addCacheEntry(
+          new CacheKey<>(multipartKey),
+          new CacheValue<>(Optional.of(multipartKeyInfo), transactionLogIndex));
+
+      omClientResponse =
+          new S3InitiateMultipartUploadResponseV1(
+              omResponse.setInitiateMultiPartUploadResponse(
+                  MultipartInfoInitiateResponse.newBuilder()
+                      .setVolumeName(requestedVolume)
+                      .setBucketName(requestedBucket)
+                      .setKeyName(keyName)
+                      .setMultipartUploadID(keyArgs.getMultipartUploadID()))
+                  .build(), multipartKeyInfo, omKeyInfo, missingParentInfos);
+
+      result = Result.SUCCESS;
+    } catch (IOException ex) {
+      result = Result.FAILURE;
+      exception = ex;
+      omClientResponse = new S3InitiateMultipartUploadResponse(
+          createErrorOMResponse(omResponse, exception));
+    } finally {
+      addResponseToDoubleBuffer(transactionLogIndex, omClientResponse,
+          ozoneManagerDoubleBufferHelper);
+      if (acquiredBucketLock) {
+        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK,
+            volumeName, bucketName);
+      }
+    }
+
+    // audit log
+    auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(
+        OMAction.INITIATE_MULTIPART_UPLOAD, auditMap,
+        exception, getOmRequest().getUserInfo()));
+
+    switch (result) {
+    case SUCCESS:
+      LOG.debug("S3 InitiateMultipart Upload request for Key {} in " +
+              "Volume/Bucket {}/{} is successfully completed", keyName,
+          volumeName, bucketName);
+      break;
+    case FAILURE:
+      ozoneManager.getMetrics().incNumInitiateMultipartUploadFails();
+      LOG.error("S3 InitiateMultipart Upload request for Key {} in " +
+              "Volume/Bucket {}/{} is failed", keyName, volumeName, bucketName,
+          exception);
+      break;
+    default:
+      LOG.error("Unrecognized Result for S3InitiateMultipartUploadRequest: {}",
+          multipartInfoInitiateRequest);
+    }
+
+    return omClientResponse;
+  }
+
+  /**
+   * Verify om directory result.
+   *
+   * @param keyName           key name
+   * @param omDirectoryResult directory result
+   * @throws OMException if a directory already exists in the given path
+   */
+  private void checkDirectoryResult(String keyName,
+      OMFileRequest.OMDirectoryResult omDirectoryResult) throws OMException {
+    if (omDirectoryResult == DIRECTORY_EXISTS) {
+      throw new OMException("Can not write to directory: " + keyName,
+              OMException.ResultCodes.NOT_A_FILE);
+    }
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
index f1336fc..7bba5bd 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
@@ -146,9 +146,10 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
       // Check for directory exists with same name, if it exists throw error. 
       if (ozoneManager.getEnableFileSystemPaths()) {
         if (checkDirectoryAlreadyExists(volumeName, bucketName, keyName,
-            omMetadataManager)) {
+                omMetadataManager)) {
           throw new OMException("Can not Complete MPU for file: " + keyName +
-              " as there is already directory in the given path", NOT_A_FILE);
+                  " as there is already directory in the given path",
+                  NOT_A_FILE);
         }
       }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponseV1.java
new file mode 100644
index 0000000..ff3e63f
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponseV1.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.om.response.s3.multipart;
+
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+
+import javax.annotation.Nonnull;
+import java.io.IOException;
+import java.util.List;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.MULTIPARTFILEINFO_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
+
+/**
+ * Response for S3 Initiate Multipart Upload request for layout V1.
+ */
+@CleanupTableInfo(cleanupTables = {DIRECTORY_TABLE, OPEN_FILE_TABLE,
+        MULTIPARTFILEINFO_TABLE})
+public class S3InitiateMultipartUploadResponseV1 extends
+        S3InitiateMultipartUploadResponse {
+  private List<OmDirectoryInfo> parentDirInfos;
+
+  public S3InitiateMultipartUploadResponseV1(
+      @Nonnull OMResponse omResponse,
+      @Nonnull OmMultipartKeyInfo omMultipartKeyInfo,
+      @Nonnull OmKeyInfo omKeyInfo,
+      @Nonnull List<OmDirectoryInfo> parentDirInfos) {
+    super(omResponse, omMultipartKeyInfo, omKeyInfo);
+    this.parentDirInfos = parentDirInfos;
+  }
+
+  @Override
+  public void addToDBBatch(OMMetadataManager omMetadataManager,
+      BatchOperation batchOperation) throws IOException {
+
+    /**
+     * Create parent directory entries during MultiPartFileKey Create - do not
+     * wait for File Commit request.
+     */
+    if (parentDirInfos != null) {
+      for (OmDirectoryInfo parentDirInfo : parentDirInfos) {
+        String parentKey = parentDirInfo.getPath();
+        omMetadataManager.getDirectoryTable().putWithBatch(batchOperation,
+                parentKey, parentDirInfo);
+      }
+    }
+
+    String multipartFileKey =
+            OMFileRequest.addToOpenFileTable(omMetadataManager, batchOperation,
+                    getOmKeyInfo(), getOmMultipartKeyInfo().getUploadID());
+
+    omMetadataManager.getMultipartInfoTable().putWithBatch(batchOperation,
+            multipartFileKey, getOmMultipartKeyInfo());
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestV1.java
new file mode 100644
index 0000000..dac2efe
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestV1.java
@@ -0,0 +1,186 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.om.request.s3.multipart;
+
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.UUID;
+
+/**
+ * Tests the S3 Initiate Multipart Upload request for layout V1
+ * (FS-optimized buckets).
+ */
+public class TestS3InitiateMultipartUploadRequestV1
+    extends TestS3InitiateMultipartUploadRequest {
+
+  @Test
+  public void testValidateAndUpdateCache() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String prefix = "a/b/c/";
+    List<String> dirs = new ArrayList<String>();
+    dirs.add("a");
+    dirs.add("b");
+    dirs.add("c");
+    String fileName = UUID.randomUUID().toString();
+    String keyName = prefix + fileName;
+
+    // Add volume and bucket to DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+        omMetadataManager);
+
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketID = omBucketInfo.getObjectID();
+
+    OMRequest modifiedRequest = doPreExecuteInitiateMPUV1(volumeName,
+        bucketName, keyName);
+
+    S3InitiateMultipartUploadRequestV1 s3InitiateMultipartUploadRequestV1 =
+        new S3InitiateMultipartUploadRequestV1(modifiedRequest);
+
+    OMClientResponse omClientResponse =
+            s3InitiateMultipartUploadRequestV1.validateAndUpdateCache(
+                    ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
+            omClientResponse.getOMResponse().getStatus());
+
+    long parentID = verifyDirectoriesInDB(dirs, bucketID);
+
+    String multipartFileKey = omMetadataManager.getMultipartKey(parentID,
+            fileName, modifiedRequest.getInitiateMultiPartUploadRequest()
+                    .getKeyArgs().getMultipartUploadID());
+
+    OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable()
+            .get(multipartFileKey);
+    Assert.assertNotNull("Failed to find the fileInfo", omKeyInfo);
+    Assert.assertEquals("FileName mismatches!", fileName,
+            omKeyInfo.getKeyName());
+    Assert.assertEquals("ParentId mismatches!", parentID,
+            omKeyInfo.getParentObjectID());
+
+    OmMultipartKeyInfo omMultipartKeyInfo = omMetadataManager
+            .getMultipartInfoTable().get(multipartFileKey);
+    Assert.assertNotNull("Failed to find the multipartFileInfo",
+            omMultipartKeyInfo);
+    Assert.assertEquals("ParentId mismatches!", parentID,
+            omMultipartKeyInfo.getParentID());
+
+    Assert.assertEquals(modifiedRequest.getInitiateMultiPartUploadRequest()
+            .getKeyArgs().getMultipartUploadID(),
+        omMultipartKeyInfo
+            .getUploadID());
+
+    Assert.assertEquals(modifiedRequest.getInitiateMultiPartUploadRequest()
+        .getKeyArgs().getModificationTime(),
+        omKeyInfo
+        .getModificationTime());
+    Assert.assertEquals(modifiedRequest.getInitiateMultiPartUploadRequest()
+            .getKeyArgs().getModificationTime(),
+        omKeyInfo
+            .getCreationTime());
+  }
+
+  @Test
+  public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager);
+
+    OMRequest modifiedRequest = doPreExecuteInitiateMPU(
+        volumeName, bucketName, keyName);
+
+    S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
+        new S3InitiateMultipartUploadRequest(modifiedRequest);
+
+    OMClientResponse omClientResponse =
+        s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager,
+            100L, ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND,
+        omClientResponse.getOMResponse().getStatus());
+
+    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
+        bucketName, keyName, modifiedRequest.getInitiateMultiPartUploadRequest()
+            .getKeyArgs().getMultipartUploadID());
+
+    Assert.assertTrue(omMetadataManager.getOpenKeyTable().isEmpty());
+    Assert.assertTrue(omMetadataManager.getMultipartInfoTable().isEmpty());
+  }
+
+  @Test
+  public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    OMRequest modifiedRequest = doPreExecuteInitiateMPU(volumeName, bucketName,
+        keyName);
+
+    S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
+        new S3InitiateMultipartUploadRequest(modifiedRequest);
+
+    OMClientResponse omClientResponse =
+        s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager,
+            100L, ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND,
+        omClientResponse.getOMResponse().getStatus());
+
+    Assert.assertTrue(omMetadataManager.getOpenKeyTable().isEmpty());
+    Assert.assertTrue(omMetadataManager.getMultipartInfoTable().isEmpty());
+  }
+
+  private long verifyDirectoriesInDB(List<String> dirs, long bucketID)
+      throws IOException {
+    // bucketID is the parent
+    long parentID = bucketID;
+    for (int indx = 0; indx < dirs.size(); indx++) {
+      String dirName = dirs.get(indx);
+      String dbKey = "";
+      // for index=0, parentID is bucketID
+      dbKey = omMetadataManager.getOzonePathKey(parentID, dirName);
+      OmDirectoryInfo omDirInfo =
+              omMetadataManager.getDirectoryTable().get(dbKey);
+      Assert.assertNotNull("Invalid directory!", omDirInfo);
+      Assert.assertEquals("Invalid directory!", dirName, omDirInfo.getName());
+      Assert.assertEquals("Invalid dir path!",
+              parentID + "/" + dirName, omDirInfo.getPath());
+      parentID = omDirInfo.getObjectID();
+    }
+    return parentID;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
index f2c5b66..641ee8d 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
@@ -218,4 +218,33 @@ public class TestS3MultipartRequest {
   }
 
 
+  /**
+   * Perform preExecute of Initiate Multipart upload request for given
+   * volume, bucket and key name.
+   * @param volumeName
+   * @param bucketName
+   * @param keyName
+   * @return OMRequest - returned from preExecute.
+   */
+  protected OMRequest doPreExecuteInitiateMPUV1(
+      String volumeName, String bucketName, String keyName) throws Exception {
+    OMRequest omRequest =
+            TestOMRequestUtils.createInitiateMPURequest(volumeName, bucketName,
+                    keyName);
+
+    S3InitiateMultipartUploadRequestV1 s3InitiateMultipartUploadRequestV1 =
+            new S3InitiateMultipartUploadRequestV1(omRequest);
+
+    OMRequest modifiedRequest =
+            s3InitiateMultipartUploadRequestV1.preExecute(ozoneManager);
+
+    Assert.assertNotEquals(omRequest, modifiedRequest);
+    Assert.assertTrue(modifiedRequest.hasInitiateMultiPartUploadRequest());
+    Assert.assertNotNull(modifiedRequest.getInitiateMultiPartUploadRequest()
+            .getKeyArgs().getMultipartUploadID());
+    Assert.assertTrue(modifiedRequest.getInitiateMultiPartUploadRequest()
+            .getKeyArgs().getModificationTime() > 0);
+
+    return modifiedRequest;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponse.java
index 4996bd0..03065ab 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponse.java
@@ -31,7 +31,7 @@ public class TestS3InitiateMultipartUploadResponse
     extends TestS3MultipartResponse {
 
   @Test
-  public void addDBToBatch() throws Exception {
+  public void testAddDBToBatch() throws Exception {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
     String keyName = UUID.randomUUID().toString();
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponseV1.java
new file mode 100644
index 0000000..31f9e5a
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponseV1.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.om.response.s3.multipart;
+
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.UUID;
+
+/**
+ * Class tests S3 Initiate MPU response.
+ */
+public class TestS3InitiateMultipartUploadResponseV1
+    extends TestS3InitiateMultipartUploadResponse {
+
+  @Test
+  public void testAddDBToBatch() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String prefix = "a/b/c/d/";
+    List<String> dirs = new ArrayList<String>();
+    dirs.add("a");
+    dirs.add("b");
+    dirs.add("c");
+    dirs.add("d");
+    String fileName = UUID.randomUUID().toString();
+    String keyName = prefix + fileName;
+
+    String multipartUploadID = UUID.randomUUID().toString();
+
+    long parentID = 1027; // assume objectID of dir path "a/b/c/d" is 1027
+    List<OmDirectoryInfo> parentDirInfos = new ArrayList<>();
+
+    S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponseV1 =
+            createS3InitiateMPUResponseV1(volumeName, bucketName, parentID,
+                    keyName, multipartUploadID, parentDirInfos);
+
+    s3InitiateMultipartUploadResponseV1.addToDBBatch(omMetadataManager,
+        batchOperation);
+
+    // Do manual commit and see whether addToBatch is successful or not.
+    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+
+    String multipartKey = omMetadataManager.getMultipartKey(parentID, fileName,
+            multipartUploadID);
+
+    OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(multipartKey);
+    Assert.assertNotNull("Failed to find the fileInfo", omKeyInfo);
+    Assert.assertEquals("FileName mismatches!", fileName,
+            omKeyInfo.getKeyName());
+    Assert.assertEquals("ParentId mismatches!", parentID,
+            omKeyInfo.getParentObjectID());
+
+    OmMultipartKeyInfo omMultipartKeyInfo = omMetadataManager
+            .getMultipartInfoTable().get(multipartKey);
+    Assert.assertNotNull("Failed to find the multipartFileInfo",
+            omMultipartKeyInfo);
+    Assert.assertEquals("ParentId mismatches!", parentID,
+            omMultipartKeyInfo.getParentID());
+
+    Assert.assertEquals("Upload Id mismatches!", multipartUploadID,
+            omMultipartKeyInfo.getUploadID());
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
index 4f50d9e..76ceb0e 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.om.response.s3.multipart;
 
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.List;
 import java.util.UUID;
 
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
@@ -30,9 +31,11 @@ import org.junit.Rule;
 import org.junit.rules.TemporaryFolder;
 
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .KeyInfo;
@@ -152,4 +155,47 @@ public class TestS3MultipartResponse {
             .setType(HddsProtos.ReplicationType.RATIS)
             .setFactor(HddsProtos.ReplicationFactor.ONE).build()).build();
   }
+
+
+  public S3InitiateMultipartUploadResponse createS3InitiateMPUResponseV1(
+      String volumeName, String bucketName, long parentID, String keyName,
+      String multipartUploadID, List<OmDirectoryInfo> parentDirInfos) {
+    OmMultipartKeyInfo multipartKeyInfo = new OmMultipartKeyInfo.Builder()
+            .setUploadID(multipartUploadID)
+            .setCreationTime(Time.now())
+            .setReplicationType(HddsProtos.ReplicationType.RATIS)
+            .setReplicationFactor(HddsProtos.ReplicationFactor.ONE)
+            .setParentID(parentID)
+            .build();
+
+    String fileName = OzoneFSUtils.getFileName(keyName);
+
+    OmKeyInfo omKeyInfo = new OmKeyInfo.Builder()
+            .setVolumeName(volumeName)
+            .setBucketName(bucketName)
+            .setKeyName(fileName)
+            .setFileName(fileName)
+            .setCreationTime(Time.now())
+            .setModificationTime(Time.now())
+            .setReplicationType(HddsProtos.ReplicationType.RATIS)
+            .setReplicationFactor(HddsProtos.ReplicationFactor.ONE)
+            .setOmKeyLocationInfos(Collections.singletonList(
+                    new OmKeyLocationInfoGroup(0, new ArrayList<>())))
+            .setParentObjectID(parentID)
+            .build();
+
+    OMResponse omResponse = OMResponse.newBuilder()
+            .setCmdType(OzoneManagerProtocolProtos.Type.InitiateMultiPartUpload)
+            .setStatus(OzoneManagerProtocolProtos.Status.OK)
+            .setSuccess(true).setInitiateMultiPartUploadResponse(
+                    OzoneManagerProtocolProtos.MultipartInfoInitiateResponse
+                            .newBuilder().setVolumeName(volumeName)
+                            .setBucketName(bucketName)
+                            .setKeyName(keyName)
+                            .setMultipartUploadID(multipartUploadID)).build();
+
+    return new S3InitiateMultipartUploadResponseV1(omResponse, multipartKeyInfo,
+            omKeyInfo, parentDirInfos);
+  }
+
 }

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 23/29: HDDS-4924. [FSO]S3Multipart: Implement OzoneBucket#listParts (#2016)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 2b657cd4d5800a57ca62964aacbe0fd009532ee5
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Wed Mar 10 22:04:39 2021 +0530

    HDDS-4924. [FSO]S3Multipart: Implement OzoneBucket#listParts (#2016)
---
 .../hadoop/ozone/om/helpers/OzoneFSUtils.java      |   2 +-
 .../rpc/TestOzoneClientMultipartUploadV1.java      | 265 +++++++++++++++++++++
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  58 ++++-
 .../S3MultipartUploadCompleteRequest.java          |   1 -
 4 files changed, 321 insertions(+), 5 deletions(-)

diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
index c63c21f..f1f6454 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
@@ -178,7 +178,7 @@ public final class OzoneFSUtils {
     if (fileName != null) {
       return fileName.toString();
     }
-    // failed to find a parent directory.
+    // no parent directory.
     return "";
   }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java
index 76feec8..0e981d6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java
@@ -18,15 +18,18 @@
 package org.apache.hadoop.ozone.client.rpc;
 
 import org.apache.commons.lang3.RandomUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.OzoneTestUtils;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
@@ -48,6 +51,7 @@ import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -58,9 +62,11 @@ import org.junit.rules.Timeout;
 import java.io.IOException;
 import java.nio.file.Path;
 import java.nio.file.Paths;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.UUID;
@@ -566,6 +572,265 @@ public class TestOzoneClientMultipartUploadV1 {
     // not making any assertion for the same.
   }
 
+  @Test
+  public void testListMultipartUploadParts() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String parentDir = "a/b/c/d/e/f/";
+    String keyName = parentDir + "file-ABC";
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    Map<Integer, String> partsMap = new TreeMap<>();
+    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+        ONE);
+    String partName1 = uploadPart(bucket, keyName, uploadID, 1,
+        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
+    partsMap.put(1, partName1);
+
+    String partName2 =uploadPart(bucket, keyName, uploadID, 2,
+        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
+    partsMap.put(2, partName2);
+
+    String partName3 =uploadPart(bucket, keyName, uploadID, 3,
+        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
+    partsMap.put(3, partName3);
+
+    OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
+        bucket.listParts(keyName, uploadID, 0, 3);
+
+    Assert.assertEquals(STAND_ALONE,
+        ozoneMultipartUploadPartListParts.getReplicationType());
+    Assert.assertEquals(3,
+        ozoneMultipartUploadPartListParts.getPartInfoList().size());
+
+    verifyPartNamesInDB(volumeName, bucketName, parentDir, keyName, partsMap,
+        ozoneMultipartUploadPartListParts, uploadID);
+
+    Assert.assertFalse(ozoneMultipartUploadPartListParts.isTruncated());
+  }
+
+  private void verifyPartNamesInDB(String volumeName, String bucketName,
+      String parentDir, String keyName, Map<Integer, String> partsMap,
+      OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts,
+      String uploadID) throws IOException {
+
+    List<String> listPartNames = new ArrayList<>();
+    String keyPartName = verifyPartNames(partsMap, 0,
+        ozoneMultipartUploadPartListParts);
+    listPartNames.add(keyPartName);
+
+    keyPartName = verifyPartNames(partsMap, 1,
+        ozoneMultipartUploadPartListParts);
+    listPartNames.add(keyPartName);
+
+    keyPartName = verifyPartNames(partsMap, 2,
+        ozoneMultipartUploadPartListParts);
+    listPartNames.add(keyPartName);
+
+    OMMetadataManager metadataMgr =
+        cluster.getOzoneManager().getMetadataManager();
+    String multipartKey = getMultipartKey(uploadID, volumeName, bucketName,
+        keyName, metadataMgr);
+    OmMultipartKeyInfo omMultipartKeyInfo =
+        metadataMgr.getMultipartInfoTable().get(multipartKey);
+    Assert.assertNotNull(omMultipartKeyInfo);
+
+    long parentID = getParentID(volumeName, bucketName, keyName, metadataMgr);
+    TreeMap<Integer, OzoneManagerProtocolProtos.PartKeyInfo> partKeyInfoMap =
+        omMultipartKeyInfo.getPartKeyInfoMap();
+    for (Map.Entry<Integer, OzoneManagerProtocolProtos.PartKeyInfo> entry :
+        partKeyInfoMap.entrySet()) {
+      OzoneManagerProtocolProtos.PartKeyInfo partKeyInfo = entry.getValue();
+      String partKeyName = partKeyInfo.getPartName();
+
+      // partKeyName format in DB - <parentID>/partFileName + ClientID
+      Assert.assertTrue("Invalid partKeyName format in DB",
+          partKeyName.startsWith(parentID + OzoneConsts.OM_KEY_PREFIX));
+      partKeyName = StringUtils.remove(partKeyName,
+          parentID + OzoneConsts.OM_KEY_PREFIX);
+
+      // reconstruct full part name with volume, bucket, partKeyName
+      String fullKeyPartName = metadataMgr.getOzoneKey(volumeName, bucketName,
+          parentDir + partKeyName);
+
+      listPartNames.remove(fullKeyPartName);
+    }
+
+    Assert.assertTrue("Wrong partKeyName format in DB!",
+        listPartNames.isEmpty());
+  }
+
+  private String verifyPartNames(Map<Integer, String> partsMap, int index,
+      OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts) {
+
+    Assert.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts
+            .getPartInfoList().get(index).getPartNumber()),
+        ozoneMultipartUploadPartListParts.getPartInfoList().get(index)
+            .getPartName());
+
+    return ozoneMultipartUploadPartListParts.getPartInfoList().get(index)
+        .getPartName();
+  }
+
+  @Test
+  public void testListMultipartUploadPartsWithContinuation()
+      throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    Map<Integer, String> partsMap = new TreeMap<>();
+    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+        ONE);
+    String partName1 = uploadPart(bucket, keyName, uploadID, 1,
+        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
+    partsMap.put(1, partName1);
+
+    String partName2 =uploadPart(bucket, keyName, uploadID, 2,
+        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
+    partsMap.put(2, partName2);
+
+    String partName3 =uploadPart(bucket, keyName, uploadID, 3,
+        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
+    partsMap.put(3, partName3);
+
+    OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
+        bucket.listParts(keyName, uploadID, 0, 2);
+
+    Assert.assertEquals(STAND_ALONE,
+        ozoneMultipartUploadPartListParts.getReplicationType());
+
+    Assert.assertEquals(2,
+        ozoneMultipartUploadPartListParts.getPartInfoList().size());
+
+    Assert.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts
+            .getPartInfoList().get(0).getPartNumber()),
+        ozoneMultipartUploadPartListParts.getPartInfoList().get(0)
+            .getPartName());
+    Assert.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts
+            .getPartInfoList().get(1).getPartNumber()),
+        ozoneMultipartUploadPartListParts.getPartInfoList().get(1)
+            .getPartName());
+
+    // Get remaining
+    Assert.assertTrue(ozoneMultipartUploadPartListParts.isTruncated());
+    ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID,
+        ozoneMultipartUploadPartListParts.getNextPartNumberMarker(), 2);
+
+    Assert.assertEquals(1,
+        ozoneMultipartUploadPartListParts.getPartInfoList().size());
+    Assert.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts
+            .getPartInfoList().get(0).getPartNumber()),
+        ozoneMultipartUploadPartListParts.getPartInfoList().get(0)
+            .getPartName());
+
+
+    // As we don't have any parts for this, we should get false here
+    Assert.assertFalse(ozoneMultipartUploadPartListParts.isTruncated());
+
+  }
+
+  @Test
+  public void testListPartsInvalidPartMarker() throws Exception {
+    try {
+      String volumeName = UUID.randomUUID().toString();
+      String bucketName = UUID.randomUUID().toString();
+      String keyName = UUID.randomUUID().toString();
+
+      store.createVolume(volumeName);
+      OzoneVolume volume = store.getVolume(volumeName);
+      volume.createBucket(bucketName);
+      OzoneBucket bucket = volume.getBucket(bucketName);
+
+      bucket.listParts(keyName, "random", -1, 2);
+      Assert.fail("Should throw exception as partNumber is an invalid number!");
+    } catch (IllegalArgumentException ex) {
+      GenericTestUtils.assertExceptionContains("Should be greater than or "
+          + "equal to zero", ex);
+    }
+  }
+
+  @Test
+  public void testListPartsInvalidMaxParts() throws Exception {
+    try {
+      String volumeName = UUID.randomUUID().toString();
+      String bucketName = UUID.randomUUID().toString();
+      String keyName = UUID.randomUUID().toString();
+
+      store.createVolume(volumeName);
+      OzoneVolume volume = store.getVolume(volumeName);
+      volume.createBucket(bucketName);
+      OzoneBucket bucket = volume.getBucket(bucketName);
+
+      bucket.listParts(keyName, "random", 1, -1);
+      Assert.fail("Should throw exception as max parts is an invalid number!");
+    } catch (IllegalArgumentException ex) {
+      GenericTestUtils.assertExceptionContains("Max Parts Should be greater "
+          + "than zero", ex);
+    }
+  }
+
+  @Test
+  public void testListPartsWithPartMarkerGreaterThanPartCount()
+      throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+
+    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+        ONE);
+    uploadPart(bucket, keyName, uploadID, 1,
+        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
+
+
+    OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
+        bucket.listParts(keyName, uploadID, 100, 2);
+
+    // Should return empty
+
+    Assert.assertEquals(0,
+        ozoneMultipartUploadPartListParts.getPartInfoList().size());
+    Assert.assertEquals(STAND_ALONE,
+        ozoneMultipartUploadPartListParts.getReplicationType());
+
+    // As we don't have any parts with greater than partNumberMarker and list
+    // is not truncated, so it should return false here.
+    Assert.assertFalse(ozoneMultipartUploadPartListParts.isTruncated());
+
+  }
+
+  @Test
+  public void testListPartsWithInvalidUploadID() throws Exception {
+    OzoneTestUtils
+        .expectOmException(NO_SUCH_MULTIPART_UPLOAD_ERROR, () -> {
+          String volumeName = UUID.randomUUID().toString();
+          String bucketName = UUID.randomUUID().toString();
+          String keyName = UUID.randomUUID().toString();
+
+          store.createVolume(volumeName);
+          OzoneVolume volume = store.getVolume(volumeName);
+          volume.createBucket(bucketName);
+          OzoneBucket bucket = volume.getBucket(bucketName);
+          OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
+              bucket.listParts(keyName, "random", 100, 2);
+        });
+  }
+
   private String verifyUploadedPart(String volumeName, String bucketName,
       String keyName, String uploadID, String partName,
       OMMetadataManager metadataMgr) throws IOException {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 24afc5f..f45a96b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -1416,8 +1416,8 @@ public class KeyManagerImpl implements KeyManager {
     metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName,
         bucketName);
     try {
-      String multipartKey = metadataManager.getMultipartKey(volumeName,
-          bucketName, keyName, uploadID);
+      String multipartKey = getMultipartKey(volumeName, bucketName,
+          keyName, uploadID);
 
       OmMultipartKeyInfo multipartKeyInfo =
           metadataManager.getMultipartInfoTable().get(multipartKey);
@@ -1445,8 +1445,10 @@ public class KeyManagerImpl implements KeyManager {
           // than part number marker
           if (partKeyInfoEntry.getKey() > partNumberMarker) {
             PartKeyInfo partKeyInfo = partKeyInfoEntry.getValue();
+            String partName = getPartName(partKeyInfo, volumeName, bucketName,
+                keyName);
             OmPartInfo omPartInfo = new OmPartInfo(partKeyInfo.getPartNumber(),
-                partKeyInfo.getPartName(),
+                partName,
                 partKeyInfo.getPartKeyInfo().getModificationTime(),
                 partKeyInfo.getPartKeyInfo().getDataSize());
             omPartInfoList.add(omPartInfo);
@@ -1506,6 +1508,56 @@ public class KeyManagerImpl implements KeyManager {
     }
   }
 
+  private String getPartName(PartKeyInfo partKeyInfo, String volName,
+                             String buckName, String keyName) {
+
+    String partName = partKeyInfo.getPartName();
+
+    if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
+      String parentDir = OzoneFSUtils.getParentDir(keyName);
+      String partFileName = OzoneFSUtils.getFileName(partKeyInfo.getPartName());
+
+      StringBuilder fullKeyPartName = new StringBuilder();
+      fullKeyPartName.append(OZONE_URI_DELIMITER);
+      fullKeyPartName.append(volName);
+      fullKeyPartName.append(OZONE_URI_DELIMITER);
+      fullKeyPartName.append(buckName);
+      if (StringUtils.isNotEmpty(parentDir)) {
+        fullKeyPartName.append(OZONE_URI_DELIMITER);
+        fullKeyPartName.append(parentDir);
+      }
+      fullKeyPartName.append(OZONE_URI_DELIMITER);
+      fullKeyPartName.append(partFileName);
+
+      return fullKeyPartName.toString();
+    }
+    return partName;
+  }
+
+  private String getMultipartKey(String volumeName, String bucketName,
+      String keyName, String uploadID) throws IOException {
+
+    if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
+      OMMetadataManager metaMgr = ozoneManager.getMetadataManager();
+      String fileName = OzoneFSUtils.getFileName(keyName);
+      Iterator<Path> pathComponents = Paths.get(keyName).iterator();
+      String bucketKey = metaMgr.getBucketKey(volumeName, bucketName);
+      OmBucketInfo omBucketInfo =
+          metaMgr.getBucketTable().get(bucketKey);
+      long bucketId = omBucketInfo.getObjectID();
+      long parentID = OMFileRequest.getParentID(bucketId, pathComponents,
+          keyName, metaMgr);
+
+      String multipartKey = metaMgr.getMultipartKey(parentID, fileName,
+          uploadID);
+
+      return multipartKey;
+    } else {
+      return metadataManager.getMultipartKey(volumeName,
+          bucketName, keyName, uploadID);
+    }
+  }
+
   /**
    * Add acl for Ozone object. Return true if acl is added successfully else
    * false.
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
index 162cf2f..8fcf992 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 19/29: HDDS-4513.[FSO]OzoneContract unit test case fixes (#1945)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit f61d74e53a74974b31f7a3bfdb80aa6a9c69ed74
Author: Sadanand Shenoy <sa...@gmail.com>
AuthorDate: Thu Feb 25 10:09:42 2021 +0530

    HDDS-4513.[FSO]OzoneContract unit test case fixes (#1945)
---
 .../hadoop/fs/ozone/TestOzoneFileSystemV1.java     | 61 +++-------------------
 .../ozone/contract/ITestOzoneContractCreate.java   | 26 +++++++--
 .../ozone/contract/ITestOzoneContractDelete.java   | 26 +++++++--
 .../contract/ITestOzoneContractGetFileStatus.java  | 27 ++++++++--
 .../fs/ozone/contract/ITestOzoneContractMkdir.java | 26 +++++++--
 .../fs/ozone/contract/ITestOzoneContractOpen.java  | 27 ++++++++--
 .../ozone/contract/ITestOzoneContractRename.java   | 26 +++++++--
 .../ozone/contract/ITestOzoneContractRootDir.java  | 26 +++++++--
 .../fs/ozone/contract/ITestOzoneContractSeek.java  | 27 ++++++++--
 .../ozone/contract/ITestOzoneContractUnbuffer.java | 26 +++++++--
 .../fs/ozone/contract/ITestOzoneContractUtils.java | 60 +++++++++++++++++++++
 .../hadoop/fs/ozone/contract/OzoneContract.java    | 13 +++++
 .../hadoop/fs/ozone/BasicOzoneFileSystem.java      | 11 ++--
 13 files changed, 288 insertions(+), 94 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
index eb7eaca..ed62990 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
@@ -24,7 +24,6 @@ import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.junit.Assert;
 import org.junit.After;
 import org.junit.BeforeClass;
@@ -278,36 +277,6 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
   }
 
   /**
-   * Case-1) fromKeyName should exist, otw throws exception.
-   */
-  @Test
-  public void testRenameWithNonExistentSource() throws Exception {
-    // Skip as this will run only in new layout
-    if (!isEnabledFileSystemPaths()) {
-      return;
-    }
-
-    final String root = "/root";
-    final String dir1 = root + "/dir1";
-    final String dir2 = root + "/dir2";
-    final Path source = new Path(getFs().getUri().toString() + dir1);
-    final Path destin = new Path(getFs().getUri().toString() + dir2);
-
-    // creates destin
-    getFs().mkdirs(destin);
-    LOG.info("Created destin dir: {}", destin);
-
-    LOG.info("Rename op-> source:{} to destin:{}}", source, destin);
-    try {
-      getFs().rename(source, destin);
-      Assert.fail("Should throw exception : Source doesn't exist!");
-    } catch (OMException ome) {
-      // expected
-      assertEquals(ome.getResult(), OMException.ResultCodes.KEY_NOT_FOUND);
-    }
-  }
-
-  /**
    * Case-2) Cannot rename a directory to its own subdirectory.
    */
   @Test
@@ -327,14 +296,8 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
 
     final Path sourceRoot = new Path(getFs().getUri().toString() + root);
     LOG.info("Rename op-> source:{} to destin:{}", sourceRoot, subDir1);
-    try {
-      getFs().rename(sourceRoot, subDir1);
-      Assert.fail("Should throw exception : Cannot rename a directory to" +
-              " its own subdirectory");
-    } catch (OMException ome) {
-      // expected
-      assertEquals(ome.getResult(), OMException.ResultCodes.KEY_RENAME_ERROR);
-    }
+    //  rename should fail and return false
+    Assert.assertFalse(getFs().rename(sourceRoot, subDir1));
   }
 
   /**
@@ -354,30 +317,18 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
     final String dir2 = dir1 + "/dir2";
     final Path dir2SourcePath = new Path(getFs().getUri().toString() + dir2);
     getFs().mkdirs(dir2SourcePath);
-
     // (a) parent of dst does not exist.  /root_dir/b/c
     final Path destinPath = new Path(getFs().getUri().toString()
             + root + "/b/c");
-    try {
-      getFs().rename(dir2SourcePath, destinPath);
-      Assert.fail("Should fail as parent of dst does not exist!");
-    } catch (OMException ome) {
-      // expected
-      assertEquals(ome.getResult(), OMException.ResultCodes.KEY_RENAME_ERROR);
-    }
 
+    // rename should fail and return false
+    Assert.assertFalse(getFs().rename(dir2SourcePath, destinPath));
     // (b) parent of dst is a file. /root_dir/file1/c
     Path filePath = new Path(getFs().getUri().toString() + root + "/file1");
     ContractTestUtils.touch(getFs(), filePath);
-
     Path newDestinPath = new Path(filePath, "c");
-    try {
-      getFs().rename(dir2SourcePath, newDestinPath);
-      Assert.fail("Should fail as parent of dst is a file!");
-    } catch (OMException ome) {
-      // expected
-      assertEquals(ome.getResult(), OMException.ResultCodes.KEY_RENAME_ERROR);
-    }
+    // rename should fail and return false
+    Assert.assertFalse(getFs().rename(dir2SourcePath, newDestinPath));
   }
 
   @Override
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java
index 19ff428..034cf1e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java
@@ -19,22 +19,35 @@
 package org.apache.hadoop.fs.ozone.contract;
 
 import java.io.IOException;
+import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 
 import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 /**
  * Ozone contract tests creating files.
  */
+@RunWith(Parameterized.class)
 public class ITestOzoneContractCreate extends AbstractContractCreateTest {
 
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
+  private static boolean fsOptimizedServer;
+
+  public ITestOzoneContractCreate(boolean fsoServer)
+      throws IOException {
+    if (fsOptimizedServer != fsoServer) {
+      setFsOptimizedServer(fsoServer);
+      ITestOzoneContractUtils.restartCluster(
+          fsOptimizedServer);
+    }
+  }
+
+  public static void setFsOptimizedServer(boolean fsOptimizedServer) {
+    ITestOzoneContractCreate.fsOptimizedServer = fsOptimizedServer;
   }
 
   @AfterClass
@@ -46,4 +59,9 @@ public class ITestOzoneContractCreate extends AbstractContractCreateTest {
   protected AbstractFSContract createContract(Configuration conf) {
     return new OzoneContract(conf);
   }
+
+  @Parameterized.Parameters
+  public static Collection data() {
+    return ITestOzoneContractUtils.getFsoCombinations();
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java
index 33e6260..1381a2c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java
@@ -19,22 +19,35 @@
 package org.apache.hadoop.fs.ozone.contract;
 
 import java.io.IOException;
+import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 
 import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 /**
  * Ozone contract tests covering deletes.
  */
+@RunWith(Parameterized.class)
 public class ITestOzoneContractDelete extends AbstractContractDeleteTest {
 
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
+  private static boolean fsOptimizedServer;
+
+  public ITestOzoneContractDelete(boolean fsoServer)
+      throws IOException {
+    if (fsOptimizedServer != fsoServer) {
+      setFsOptimizedServer(fsoServer);
+      ITestOzoneContractUtils.restartCluster(
+          fsOptimizedServer);
+    }
+  }
+
+  public static void setFsOptimizedServer(boolean fsOptimizedServer) {
+    ITestOzoneContractDelete.fsOptimizedServer = fsOptimizedServer;
   }
 
   @AfterClass
@@ -46,4 +59,9 @@ public class ITestOzoneContractDelete extends AbstractContractDeleteTest {
   protected AbstractFSContract createContract(Configuration conf) {
     return new OzoneContract(conf);
   }
+
+  @Parameterized.Parameters
+  public static Collection data() {
+    return ITestOzoneContractUtils.getFsoCombinations();
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java
index 9d9aa56..04a3fb5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java
@@ -19,28 +19,42 @@
 package org.apache.hadoop.fs.ozone.contract;
 
 import java.io.IOException;
+import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 
 import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
  * Ozone contract tests covering getFileStatus.
  */
+@RunWith(Parameterized.class)
 public class ITestOzoneContractGetFileStatus
     extends AbstractContractGetFileStatusTest {
 
+  private static boolean fsOptimizedServer;
+
+  public ITestOzoneContractGetFileStatus(boolean fsoServer)
+      throws IOException {
+    if (fsOptimizedServer != fsoServer) {
+      setFsOptimizedServer(fsoServer);
+      ITestOzoneContractUtils.restartCluster(
+          fsOptimizedServer);
+    }
+  }
+
   private static final Logger LOG =
       LoggerFactory.getLogger(ITestOzoneContractGetFileStatus.class);
 
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
+
+  public static void setFsOptimizedServer(boolean fsOptimizedServer) {
+    ITestOzoneContractGetFileStatus.fsOptimizedServer = fsOptimizedServer;
   }
 
   @AfterClass
@@ -63,4 +77,9 @@ public class ITestOzoneContractGetFileStatus
   protected Configuration createConfiguration() {
     return super.createConfiguration();
   }
+
+  @Parameterized.Parameters
+  public static Collection data() {
+    return ITestOzoneContractUtils.getFsoCombinations();
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java
index 305164c..862b2b9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java
@@ -19,22 +19,35 @@
 package org.apache.hadoop.fs.ozone.contract;
 
 import java.io.IOException;
+import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 
 import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 /**
  * Test dir operations on Ozone.
  */
+@RunWith(Parameterized.class)
 public class ITestOzoneContractMkdir extends AbstractContractMkdirTest {
 
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
+  private static boolean fsOptimizedServer;
+
+  public ITestOzoneContractMkdir(boolean fsoServer)
+      throws IOException {
+    if (fsOptimizedServer != fsoServer) {
+      setFsOptimizedServer(fsoServer);
+      ITestOzoneContractUtils.restartCluster(
+          fsOptimizedServer);
+    }
+  }
+
+  public static void setFsOptimizedServer(boolean fsOptimizedServer) {
+    ITestOzoneContractMkdir.fsOptimizedServer = fsOptimizedServer;
   }
 
   @AfterClass
@@ -46,4 +59,9 @@ public class ITestOzoneContractMkdir extends AbstractContractMkdirTest {
   protected AbstractFSContract createContract(Configuration conf) {
     return new OzoneContract(conf);
   }
+
+  @Parameterized.Parameters
+  public static Collection data() {
+    return ITestOzoneContractUtils.getFsoCombinations();
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java
index aa81965..83a6306 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java
@@ -19,21 +19,35 @@
 package org.apache.hadoop.fs.ozone.contract;
 
 import java.io.IOException;
+import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 
 import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 /**
  * Ozone contract tests opening files.
  */
+@RunWith(Parameterized.class)
 public class ITestOzoneContractOpen extends AbstractContractOpenTest {
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
+
+  private static boolean fsOptimizedServer;
+
+  public ITestOzoneContractOpen(boolean fsoServer)
+      throws IOException {
+    if (fsOptimizedServer != fsoServer) {
+      setFsOptimizedServer(fsoServer);
+      ITestOzoneContractUtils.restartCluster(
+          fsOptimizedServer);
+    }
+  }
+
+  public static void setFsOptimizedServer(boolean fsOptimizedServer) {
+    ITestOzoneContractOpen.fsOptimizedServer = fsOptimizedServer;
   }
 
   @AfterClass
@@ -45,4 +59,9 @@ public class ITestOzoneContractOpen extends AbstractContractOpenTest {
   protected AbstractFSContract createContract(Configuration conf) {
     return new OzoneContract(conf);
   }
+
+  @Parameterized.Parameters
+  public static Collection data() {
+    return ITestOzoneContractUtils.getFsoCombinations();
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java
index 3660d81..2fa1c64 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java
@@ -19,22 +19,35 @@
 package org.apache.hadoop.fs.ozone.contract;
 
 import java.io.IOException;
+import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 
 import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 /**
  * Ozone contract tests covering rename.
  */
+@RunWith(Parameterized.class)
 public class ITestOzoneContractRename extends AbstractContractRenameTest {
 
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
+  private static boolean fsOptimizedServer;
+
+  public ITestOzoneContractRename(boolean fsoServer)
+      throws IOException {
+    if (fsOptimizedServer != fsoServer) {
+      setFsOptimizedServer(fsoServer);
+      ITestOzoneContractUtils.restartCluster(
+          fsOptimizedServer);
+    }
+  }
+
+  public static void setFsOptimizedServer(boolean fsOptimizedServer) {
+    ITestOzoneContractRename.fsOptimizedServer = fsOptimizedServer;
   }
 
   @AfterClass
@@ -47,4 +60,9 @@ public class ITestOzoneContractRename extends AbstractContractRenameTest {
     return new OzoneContract(conf);
   }
 
+
+  @Parameterized.Parameters
+  public static Collection data() {
+    return ITestOzoneContractUtils.getFsoCombinations();
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java
index c64dafa..5ca5bc3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java
@@ -19,23 +19,36 @@
 package org.apache.hadoop.fs.ozone.contract;
 
 import java.io.IOException;
+import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 
 import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 /**
  * Ozone contract test for ROOT directory operations.
  */
+@RunWith(Parameterized.class)
 public class ITestOzoneContractRootDir extends
     AbstractContractRootDirectoryTest {
 
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
+  private static boolean fsOptimizedServer;
+
+  public ITestOzoneContractRootDir(boolean fsoServer)
+      throws IOException {
+    if (fsOptimizedServer != fsoServer) {
+      setFsOptimizedServer(fsoServer);
+      ITestOzoneContractUtils.restartCluster(
+          fsOptimizedServer);
+    }
+  }
+
+  public static void setFsOptimizedServer(boolean fsOptimizedServer) {
+    ITestOzoneContractRootDir.fsOptimizedServer = fsOptimizedServer;
   }
 
   @AfterClass
@@ -48,4 +61,9 @@ public class ITestOzoneContractRootDir extends
     return new OzoneContract(conf);
   }
 
+  @Parameterized.Parameters
+  public static Collection data() {
+    return ITestOzoneContractUtils.getFsoCombinations();
+  }
+
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java
index 2f22025..9457bb8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java
@@ -19,21 +19,35 @@
 package org.apache.hadoop.fs.ozone.contract;
 
 import java.io.IOException;
+import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 
 import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 /**
  * Ozone contract tests covering file seek.
  */
+@RunWith(Parameterized.class)
 public class ITestOzoneContractSeek extends AbstractContractSeekTest {
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
+
+  private static boolean fsOptimizedServer;
+
+  public ITestOzoneContractSeek(boolean fsoServer)
+      throws IOException {
+    if (fsOptimizedServer != fsoServer) {
+      setFsOptimizedServer(fsoServer);
+      ITestOzoneContractUtils.restartCluster(
+          fsOptimizedServer);
+    }
+  }
+
+  public static void setFsOptimizedServer(boolean fsOptimizedServer) {
+    ITestOzoneContractSeek.fsOptimizedServer = fsOptimizedServer;
   }
 
   @AfterClass
@@ -45,4 +59,9 @@ public class ITestOzoneContractSeek extends AbstractContractSeekTest {
   protected AbstractFSContract createContract(Configuration conf) {
     return new OzoneContract(conf);
   }
+
+  @Parameterized.Parameters
+  public static Collection data() {
+    return ITestOzoneContractUtils.getFsoCombinations();
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUnbuffer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUnbuffer.java
index e40b22e..7f55774 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUnbuffer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUnbuffer.java
@@ -21,18 +21,31 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractUnbufferTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 import java.io.IOException;
+import java.util.Collection;
 
 /**
  * Ozone contract tests for {@link org.apache.hadoop.fs.CanUnbuffer#unbuffer}.
  */
+@RunWith(Parameterized.class)
 public class ITestOzoneContractUnbuffer extends AbstractContractUnbufferTest {
 
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
+  private static boolean fsOptimizedServer;
+
+  public ITestOzoneContractUnbuffer(boolean fsoServer)
+      throws IOException {
+    if (fsOptimizedServer != fsoServer) {
+      setFsOptimizedServer(fsoServer);
+      ITestOzoneContractUtils.restartCluster(
+          fsOptimizedServer);
+    }
+  }
+
+  public static void setFsOptimizedServer(boolean fsOptimizedServer) {
+    ITestOzoneContractUnbuffer.fsOptimizedServer = fsOptimizedServer;
   }
 
   @AfterClass
@@ -44,4 +57,9 @@ public class ITestOzoneContractUnbuffer extends AbstractContractUnbufferTest {
   protected AbstractFSContract createContract(Configuration conf) {
     return new OzoneContract(conf);
   }
+
+  @Parameterized.Parameters
+  public static Collection data() {
+    return ITestOzoneContractUtils.getFsoCombinations();
+  }
 }
\ No newline at end of file
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUtils.java
new file mode 100644
index 0000000..1926bd2
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUtils.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.fs.ozone.contract;
+
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Utility class for Ozone-contract tests.
+ */
+public final class ITestOzoneContractUtils {
+
+  private ITestOzoneContractUtils(){}
+
+  private static List<Object> fsoCombinations = Arrays.asList(new Object[] {
+      // FSO configuration is a cluster level server side configuration.
+      // If the cluster is configured with V0 layout version,
+      // V0 bucket will be created.
+      // If the cluster is configured with V1 layout version,
+      // V1 bucket will be created.
+      // Presently, OzoneClient checks bucketMetadata then invokes V1 or V0
+      // specific code and it makes no sense to add client side configs now.
+      // Once the specific client API to set FSO or non-FSO bucket is provided
+      // the contract test can be refactored to include another parameter
+      // (fsoClient) which sets/unsets the client side configs.
+      true, // Server is configured with new layout (V1)
+      // and new buckets will be operated on
+      false // Server is configured with old layout (V0)
+      // and old buckets will be operated on
+  });
+
+  static List<Object> getFsoCombinations(){
+    return fsoCombinations;
+  }
+
+  public static void restartCluster(boolean fsOptimizedServer)
+      throws IOException {
+    OzoneContract.destroyCluster();
+    OzoneContract.initOzoneConfiguration(
+        fsOptimizedServer);
+    OzoneContract.createCluster();
+  }
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
index 104b10c..f401c06 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
@@ -46,6 +46,8 @@ class OzoneContract extends AbstractFSContract {
   private static MiniOzoneCluster cluster;
   private static final String CONTRACT_XML = "contract/ozone.xml";
 
+  private static boolean fsOptimizedServer;
+
   OzoneContract(Configuration conf) {
     super(conf);
     //insert the base features
@@ -63,6 +65,10 @@ class OzoneContract extends AbstractFSContract {
     return path;
   }
 
+  public static void initOzoneConfiguration(boolean fsoServer){
+    fsOptimizedServer = fsoServer;
+  }
+
   public static void createCluster() throws IOException {
     OzoneConfiguration conf = new OzoneConfiguration();
     DatanodeRatisServerConfig ratisServerConfig =
@@ -79,6 +85,13 @@ class OzoneContract extends AbstractFSContract {
 
     conf.addResource(CONTRACT_XML);
 
+    if (fsOptimizedServer){
+      conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+          true);
+      conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION,
+          OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1);
+    }
+
     cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build();
     try {
       cluster.waitForClusterToBeReady();
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index 1f3cf6e..a1eca1e 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -414,9 +414,11 @@ public class BasicOzoneFileSystem extends FileSystem {
     try {
       adapter.renameKey(srcPath, dstPath);
     } catch (OMException ome) {
-      LOG.error("rename key failed: {}. source:{}, destin:{}",
-              ome.getMessage(), srcPath, dstPath);
-      if (OMException.ResultCodes.KEY_ALREADY_EXISTS == ome.getResult()) {
+      LOG.error("rename key failed: {}. Error code: {} source:{}, destin:{}",
+              ome.getMessage(), ome.getResult(), srcPath, dstPath);
+      if (OMException.ResultCodes.KEY_ALREADY_EXISTS == ome.getResult() ||
+          OMException.ResultCodes.KEY_RENAME_ERROR  == ome.getResult() ||
+          OMException.ResultCodes.KEY_NOT_FOUND == ome.getResult()) {
         return false;
       } else {
         throw ome;
@@ -508,6 +510,9 @@ public class BasicOzoneFileSystem extends FileSystem {
 
     if (adapter.isFSOptimizedBucket()) {
       if (f.isRoot()) {
+        if (!recursive && listStatus(f).length!=0){
+          throw new PathIsNotEmptyDirectoryException(f.toString());
+        }
         LOG.warn("Cannot delete root directory.");
         return false;
       }

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 10/29: HDDS-4658. LookupKey: do lookup in dir and file tables (#1775)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 21ffe0e3884d6ea0f93f827b61c5dee0fd6e7b8a
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Wed Jan 13 08:36:37 2021 +0530

    HDDS-4658. LookupKey: do lookup in dir and file tables (#1775)
---
 .../hadoop/fs/ozone/TestOzoneFileSystemV1.java     | 15 ++++
 .../hadoop/ozone/client/rpc/TestReadRetries.java   | 55 +++++++++++---
 .../apache/hadoop/ozone/om/TestObjectStoreV1.java  | 83 ++++++++++++++++++++++
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 32 +++++++--
 .../ozone/om/request/key/OMKeyCreateRequestV1.java |  3 +-
 5 files changed, 175 insertions(+), 13 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
index 2938714..e574e94 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
@@ -325,6 +325,11 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
    */
   @Test
   public void testRenameWithNonExistentSource() throws Exception {
+    // Skip as this will run only in new layout
+    if (!enabledFileSystemPaths) {
+      return;
+    }
+
     final String root = "/root";
     final String dir1 = root + "/dir1";
     final String dir2 = root + "/dir2";
@@ -350,6 +355,11 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
    */
   @Test
   public void testRenameDirToItsOwnSubDir() throws Exception {
+    // Skip as this will run only in new layout
+    if (!enabledFileSystemPaths) {
+      return;
+    }
+
     final String root = "/root";
     final String dir1 = root + "/dir1";
     final Path dir1Path = new Path(fs.getUri().toString() + dir1);
@@ -377,6 +387,11 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
    */
   @Test
   public void testRenameDestinationParentDoesntExist() throws Exception {
+    // Skip as this will run only in new layout
+    if (!enabledFileSystemPaths) {
+      return;
+    }
+
     final String root = "/root_dir";
     final String dir1 = root + "/dir1";
     final String dir2 = dir1 + "/dir2";
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
index 91e187c..a7dee52 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.ozone.client.rpc;
 
 import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.UUID;
@@ -47,24 +49,32 @@ import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.KeyOutputStream;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 
-import org.junit.AfterClass;
+import org.junit.After;
 import org.junit.Assert;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.junit.Assert.fail;
-import org.junit.BeforeClass;
+import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
-import org.junit.rules.ExpectedException;
 import org.junit.rules.Timeout;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import static org.junit.Assert.assertEquals;
+
+import org.junit.rules.ExpectedException;
 
 /**
  * Test read retries from multiple nodes in the pipeline.
  */
+@RunWith(Parameterized.class)
 public class TestReadRetries {
 
   /**
@@ -84,16 +94,27 @@ public class TestReadRetries {
       storageContainerLocationClient;
 
   private static final String SCM_ID = UUID.randomUUID().toString();
+  private String layoutVersion;
 
+  public TestReadRetries(String layoutVersion) {
+    this.layoutVersion = layoutVersion;
+  }
+
+  @Parameterized.Parameters
+  public static Collection<Object[]> data() {
+    return Arrays.asList(new Object[]{"V0"}, new Object[]{"V1"});
+  }
 
   /**
    * Create a MiniOzoneCluster for testing.
    * @throws Exception
    */
-  @BeforeClass
-  public static void init() throws Exception {
+  @Before
+  public void init() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1);
+    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true);
+    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, layoutVersion);
     cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(3)
         .setScmId(SCM_ID)
@@ -112,8 +133,8 @@ public class TestReadRetries {
   /**
    * Close OzoneClient and shutdown MiniOzoneCluster.
    */
-  @AfterClass
-  public static void shutdown() throws IOException {
+  @After
+  public void shutdown() throws IOException {
     if(ozClient != null) {
       ozClient.close();
     }
@@ -140,7 +161,7 @@ public class TestReadRetries {
     volume.createBucket(bucketName);
     OzoneBucket bucket = volume.getBucket(bucketName);
 
-    String keyName = UUID.randomUUID().toString();
+    String keyName = "a/b/c/" + UUID.randomUUID().toString();
 
     OzoneOutputStream out = bucket
         .createKey(keyName, value.getBytes(UTF_8).length, ReplicationType.RATIS,
@@ -188,6 +209,13 @@ public class TestReadRetries {
     cluster.shutdownHddsDatanode(datanodeDetails);
     // try to read, this should be successful
     readKey(bucket, keyName, value);
+
+    // read intermediate directory
+    verifyIntermediateDir(bucket, "a/b/c/");
+    verifyIntermediateDir(bucket, "a/b/c");
+    verifyIntermediateDir(bucket, "/a/b/c/");
+    verifyIntermediateDir(bucket, "/a/b/c");
+
     // shutdown the second datanode
     datanodeDetails = datanodes.get(1);
     cluster.shutdownHddsDatanode(datanodeDetails);
@@ -210,6 +238,17 @@ public class TestReadRetries {
     factory.releaseClient(clientSpi, false);
   }
 
+  private void verifyIntermediateDir(OzoneBucket bucket,
+      String dir) throws IOException {
+    try {
+      bucket.getKey(dir);
+      fail("Should throw exception for directory listing");
+    } catch (OMException ome) {
+      // expected
+      assertEquals(OMException.ResultCodes.KEY_NOT_FOUND, ome.getResult());
+    }
+  }
+
   private void readKey(OzoneBucket bucket, String keyName, String data)
       throws IOException {
     OzoneKey key = bucket.getKey(keyName);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
index c6ae4ca..ee127cf 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
@@ -26,8 +26,10 @@ import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneKeyDetails;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
@@ -43,6 +45,9 @@ import java.io.IOException;
 import java.util.HashMap;
 import java.util.UUID;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
 public class TestObjectStoreV1 {
 
   private static MiniOzoneCluster cluster = null;
@@ -133,6 +138,84 @@ public class TestObjectStoreV1 {
             true);
   }
 
+  @Test
+  public void testLookupKey() throws Exception {
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    String parent = "a/b/c/";
+    String file = "key" + RandomStringUtils.randomNumeric(5);
+    String key = parent + file;
+
+    OzoneClient client = cluster.getClient();
+
+    ObjectStore objectStore = client.getObjectStore();
+    objectStore.createVolume(volumeName);
+
+    OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
+    Assert.assertTrue(ozoneVolume.getName().equals(volumeName));
+    ozoneVolume.createBucket(bucketName);
+
+    OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
+    Assert.assertTrue(ozoneBucket.getName().equals(bucketName));
+
+    Table<String, OmKeyInfo> openKeyTable =
+            cluster.getOzoneManager().getMetadataManager().getOpenKeyTable();
+
+    String data = "random data";
+    OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(key,
+            data.length(), ReplicationType.RATIS, ReplicationFactor.ONE,
+            new HashMap<>());
+
+    OmDirectoryInfo dirPathC = getDirInfo(volumeName, bucketName, parent);
+    Assert.assertNotNull("Failed to find dir path: a/b/c", dirPathC);
+
+    // after file creation
+    verifyKeyInOpenFileTable(openKeyTable, file, dirPathC.getObjectID(),
+            false);
+
+    ozoneOutputStream.write(data.getBytes(), 0, data.length());
+
+    // open key
+    try {
+      ozoneBucket.getKey(key);
+      fail("Should throw exception as file is not visible and its still " +
+              "open for writing!");
+    } catch (OMException ome) {
+      // expected
+      assertEquals(ome.getResult(), OMException.ResultCodes.KEY_NOT_FOUND);
+    }
+
+    ozoneOutputStream.close();
+
+    OzoneKeyDetails keyDetails = ozoneBucket.getKey(key);
+    Assert.assertEquals(key, keyDetails.getName());
+
+    Table<String, OmKeyInfo> keyTable =
+            cluster.getOzoneManager().getMetadataManager().getKeyTable();
+
+    // When closing the key, entry should be removed from openFileTable
+    // and it should be added to fileTable.
+    verifyKeyInFileTable(keyTable, file, dirPathC.getObjectID(), false);
+    verifyKeyInOpenFileTable(openKeyTable, file, dirPathC.getObjectID(),
+            true);
+
+    ozoneBucket.deleteKey(key);
+
+    // get deleted key
+    try {
+      ozoneBucket.getKey(key);
+      fail("Should throw exception as file not exists!");
+    } catch (OMException ome) {
+      // expected
+      assertEquals(ome.getResult(), OMException.ResultCodes.KEY_NOT_FOUND);
+    }
+
+    // after key delete
+    verifyKeyInFileTable(keyTable, file, dirPathC.getObjectID(), true);
+    verifyKeyInOpenFileTable(openKeyTable, file, dirPathC.getObjectID(),
+            true);
+  }
+
   private OmDirectoryInfo getDirInfo(String volumeName, String bucketName,
       String parentKey) throws Exception {
     OMMetadataManager omMetadataManager =
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index dc70369..db28ff7 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -658,9 +658,11 @@ public class KeyManagerImpl implements KeyManager {
         bucketName);
     OmKeyInfo value = null;
     try {
-      String keyBytes = metadataManager.getOzoneKey(
-          volumeName, bucketName, keyName);
-      value = metadataManager.getKeyTable().get(keyBytes);
+      if (OzoneManagerRatisUtils.isOmLayoutVersionV1()) {
+        value = getOmKeyInfoV1(volumeName, bucketName, keyName);
+      } else {
+        value = getOmKeyInfo(volumeName, bucketName, keyName);
+      }
     } catch (IOException ex) {
       if (ex instanceof OMException) {
         throw ex;
@@ -680,7 +682,7 @@ public class KeyManagerImpl implements KeyManager {
         LOG.debug("volume:{} bucket:{} Key:{} not found", volumeName,
                 bucketName, keyName);
       }
-      throw new OMException("Key not found", KEY_NOT_FOUND);
+      throw new OMException("Key:" + keyName + " not found", KEY_NOT_FOUND);
     }
 
     // add block token for read.
@@ -697,6 +699,28 @@ public class KeyManagerImpl implements KeyManager {
     return value;
   }
 
+  private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName,
+                                 String keyName) throws IOException {
+    String keyBytes = metadataManager.getOzoneKey(
+            volumeName, bucketName, keyName);
+    return metadataManager.getKeyTable().get(keyBytes);
+  }
+
+  /**
+   * Look up will return only closed fileInfo. This will return null if the
+   * keyName is a directory or if the keyName is still open for writing.
+   */
+  private OmKeyInfo getOmKeyInfoV1(String volumeName, String bucketName,
+                                   String keyName) throws IOException {
+    OzoneFileStatus fileStatus =
+            OMFileRequest.getOMKeyInfoIfExists(metadataManager,
+                    volumeName, bucketName, keyName, scmBlockSize);
+    if (fileStatus == null) {
+      return null;
+    }
+    return fileStatus.isFile() ? fileStatus.getKeyInfo() : null;
+  }
+
   private void addBlockToken4Read(OmKeyInfo value) throws IOException {
     Preconditions.checkNotNull(value, "OMKeyInfo cannot be null");
     if (grpcBlockTokenEnabled) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestV1.java
index 416e462..a49c01e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestV1.java
@@ -196,8 +196,9 @@ public class OMKeyCreateRequestV1 extends OMKeyCreateRequest {
 
       // Prepare response. Sets user given full key name in the 'keyName'
       // attribute in response object.
+      int clientVersion = getOmRequest().getVersion();
       omResponse.setCreateKeyResponse(CreateKeyResponse.newBuilder()
-              .setKeyInfo(omFileInfo.getProtobuf(keyName))
+              .setKeyInfo(omFileInfo.getProtobuf(keyName, clientVersion))
               .setID(clientID)
               .setOpenVersion(openVersion).build())
               .setCmdType(Type.CreateKey);

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 03/29: HDDS-4332: ListFileStatus - do lookup in directory and file tables (#1503)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 30702762944d920456fd20bab14ba15280fae9bc
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Sat Oct 31 06:32:23 2020 +0530

    HDDS-4332: ListFileStatus - do lookup in directory and file tables (#1503)
---
 .../hadoop/ozone/om/helpers/OzoneFSUtils.java      |  31 ++
 .../hadoop/fs/ozone/TestOzoneFileInterfaces.java   |  18 +-
 .../hadoop/fs/ozone/TestOzoneFileInterfacesV1.java |  66 ++++
 .../hadoop/fs/ozone/TestOzoneFileSystem.java       |  49 ++-
 .../hadoop/fs/ozone/TestOzoneFileSystemV1.java     | 415 +++++++++++++++++++++
 .../ozone/freon/TestHadoopDirTreeGenerator.java    |  22 +-
 .../ozone/freon/TestHadoopDirTreeGeneratorV1.java  |  33 ++
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 398 ++++++++++++++++++++
 .../request/file/OMDirectoryCreateRequestV1.java   |   2 -
 .../ozone/om/request/file/OMFileRequest.java       | 117 +++++-
 10 files changed, 1133 insertions(+), 18 deletions(-)

diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
index 96df56f..63bfd8f 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.util.StringUtils;
 import javax.annotation.Nonnull;
 import java.nio.file.Paths;
 
+import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
 
 /**
@@ -131,4 +132,34 @@ public final class OzoneFSUtils {
     // failed to converts a path key
     return keyName;
   }
+
+  /**
+   * Verifies whether the childKey is an immediate path under the given
+   * parentKey.
+   *
+   * @param parentKey parent key name
+   * @param childKey  child key name
+   * @return true if childKey is an immediate path under the given parentKey
+   */
+  public static boolean isImmediateChild(String parentKey, String childKey) {
+
+    // Empty childKey has no parent, so just returning false.
+    if (org.apache.commons.lang3.StringUtils.isBlank(childKey)) {
+      return false;
+    }
+    java.nio.file.Path parentPath = Paths.get(parentKey);
+    java.nio.file.Path childPath = Paths.get(childKey);
+
+    java.nio.file.Path childParent = childPath.getParent();
+    // Following are the valid parentKey formats:
+    // parentKey="" or parentKey="/" or parentKey="/a" or parentKey="a"
+    // Following are the valid childKey formats:
+    // childKey="/" or childKey="/a/b" or childKey="a/b"
+    if (org.apache.commons.lang3.StringUtils.isBlank(parentKey)) {
+      return childParent == null ||
+              OM_KEY_PREFIX.equals(childParent.toString());
+    }
+
+    return parentPath.equals(childParent);
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
index ccb16f4..3d4fe2f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
@@ -56,6 +56,8 @@ import static java.nio.charset.StandardCharsets.UTF_8;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang3.RandomStringUtils;
 import static org.apache.hadoop.fs.ozone.Constants.OZONE_DEFAULT_USER;
+
+import org.jetbrains.annotations.NotNull;
 import org.junit.After;
 import org.junit.Assert;
 
@@ -119,7 +121,8 @@ public class TestOzoneFileInterfaces {
 
   private OMMetrics omMetrics;
 
-  private boolean enableFileSystemPaths;
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  protected boolean enableFileSystemPaths;
 
   public TestOzoneFileInterfaces(boolean setDefaultFs,
       boolean useAbsolutePath, boolean enabledFileSystemPaths) {
@@ -134,9 +137,8 @@ public class TestOzoneFileInterfaces {
     volumeName = RandomStringUtils.randomAlphabetic(10).toLowerCase();
     bucketName = RandomStringUtils.randomAlphabetic(10).toLowerCase();
 
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
-        enableFileSystemPaths);
+    OzoneConfiguration conf = getOzoneConfiguration();
+
     cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(3)
         .build();
@@ -161,6 +163,14 @@ public class TestOzoneFileInterfaces {
     omMetrics = cluster.getOzoneManager().getMetrics();
   }
 
+  @NotNull
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+        enableFileSystemPaths);
+    return conf;
+  }
+
   @After
   public void teardown() throws IOException {
     if (cluster != null) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfacesV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfacesV1.java
new file mode 100644
index 0000000..93473be
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfacesV1.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+/**
+ * Test OzoneFileSystem Interfaces layout version V1.
+ *
+ * This test will test the various interfaces i.e.
+ * create, read, write, getFileStatus
+ */
+@RunWith(Parameterized.class)
+public class TestOzoneFileInterfacesV1 extends TestOzoneFileInterfaces {
+
+  public TestOzoneFileInterfacesV1(boolean setDefaultFs,
+      boolean useAbsolutePath, boolean enabledFileSystemPaths) {
+    super(setDefaultFs, useAbsolutePath, enabledFileSystemPaths);
+  }
+
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+            enableFileSystemPaths);
+    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    return conf;
+  }
+
+  @Override
+  @Test
+  @Ignore("TODO:HDDS-2939")
+  public void testDirectory() {
+
+  }
+
+  @Override
+  @Test
+  @Ignore("TODO:HDDS-2939")
+  public void testOzFsReadWrite() {
+
+  }
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
index 6f13b0d..05b8d8b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
@@ -121,15 +121,24 @@ public class TestOzoneFileSystem {
   private static final Logger LOG =
       LoggerFactory.getLogger(TestOzoneFileSystem.class);
 
-  private static boolean enabledFileSystemPaths;
-  private static boolean omRatisEnabled;
-
-  private static MiniOzoneCluster cluster;
-  private static FileSystem fs;
-  private static OzoneFileSystem o3fs;
-  private static String volumeName;
-  private static String bucketName;
-  private static Trash trash;
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  protected static boolean enabledFileSystemPaths;
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  protected static boolean omRatisEnabled;
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  protected static MiniOzoneCluster cluster;
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  protected static FileSystem fs;
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  protected static OzoneFileSystem o3fs;
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  protected static String volumeName;
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  protected static String bucketName;
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  protected static int rootItemCount;
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  protected static Trash trash;
 
   private void init() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
@@ -229,6 +238,28 @@ public class TestOzoneFileSystem {
     } catch (FileAlreadyExistsException fae) {
       // ignore as its expected
     }
+
+    // Directory
+    FileStatus fileStatus = fs.getFileStatus(parent);
+    assertEquals("FileStatus did not return the directory",
+            "/d1/d2/d3/d4", fileStatus.getPath().toUri().getPath());
+    assertTrue("FileStatus did not return the directory",
+            fileStatus.isDirectory());
+
+    // invalid sub directory
+    try{
+      fs.getFileStatus(new Path("/d1/d2/d3/d4/key3/invalid"));
+      fail("Should throw FileNotFoundException");
+    } catch (FileNotFoundException fnfe) {
+      // ignore as its expected
+    }
+    // invalid file name
+    try{
+      fs.getFileStatus(new Path("/d1/d2/d3/d4/invalidkey"));
+      fail("Should throw FileNotFoundException");
+    } catch (FileNotFoundException fnfe) {
+      // ignore as its expected
+    }
   }
 
   /**
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
new file mode 100644
index 0000000..6868040
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
@@ -0,0 +1,415 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.Trash;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.TestDataUtil;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.junit.Assert;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.Map;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
+
+/**
+ * Ozone file system tests that are not covered by contract tests,
+ * layout version V1.
+ *
+ */
+@RunWith(Parameterized.class)
+public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
+
+  // Intentionally a no-op; presumably present to mask a one-time
+  // @BeforeClass bootstrap in the parent suite so each test can build
+  // its own cluster in setup() instead -- TODO confirm against
+  // TestOzoneFileSystem. NOTE(review): @Ignore has no documented effect
+  // on @BeforeClass methods in JUnit 4; the empty body does the work.
+  @Ignore("TODO:HDDS-2939")
+  @BeforeClass
+  public static void init() throws Exception {
+
+  }
+
+  // Receives the parameterized flags (setDefaultFs / enableOMRatis) --
+  // presumably supplied by the inherited @Parameterized data, confirm --
+  // and forwards them to the base suite.
+  public TestOzoneFileSystemV1(boolean setDefaultFs, boolean enableOMRatis) {
+    super(setDefaultFs, enableOMRatis);
+  }
+
+  /**
+   * Builds a fresh 3-datanode MiniOzoneCluster for every test and mounts
+   * an o3fs filesystem on a newly created volume/bucket. The cluster is
+   * torn down again in cleanup().
+   */
+  @Before
+  public void setup() throws Exception {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setInt(FS_TRASH_INTERVAL_KEY, 1);
+    conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, omRatisEnabled);
+    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+            enabledFileSystemPaths);
+    // The V1 (prefix) layout is only exercised when filesystem paths are
+    // enabled; otherwise the default layout is left in place.
+    if (enabledFileSystemPaths) {
+      conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    }
+    cluster = MiniOzoneCluster.newBuilder(conf)
+            .setNumDatanodes(3)
+            .build();
+    cluster.waitForClusterToBeReady();
+
+    // create a volume and a bucket to be used by OzoneFileSystem
+    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster);
+    volumeName = bucket.getVolumeName();
+    bucketName = bucket.getName();
+
+    String rootPath = String.format("%s://%s.%s/",
+            OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName);
+
+    // Set the fs.defaultFS and start the filesystem
+    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
+    // Set the number of keys to be processed during batch operate.
+    conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5);
+
+    fs = FileSystem.get(conf);
+    trash = new Trash(conf);
+    o3fs = (OzoneFileSystem) fs;
+  }
+
+  /**
+   * Per-test teardown: purges the OM DB tables (FS delete is not yet
+   * supported in V1), then shuts down the cluster and the filesystem.
+   */
+  @After
+  @Override
+  public void cleanup() {
+    super.cleanup();
+    try {
+      // Cleanup keyTable/directoryTable directly between tests.
+      tableCleanup();
+    } catch (IOException e) {
+      LOG.info("Failed to cleanup DB tables.", e);
+      fail("Failed to cleanup DB tables." + e.getMessage());
+    }
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+    // NOTE(review): fs is closed after the cluster is shut down; a quiet
+    // close makes this harmless, but confirm the ordering is intentional.
+    IOUtils.closeQuietly(fs);
+  }
+
+  /**
+   * Set a timeout for each test.
+   */
+  @Rule
+  public Timeout timeout = new Timeout(300000);
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestOzoneFileSystemV1.class);
+
+  @Test
+  public void testListStatusWithoutRecursiveSearch() throws Exception {
+    /*
+     * Op 1. create file -> /key1
+     * Op 2. create dir -> /d1/d2
+     * Op 3. create dir -> /d1/d3
+     * Op 4. create dir -> /d1/d4
+     * Op 5. create file -> /d1/key1
+     * Op 6. create file -> /d2/key1
+     * Op 7. create file -> /d1/d2/key1
+     */
+    Path key1 = new Path("/key1");
+    try (FSDataOutputStream outputStream = fs.create(key1, false)) {
+      assertNotNull("Should be able to create file: key1",
+              outputStream);
+    }
+    Path d1 = new Path("/d1");
+    Path dir1Key1 = new Path(d1, "key1");
+    try (FSDataOutputStream outputStream = fs.create(dir1Key1, false)) {
+      assertNotNull("Should be able to create file: " + dir1Key1,
+              outputStream);
+    }
+    Path d2 = new Path("/d2");
+    Path dir2Key1 = new Path(d2, "key1");
+    try (FSDataOutputStream outputStream = fs.create(dir2Key1, false)) {
+      assertNotNull("Should be able to create file: " + dir2Key1,
+              outputStream);
+    }
+    Path dir1Dir2 = new Path("/d1/d2/");
+    Path dir1Dir2Key1 = new Path(dir1Dir2, "key1");
+    try (FSDataOutputStream outputStream = fs.create(dir1Dir2Key1, false)) {
+      assertNotNull("Should be able to create file: " + dir1Dir2Key1,
+              outputStream);
+    }
+    Path d1Key2 = new Path(d1, "key2");
+    try (FSDataOutputStream outputStream = fs.create(d1Key2, false)) {
+      assertNotNull("Should be able to create file: " + d1Key2,
+              outputStream);
+    }
+
+    Path dir1Dir3 = new Path("/d1/d3/");
+    Path dir1Dir4 = new Path("/d1/d4/");
+
+    fs.mkdirs(dir1Dir3);
+    fs.mkdirs(dir1Dir4);
+
+    // Root Directory
+    FileStatus[] fileStatusList = fs.listStatus(new Path("/"));
+    assertEquals("FileStatus should return files and directories",
+            3, fileStatusList.length);
+    ArrayList<String> expectedPaths = new ArrayList<>();
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1");
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d2");
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/key1");
+    for (FileStatus fileStatus : fileStatusList) {
+      expectedPaths.remove(fileStatus.getPath().toString());
+    }
+    assertEquals("Failed to return the filestatus[]" + expectedPaths,
+            0, expectedPaths.size());
+
+    // level-1 sub-dirs
+    fileStatusList = fs.listStatus(new Path("/d1"));
+    assertEquals("FileStatus should return files and directories",
+            5, fileStatusList.length);
+    expectedPaths = new ArrayList<>();
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1/d2");
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1/d3");
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1/d4");
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1/key1");
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1/key2");
+    for (FileStatus fileStatus : fileStatusList) {
+      expectedPaths.remove(fileStatus.getPath().toString());
+    }
+    assertEquals("Failed to return the filestatus[]" + expectedPaths,
+            0, expectedPaths.size());
+
+    // level-2 sub-dirs
+    fileStatusList = fs.listStatus(new Path("/d1/d2"));
+    assertEquals("FileStatus should return files and directories",
+            1, fileStatusList.length);
+    expectedPaths = new ArrayList<>();
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1/d2/" +
+            "key1");
+    for (FileStatus fileStatus : fileStatusList) {
+      expectedPaths.remove(fileStatus.getPath().toString());
+    }
+    assertEquals("Failed to return the filestatus[]" + expectedPaths,
+            0, expectedPaths.size());
+
+    // level-2 key2
+    fileStatusList = fs.listStatus(new Path("/d1/d2/key1"));
+    assertEquals("FileStatus should return files and directories",
+            1, fileStatusList.length);
+    expectedPaths = new ArrayList<>();
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1/d2/" +
+            "key1");
+    for (FileStatus fileStatus : fileStatusList) {
+      expectedPaths.remove(fileStatus.getPath().toString());
+    }
+    assertEquals("Failed to return the filestatus[]" + expectedPaths,
+            0, expectedPaths.size());
+
+    // invalid root key
+    try {
+      fileStatusList = fs.listStatus(new Path("/key2"));
+      fail("Should throw FileNotFoundException");
+    } catch (FileNotFoundException fnfe) {
+      // ignore as its expected
+    }
+    try {
+      fileStatusList = fs.listStatus(new Path("/d1/d2/key2"));
+      fail("Should throw FileNotFoundException");
+    } catch (FileNotFoundException fnfe) {
+      // ignore as its expected
+
+    }
+  }
+
+  @Test
+  public void testListFilesRecursive() throws Exception {
+    /*
+     * Op 1. create file -> /d1/d1/d2/key1
+     * Op 2. create file -> /key1
+     * Op 3. create file -> /key2
+     * Op 4. create file -> /d1/d2/d1/d2/key1
+     */
+    Path dir1Dir1Dir2Key1 = new Path("/d1/d1/d2/key1");
+    try (FSDataOutputStream outputStream = fs.create(dir1Dir1Dir2Key1,
+            false)) {
+      assertNotNull("Should be able to create file: " + dir1Dir1Dir2Key1,
+              outputStream);
+    }
+    Path key1 = new Path("/key1");
+    try (FSDataOutputStream outputStream = fs.create(key1, false)) {
+      assertNotNull("Should be able to create file: " + key1,
+              outputStream);
+    }
+    Path key2 = new Path("/key2");
+    try (FSDataOutputStream outputStream = fs.create(key2, false)) {
+      assertNotNull("Should be able to create file: key2",
+              outputStream);
+    }
+    Path dir1Dir2Dir1Dir2Key1 = new Path("/d1/d2/d1/d2/key1");
+    try (FSDataOutputStream outputStream = fs.create(dir1Dir2Dir1Dir2Key1,
+            false)) {
+      assertNotNull("Should be able to create file: "
+              + dir1Dir2Dir1Dir2Key1, outputStream);
+    }
+    // Recursive=true: listFiles should return all 4 files from every level
+    // of the tree, with fully-qualified o3fs:// paths.
+    RemoteIterator<LocatedFileStatus> fileStatusItr = fs.listFiles(new Path(
+            "/"), true);
+    String uriPrefix = "o3fs://" + bucketName + "." + volumeName;
+    ArrayList<String> expectedPaths = new ArrayList<>();
+    expectedPaths.add(uriPrefix + dir1Dir1Dir2Key1.toString());
+    expectedPaths.add(uriPrefix + key1.toString());
+    expectedPaths.add(uriPrefix + key2.toString());
+    expectedPaths.add(uriPrefix + dir1Dir2Dir1Dir2Key1.toString());
+    int expectedFilesCount = expectedPaths.size();
+    int actualCount = 0;
+    while (fileStatusItr.hasNext()) {
+      LocatedFileStatus status = fileStatusItr.next();
+      expectedPaths.remove(status.getPath().toString());
+      actualCount++;
+    }
+    assertEquals("Failed to get all the files: " + expectedPaths,
+            expectedFilesCount, actualCount);
+    assertEquals("Failed to get all the files: " + expectedPaths, 0,
+            expectedPaths.size());
+
+    // Recursive=false: only the files directly under "/" are returned.
+    fileStatusItr = fs.listFiles(new Path("/"), false);
+    expectedPaths.clear();
+    expectedPaths.add(uriPrefix + "/key1");
+    expectedPaths.add(uriPrefix + "/key2");
+    expectedFilesCount = expectedPaths.size();
+    actualCount = 0;
+    while (fileStatusItr.hasNext()) {
+      LocatedFileStatus status = fileStatusItr.next();
+      expectedPaths.remove(status.getPath().toString());
+      actualCount++;
+    }
+    assertEquals("Failed to get all the files: " + expectedPaths, 0,
+            expectedPaths.size());
+    assertEquals("Failed to get all the files: " + expectedPaths,
+            expectedFilesCount, actualCount);
+  }
+
+  /**
+   * Cleanup keyTable and directoryTable explicitly as FS delete operation
+   * is not yet supported.
+   *
+   * Clears both the RocksDB tables and their in-memory table caches, and
+   * asserts that each table reports empty afterwards.
+   *
+   * @throws IOException DB failure
+   */
+  protected void tableCleanup() throws IOException {
+    OMMetadataManager metadataMgr = cluster.getOzoneManager()
+            .getMetadataManager();
+    // Collect all directory keys present in the DB.
+    TableIterator<String, ? extends
+            Table.KeyValue<String, OmDirectoryInfo>> dirTableIterator =
+            metadataMgr.getDirectoryTable().iterator();
+    dirTableIterator.seekToFirst();
+    ArrayList<String> dirList = new ArrayList<>();
+    while (dirTableIterator.hasNext()) {
+      String key = dirTableIterator.key();
+      if (StringUtils.isNotBlank(key)) {
+        dirList.add(key);
+      }
+      dirTableIterator.next();
+    }
+
+    // Purge the directoryTable cache.
+    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmDirectoryInfo>>>
+            cacheIterator = metadataMgr.getDirectoryTable().cacheIterator();
+    while (cacheIterator.hasNext()) {
+      cacheIterator.next();
+      cacheIterator.remove();
+    }
+
+    for (String dirKey : dirList) {
+      metadataMgr.getDirectoryTable().delete(dirKey);
+      Assert.assertNull("Unexpected entry!",
+              metadataMgr.getDirectoryTable().get(dirKey));
+    }
+
+    Assert.assertTrue("DirTable is not empty",
+            metadataMgr.getDirectoryTable().isEmpty());
+
+    Assert.assertFalse(metadataMgr.getDirectoryTable().cacheIterator()
+            .hasNext());
+
+    // Collect all file keys present in the DB.
+    TableIterator<String, ? extends
+            Table.KeyValue<String, OmKeyInfo>> keyTableIterator =
+            metadataMgr.getKeyTable().iterator();
+    keyTableIterator.seekToFirst();
+    ArrayList<String> fileList = new ArrayList<>();
+    while (keyTableIterator.hasNext()) {
+      String key = keyTableIterator.key();
+      if (StringUtils.isNotBlank(key)) {
+        fileList.add(key);
+      }
+      keyTableIterator.next();
+    }
+
+    // Purge the keyTable cache. (Bug fix: this loop previously iterated
+    // the directoryTable cache a second time with an OmDirectoryInfo value
+    // type, leaving stale entries in the keyTable cache.)
+    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>>
+            keyCacheIterator = metadataMgr.getKeyTable().cacheIterator();
+    while (keyCacheIterator.hasNext()) {
+      keyCacheIterator.next();
+      keyCacheIterator.remove();
+    }
+
+    for (String fileKey : fileList) {
+      metadataMgr.getKeyTable().delete(fileKey);
+      Assert.assertNull("Unexpected entry!",
+              metadataMgr.getKeyTable().get(fileKey));
+    }
+
+    Assert.assertTrue("KeyTable is not empty",
+            metadataMgr.getKeyTable().isEmpty());
+
+    // Reset the per-test counter of items under the bucket root.
+    rootItemCount = 0;
+  }
+
+  // Disabled for this layout; re-enable once HDDS-2939 completes.
+  @Override
+  @Test
+  @Ignore("TODO:HDDS-2939")
+  public void testTrash() throws Exception {
+  }
+
+  // Disabled for this layout; re-enable once HDDS-2939 completes.
+  @Override
+  @Test
+  @Ignore("TODO:HDDS-2939")
+  public void testRenameToTrashEnabled() throws Exception {
+  }
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java
index 6ad5b8d..776551b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java
@@ -31,12 +31,15 @@ import org.apache.ratis.server.raftlog.RaftLog;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
 
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.net.URI;
+import java.util.ArrayList;
 
 /**
  * Test for HadoopDirTreeGenerator.
@@ -47,6 +50,8 @@ public class TestHadoopDirTreeGenerator {
   private OzoneConfiguration conf = null;
   private MiniOzoneCluster cluster = null;
   private ObjectStore store = null;
+  private static final Logger LOG =
+          LoggerFactory.getLogger(TestHadoopDirTreeGenerator.class);
 
   @Before
   public void setup() {
@@ -74,7 +79,7 @@ public class TestHadoopDirTreeGenerator {
    * @throws IOException
    */
   private void startCluster() throws Exception {
-    conf = new OzoneConfiguration();
+    conf = getOzoneConfiguration();
 
     cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build();
     cluster.waitForClusterToBeReady();
@@ -83,6 +88,10 @@ public class TestHadoopDirTreeGenerator {
     store = OzoneClientFactory.getRpcClient(conf).getObjectStore();
   }
 
+  protected OzoneConfiguration getOzoneConfiguration() {
+    return new OzoneConfiguration();
+  }
+
   @Test
   public void testNestedDirTreeGeneration() throws Exception {
     try {
@@ -103,6 +112,9 @@ public class TestHadoopDirTreeGenerator {
               2, 4, 2);
       verifyDirTree("vol5", "bucket1", 5,
               4, 1, 0);
+      // default page size is Constants.LISTING_PAGE_SIZE = 1024
+      verifyDirTree("vol6", "bucket1", 2,
+              1, 1100, 0);
     } finally {
       shutdown();
     }
@@ -122,6 +134,7 @@ public class TestHadoopDirTreeGenerator {
             fileCount + "", "-s", span + "", "-n", "1", "-r", rootPath,
                      "-g", perFileSizeInBytes + ""});
     // verify the directory structure
+    LOG.info("Started verifying the directory structure...");
     FileSystem fileSystem = FileSystem.get(URI.create(rootPath),
             conf);
     Path rootDir = new Path(rootPath.concat("/"));
@@ -149,6 +162,7 @@ public class TestHadoopDirTreeGenerator {
       verifyActualSpan(expectedSpanCnt, fileStatuses);
     }
     int actualNumFiles = 0;
+    ArrayList <String> files = new ArrayList<>();
     for (FileStatus fileStatus : fileStatuses) {
       if (fileStatus.isDirectory()) {
         ++depth;
@@ -157,6 +171,12 @@ public class TestHadoopDirTreeGenerator {
       } else {
         Assert.assertEquals("Mismatches file len",
                 perFileSizeInBytes, fileStatus.getLen());
+        String fName = fileStatus.getPath().getName();
+        Assert.assertFalse("actualNumFiles:" + actualNumFiles +
+                        ", fName:" + fName + ", expectedFileCnt:" +
+                        expectedFileCnt + ", depth:" + depth,
+                files.contains(fName));
+        files.add(fName);
         actualNumFiles++;
       }
     }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGeneratorV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGeneratorV1.java
new file mode 100644
index 0000000..99d4f26
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGeneratorV1.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.freon;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+
+/**
+ * Test for HadoopDirTreeGenerator layout version V1.
+ */
+public class TestHadoopDirTreeGeneratorV1 extends TestHadoopDirTreeGenerator {
+
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    return conf;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 4a968fd..4a4327d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -31,6 +31,7 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
+import java.util.LinkedHashSet;
 import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
@@ -76,6 +77,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
 import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
@@ -95,7 +97,9 @@ import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo;
 import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
@@ -129,6 +133,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_KEY_PREALLOCATION_BL
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT;
 import static org.apache.hadoop.ozone.ClientVersions.CURRENT_VERSION;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_FOUND;
@@ -1774,6 +1779,10 @@ public class KeyManagerImpl implements KeyManager {
     String bucketName = args.getBucketName();
     String keyName = args.getKeyName();
 
+    if (OzoneManagerRatisUtils.isOmLayoutVersionV1()) {
+      return getOzoneFileStatusV1(volumeName, bucketName, keyName,
+              args.getSortDatanodes(), clientAddress, false);
+    }
     return getOzoneFileStatus(volumeName, bucketName, keyName,
             args.getRefreshPipeline(), args.getSortDatanodes(), clientAddress);
   }
@@ -1838,6 +1847,65 @@ public class KeyManagerImpl implements KeyManager {
             FILE_NOT_FOUND);
   }
 
+
+  /**
+   * Returns the file status of the given key under the V1 (prefix) layout.
+   *
+   * @param volumeName volume name
+   * @param bucketName bucket name
+   * @param keyName key name; empty string denotes the bucket root
+   * @param sortDatanodes if true, sort datanodes relative to clientAddress
+   * @param clientAddress client address used for datanode sorting
+   * @param skipFileNotFoundError if true, return null instead of throwing
+   *                              FILE_NOT_FOUND when the key does not exist
+   * @return OzoneFileStatus; null only when skipFileNotFoundError is true
+   *         and the key is absent
+   * @throws IOException DB failure, or OMException(FILE_NOT_FOUND)
+   */
+  private OzoneFileStatus getOzoneFileStatusV1(String volumeName,
+      String bucketName, String keyName, boolean sortDatanodes,
+      String clientAddress, boolean skipFileNotFoundError) throws IOException {
+    OzoneFileStatus fileStatus = null;
+    // DB lookup happens under the bucket read lock; the SCM refresh below
+    // is deliberately done after releasing it.
+    metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName,
+            bucketName);
+    try {
+      // Check if this is the root of the filesystem.
+      if (keyName.length() == 0) {
+        validateBucket(volumeName, bucketName);
+        return new OzoneFileStatus();
+      }
+
+      fileStatus = OMFileRequest.getOMKeyInfoIfExists(metadataManager,
+              volumeName, bucketName, keyName, scmBlockSize);
+
+    } finally {
+      metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName,
+              bucketName);
+    }
+
+    if (fileStatus != null) {
+      // if the key is a file then do refresh pipeline info in OM by asking SCM
+      if (fileStatus.isFile()) {
+
+        OmKeyInfo fileKeyInfo = fileStatus.getKeyInfo();
+
+        // refreshPipeline flag check has been removed as part of
+        // https://issues.apache.org/jira/browse/HDDS-3658.
+        // Please refer this jira for more details.
+        refresh(fileKeyInfo);
+
+        if (sortDatanodes) {
+          sortDatanodes(clientAddress, fileKeyInfo);
+        }
+        return new OzoneFileStatus(fileKeyInfo, scmBlockSize, false);
+      } else {
+        // Directory status can be returned as-is; no pipeline to refresh.
+        return fileStatus;
+      }
+    }
+
+    // Key not found.
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Unable to get file status for the key: volume: {}, bucket:" +
+                      " {}, key: {}, with error: No such file exists.",
+              volumeName, bucketName, keyName);
+    }
+
+    // don't throw exception if this flag is true.
+    if (skipFileNotFoundError) {
+      return fileStatus;
+    }
+
+    throw new OMException("Unable to get file status: volume: " +
+            volumeName + " bucket: " + bucketName + " key: " + keyName,
+            FILE_NOT_FOUND);
+  }
+
   /**
    * Ozone FS api to create a directory. Parent directories if do not exist
    * are created for the input directory.
@@ -2083,6 +2151,10 @@ public class KeyManagerImpl implements KeyManager {
       return fileStatusList;
     }
 
+    if (OzoneManagerRatisUtils.isOmLayoutVersionV1()) {
+      return listStatusV1(args, recursive, startKey, numEntries, clientAddress);
+    }
+
     String volumeName = args.getVolumeName();
     String bucketName = args.getBucketName();
     String keyName = args.getKeyName();
@@ -2218,6 +2290,332 @@ public class KeyManagerImpl implements KeyManager {
     return fileStatusList;
   }
 
+  /**
+   * Lists the status of a key and its immediate children under the V1
+   * (prefix) layout. Results are merged from the table cache and the DB
+   * into an insertion-ordered set (LinkedHashSet) to avoid duplicates.
+   *
+   * @param args          key args naming the volume, bucket and key
+   * @param recursive     recursive listing; TODO: handled in HDDS-4360
+   * @param startKey      resume point for paging; when non-empty it must be
+   *                      an immediate child of the requested key
+   * @param numEntries    maximum number of entries to return
+   * @param clientAddress client address used for datanode sorting
+   * @return list of OzoneFileStatus entries (possibly empty)
+   * @throws IOException DB failure
+   */
+  public List<OzoneFileStatus> listStatusV1(OmKeyArgs args, boolean recursive,
+      String startKey, long numEntries, String clientAddress)
+          throws IOException {
+    Preconditions.checkNotNull(args, "Key args can not be null");
+
+    // unsorted OMKeyInfo list contains combine results from TableCache and DB.
+    List<OzoneFileStatus> fileStatusFinalList = new ArrayList<>();
+    LinkedHashSet<OzoneFileStatus> fileStatusList = new LinkedHashSet<>();
+    if (numEntries <= 0) {
+      return fileStatusFinalList;
+    }
+    String volumeName = args.getVolumeName();
+    String bucketName = args.getBucketName();
+    String keyName = args.getKeyName();
+    String seekFileInDB;
+    String seekDirInDB;
+    long prefixKeyInDB;
+    String prefixPath = keyName;
+    int countEntries = 0;
+
+    // TODO: recursive flag=true will be handled in HDDS-4360 jira.
+    metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName,
+            bucketName);
+    try {
+      if (Strings.isNullOrEmpty(startKey)) {
+        // First page: resolve the requested key itself.
+        OzoneFileStatus fileStatus = getFileStatus(args, clientAddress);
+        if (fileStatus.isFile()) {
+          return Collections.singletonList(fileStatus);
+        }
+
+        // Not required to search in DeletedTable because all the deleted
+        // keys will be marked directly in dirTable or in keyTable by
+        // breaking the pointer to its sub-dirs and sub-files. So, there is no
+        // issue of inconsistency.
+
+        /*
+         * keyName is a directory.
+         * Say, "/a" is the dir name and its objectID is 1024, then seek
+         * will be doing with "1024/" to get all immediate descendants.
+         */
+        if (fileStatus.getKeyInfo() != null) {
+          prefixKeyInDB = fileStatus.getKeyInfo().getObjectID();
+        } else {
+          // list root directory.
+          String bucketKey = metadataManager.getBucketKey(volumeName,
+                  bucketName);
+          OmBucketInfo omBucketInfo =
+                  metadataManager.getBucketTable().get(bucketKey);
+          prefixKeyInDB = omBucketInfo.getObjectID();
+        }
+        seekFileInDB = metadataManager.getOzonePathKey(prefixKeyInDB, "");
+        seekDirInDB = metadataManager.getOzonePathKey(prefixKeyInDB, "");
+
+        // Order of seek -> (1)Seek dirs in dirTable (2)Seek files in fileTable
+        // 1. Seek the given key in key table.
+        countEntries = getFilesFromDirectory(fileStatusList, seekFileInDB,
+                prefixPath, prefixKeyInDB, startKey, countEntries, numEntries);
+        // 2. Seek the given key in dir table.
+        getDirectories(fileStatusList, seekDirInDB, prefixPath, prefixKeyInDB,
+                startKey, countEntries, numEntries, volumeName, bucketName,
+                recursive);
+      } else {
+        /*
+         * startKey will be used in iterator seek and sets the beginning point
+         * for key traversal.
+         * keyName will be used as parentID where the user has requested to
+         * list the keys from.
+         *
+         * When recursive flag=false, parentID won't change between two pages.
+         * For example: OM has a namespace like,
+         *    /a/1...1M files and /a/b/1...1M files.
+         *    /a/1...1M directories and /a/b/1...1M directories.
+         * Listing "/a", will always have the parentID as "a" irrespective of
+         * the startKey value.
+         */
+
+        // Check startKey is an immediate child of keyName. For example,
+        // keyName=/a/ and expected startKey=/a/b. startKey can't be /xyz/b.
+        if (!OzoneFSUtils.isImmediateChild(keyName, startKey)) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("StartKey {} is not an immediate child of keyName {}. " +
+                    "Returns empty list", startKey, keyName);
+          }
+          return Collections.emptyList();
+        }
+
+        OzoneFileStatus fileStatusInfo = getOzoneFileStatusV1(volumeName,
+                bucketName, startKey, false, null, true);
+
+        if (fileStatusInfo != null) {
+          prefixKeyInDB = fileStatusInfo.getKeyInfo().getParentObjectID();
+          if(fileStatusInfo.isDirectory()){
+            seekDirInDB = metadataManager.getOzonePathKey(prefixKeyInDB,
+                    fileStatusInfo.getKeyInfo().getFileName());
+
+            // Order of seek -> (1) Seek dirs only in dirTable. In OM, always
+            // the order of search is, first seek into fileTable and then
+            // dirTable. So, its not required to search again in the fileTable.
+
+            // Seek the given key in dirTable.
+            getDirectories(fileStatusList, seekDirInDB, prefixPath,
+                    prefixKeyInDB, startKey, countEntries, numEntries,
+                    volumeName, bucketName, recursive);
+          } else {
+            seekFileInDB = metadataManager.getOzonePathKey(prefixKeyInDB,
+                    fileStatusInfo.getKeyInfo().getFileName());
+            // begins from the first sub-dir under the parent dir
+            seekDirInDB = metadataManager.getOzonePathKey(prefixKeyInDB, "");
+
+            // 1. Seek the given key in key table.
+            countEntries = getFilesFromDirectory(fileStatusList, seekFileInDB,
+                    prefixPath, prefixKeyInDB, startKey, countEntries,
+                    numEntries);
+            // 2. Seek the given key in dir table.
+            getDirectories(fileStatusList, seekDirInDB, prefixPath,
+                    prefixKeyInDB, startKey, countEntries, numEntries,
+                    volumeName, bucketName, recursive);
+          }
+        } else {
+          // TODO: HDDS-4364: startKey can be a non-existed key
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("StartKey {} is a non-existed key and returning empty " +
+                    "list", startKey);
+          }
+          return Collections.emptyList();
+        }
+      }
+    } finally {
+      metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName,
+              bucketName);
+    }
+    // Post-processing outside the lock: refresh pipelines and sort
+    // datanodes for file entries only.
+    for (OzoneFileStatus fileStatus : fileStatusList) {
+      if (fileStatus.isFile()) {
+        // refreshPipeline flag check has been removed as part of
+        // https://issues.apache.org/jira/browse/HDDS-3658.
+        // Please refer this jira for more details.
+        refresh(fileStatus.getKeyInfo());
+
+        // No need to check if a key is deleted or not here, this is handled
+        // when adding entries to cacheKeyMap from DB.
+        if (args.getSortDatanodes()) {
+          sortDatanodes(clientAddress, fileStatus.getKeyInfo());
+        }
+      }
+    }
+    fileStatusFinalList.addAll(fileStatusList);
+    return fileStatusFinalList;
+  }
+
+  /**
+   * Collects directory entries that are immediate children of the given
+   * parent object id, merging the dirTable cache first and then seeking
+   * the DB, until numEntries is reached.
+   *
+   * @return updated count of entries collected so far
+   * @throws IOException DB failure
+   */
+  @SuppressWarnings("parameternumber")
+  protected int getDirectories(Set<OzoneFileStatus> fileStatusList,
+      String seekDirInDB, String prefixPath, long prefixKeyInDB,
+      String startKey, int countEntries, long numEntries, String volumeName,
+      String bucketName, boolean recursive) throws IOException {
+
+    // Parameterized table type; the previous raw Table caused an unchecked
+    // assignment when obtaining the typed iterator below.
+    Table<String, OmDirectoryInfo> dirTable =
+            metadataManager.getDirectoryTable();
+    countEntries = listStatusFindDirsInTableCache(fileStatusList, dirTable,
+            prefixKeyInDB, seekDirInDB, prefixPath, startKey, volumeName,
+            bucketName, countEntries, numEntries);
+    TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
+            iterator = dirTable.iterator();
+
+    iterator.seek(seekDirInDB);
+
+    while (iterator.hasNext() && numEntries - countEntries > 0) {
+      OmDirectoryInfo dirInfo = iterator.value().getValue();
+      // Entries are keyed by parent id; once it differs we are past the
+      // requested directory.
+      if (!isImmediateChild(dirInfo.getParentObjectID(), prefixKeyInDB)) {
+        break;
+      }
+
+      // TODO: recursive list will be handled in HDDS-4360 jira.
+      if (!recursive) {
+        String dirName = OMFileRequest.getAbsolutePath(prefixPath,
+                dirInfo.getName());
+        OmKeyInfo omKeyInfo = OMFileRequest.getOmKeyInfo(volumeName,
+                bucketName, dirInfo, dirName);
+        fileStatusList.add(new OzoneFileStatus(omKeyInfo, scmBlockSize,
+                true));
+        countEntries++;
+      }
+      // move to next entry in the DirTable
+      iterator.next();
+    }
+
+    return countEntries;
+  }
+
+  /**
+   * Collects file entries that are immediate children of the given parent
+   * object id: merges the keyTable cache first, then seeks the DB from
+   * seekKeyInDB, stopping at numEntries.
+   *
+   * @return updated count of entries collected so far
+   * @throws IOException DB failure
+   */
+  private int getFilesFromDirectory(Set<OzoneFileStatus> fileStatusList,
+      String seekKeyInDB, String prefixKeyPath, long prefixKeyInDB,
+      String startKey, int countEntries, long numEntries) throws IOException {
+
+    Table<String, OmKeyInfo> keyTable = metadataManager.getKeyTable();
+    // Cache results take precedence over what is on disk.
+    countEntries = listStatusFindFilesInTableCache(fileStatusList, keyTable,
+            prefixKeyInDB, seekKeyInDB, prefixKeyPath, startKey,
+            countEntries, numEntries);
+    TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+            fileIter = keyTable.iterator();
+    fileIter.seek(seekKeyInDB);
+    while (fileIter.hasNext() && countEntries < numEntries) {
+      OmKeyInfo omKeyInfo = fileIter.value().getValue();
+
+      // Past the requested parent directory: stop scanning.
+      if (!isImmediateChild(omKeyInfo.getParentObjectID(), prefixKeyInDB)) {
+        break;
+      }
+
+      // The DB stores only the leaf name; expand keyName to the full path.
+      String leafName = omKeyInfo.getKeyName();
+      omKeyInfo.setFileName(leafName);
+      omKeyInfo.setKeyName(
+              OMFileRequest.getAbsolutePath(prefixKeyPath, leafName));
+      fileStatusList.add(new OzoneFileStatus(omKeyInfo, scmBlockSize, false));
+      countEntries++;
+      // Advance to the next entry in the table.
+      fileIter.next();
+    }
+    return countEntries;
+  }
+
+  /** Returns true when the parent object id equals the given ancestor id. */
+  private boolean isImmediateChild(long parentId, long ancestorId) {
+    return ancestorId == parentId;
+  }
+
+  /**
+   * Helper function for listStatus to find key in FileTableCache.
+   * Walks the keyTable cache and adds matching (non-deleted) file entries
+   * to the result set until numEntries is reached.
+   *
+   * NOTE(review): setFileName/setKeyName below mutate the OmKeyInfo object
+   * obtained from the table cache in place — confirm the cache hands out
+   * copies, otherwise the cached entry's keyName is rewritten.
+   *
+   * @return updated count of entries collected so far
+   */
+  @SuppressWarnings("parameternumber")
+  private int listStatusFindFilesInTableCache(
+          Set<OzoneFileStatus> fileStatusList, Table<String,
+          OmKeyInfo> keyTable, long prefixKeyInDB, String seekKeyInDB,
+          String prefixKeyPath, String startKey, int countEntries,
+          long numEntries) {
+
+    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>>
+            cacheIter = keyTable.cacheIterator();
+
+    // TODO: recursive list will be handled in HDDS-4360 jira.
+    while (cacheIter.hasNext() && numEntries - countEntries > 0) {
+      Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>> entry =
+              cacheIter.next();
+      String cacheKey = entry.getKey().getCacheKey();
+      OmKeyInfo cacheOmKeyInfo = entry.getValue().getCacheValue();
+      // cacheOmKeyInfo is null if an entry is deleted in cache
+      if(cacheOmKeyInfo == null){
+        continue;
+      }
+
+      // Expand the leaf name to the full key path before adding.
+      cacheOmKeyInfo.setFileName(cacheOmKeyInfo.getKeyName());
+      String fullKeyPath = OMFileRequest.getAbsolutePath(prefixKeyPath,
+              cacheOmKeyInfo.getKeyName());
+      cacheOmKeyInfo.setKeyName(fullKeyPath);
+
+      countEntries = addKeyInfoToFileStatusList(fileStatusList, prefixKeyInDB,
+              seekKeyInDB, startKey, countEntries, cacheKey, cacheOmKeyInfo,
+              false);
+    }
+    return countEntries;
+  }
+
+  /**
+   * Helper for listStatus: collects directory entries that are present in
+   * the dirTable cache (possibly not yet flushed to the DB) and adds the
+   * matching ones to the result set until numEntries is reached.
+   *
+   * @return updated count of entries collected so far
+   */
+  @SuppressWarnings("parameternumber")
+  private int listStatusFindDirsInTableCache(
+          Set<OzoneFileStatus> fileStatusList, Table<String,
+          OmDirectoryInfo> dirTable, long prefixKeyInDB, String seekKeyInDB,
+          String prefixKeyPath, String startKey, String volumeName,
+          String bucketName, int countEntries, long numEntries) {
+
+    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmDirectoryInfo>>>
+            dirCacheIter = dirTable.cacheIterator();
+    // seekKeyInDB takes two forms:
+    //   "1024/"  when startKey is null or empty
+    //   "1024/b" when startKey exists
+    // TODO: recursive list will be handled in HDDS-4360 jira.
+    while (dirCacheIter.hasNext() && countEntries < numEntries) {
+      Map.Entry<CacheKey<String>, CacheValue<OmDirectoryInfo>> cacheEntry =
+              dirCacheIter.next();
+      String cachedDbKey = cacheEntry.getKey().getCacheKey();
+      OmDirectoryInfo dirInfo = cacheEntry.getValue().getCacheValue();
+      // A null cache value marks a deleted entry; skip it.
+      if (dirInfo == null) {
+        continue;
+      }
+      String fullDirPath = OMFileRequest.getAbsolutePath(prefixKeyPath,
+              dirInfo.getName());
+      OmKeyInfo dirKeyInfo = OMFileRequest.getOmKeyInfo(volumeName,
+              bucketName, dirInfo, fullDirPath);
+
+      countEntries = addKeyInfoToFileStatusList(fileStatusList, prefixKeyInDB,
+              seekKeyInDB, startKey, countEntries, cachedDbKey, dirKeyInfo,
+              true);
+    }
+    return countEntries;
+  }
+
+  @SuppressWarnings("parameternumber")
+  private int addKeyInfoToFileStatusList(Set<OzoneFileStatus> fileStatusList,
+      long prefixKeyInDB, String seekKeyInDB, String startKey,
+      int countEntries, String cacheKey, OmKeyInfo cacheOmKeyInfo,
+      boolean isDirectory) {
+    // seekKeyInDB will have two type of values.
+    // 1. "1024/"   -> startKey is null or empty
+    // 2. "1024/b"  -> startKey exists
+    if (StringUtils.isBlank(startKey)) {
+      // startKey is null or empty, then the seekKeyInDB="1024/"
+      if (cacheKey.startsWith(seekKeyInDB)) {
+        OzoneFileStatus fileStatus = new OzoneFileStatus(cacheOmKeyInfo,
+                scmBlockSize, isDirectory);
+        fileStatusList.add(fileStatus);
+        countEntries++;
+      }
+    } else {
+      // startKey not empty, then the seekKeyInDB="1024/b" and
+      // seekKeyInDBWithOnlyParentID = "1024/". This is to avoid case of
+      // parentID with "102444" cache entries.
+      // Here, it has to list all the keys after "1024/b" and requires >=0
+      // string comparison.
+      String seekKeyInDBWithOnlyParentID = prefixKeyInDB + OM_KEY_PREFIX;
+      if (cacheKey.startsWith(seekKeyInDBWithOnlyParentID) &&
+              cacheKey.compareTo(seekKeyInDB) >= 0) {
+        OzoneFileStatus fileStatus = new OzoneFileStatus(cacheOmKeyInfo,
+                scmBlockSize, isDirectory);
+        fileStatusList.add(fileStatus);
+        countEntries++;
+      }
+    }
+    return countEntries;
+  }
+
   private String getNextGreaterString(String volumeName, String bucketName,
       String keyPrefix) throws IOException {
     // Increment the last character of the string and return the new ozone key.
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java
index c48ff78..fa66766 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java
@@ -232,7 +232,6 @@ public class OMDirectoryCreateRequestV1 extends OMDirectoryCreateRequest {
   /**
    * Construct OmDirectoryInfo for every parent directory in missing list.
    *
-   * @param ozoneManager Ozone Manager
    * @param keyArgs      key arguments
    * @param pathInfo     list of parent directories to be created and its ACLs
    * @param trxnLogIndex transaction log index id
@@ -243,7 +242,6 @@ public class OMDirectoryCreateRequestV1 extends OMDirectoryCreateRequest {
           OzoneManager ozoneManager, KeyArgs keyArgs,
           OMFileRequest.OMPathInfoV1 pathInfo, long trxnLogIndex)
           throws IOException {
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
     List<OmDirectoryInfo> missingParentInfos = new ArrayList<>();
 
     // The base id is left shifted by 8 bits for creating space to
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index ce8d49b..5225d82 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -20,11 +20,15 @@ package org.apache.hadoop.ozone.om.request.file;
 
 import java.io.IOException;
 import java.nio.file.Path;
+import java.nio.file.Paths;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
 
 import com.google.common.base.Optional;
+import com.google.common.base.Strings;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
@@ -34,7 +38,10 @@ import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -432,7 +439,6 @@ public final class OMFileRequest {
    * @param omFileInfo        key info
    * @param fileName          file name
    * @param trxnLogIndex      transaction log index
-   * @return dbOmFileInfo, which keeps leaf node name in keyName field
    */
   public static void addOpenFileTableCacheEntry(
           OMMetadataManager omMetadataManager, String dbOpenFileName,
@@ -460,7 +466,6 @@ public final class OMFileRequest {
    * @param omFileInfo        key info
    * @param fileName          file name
    * @param trxnLogIndex      transaction log index
-   * @return dbOmFileInfo, which keeps leaf node name in keyName field
    */
   public static void addFileTableCacheEntry(
           OMMetadataManager omMetadataManager, String dbFileKey,
@@ -552,4 +557,112 @@ public final class OMFileRequest {
     return dbOmKeyInfo;
   }
 
+  /**
+   * Gets OmKeyInfo if exists for the given key name in the DB.
+   *
+   * @param omMetadataMgr metadata manager
+   * @param volumeName    volume name
+   * @param bucketName    bucket name
+   * @param keyName       key name
+   * @param scmBlockSize  scm block size
+   * @return OzoneFileStatus
+   * @throws IOException DB failure
+   */
+  @Nullable
+  public static OzoneFileStatus getOMKeyInfoIfExists(
+      OMMetadataManager omMetadataMgr, String volumeName, String bucketName,
+      String keyName, long scmBlockSize) throws IOException {
+
+    Path keyPath = Paths.get(keyName);
+    Iterator<Path> elements = keyPath.iterator();
+    String bucketKey = omMetadataMgr.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataMgr.getBucketTable().get(bucketKey);
+
+    long lastKnownParentId = omBucketInfo.getObjectID();
+    OmDirectoryInfo omDirInfo = null;
+    while (elements.hasNext()) {
+      String fileName = elements.next().toString();
+
+      // For example, /vol1/buck1/a/b/c/d/e/file1.txt
+      // 1. Do lookup path component on directoryTable starting from bucket
+      // 'buck1' to the leaf node component, which is 'file1.txt'.
+      // 2. If there is no dir exists for the leaf node component 'file1.txt'
+      // then do look it on fileTable.
+      String dbNodeName = omMetadataMgr.getOzonePathKey(
+              lastKnownParentId, fileName);
+      omDirInfo = omMetadataMgr.getDirectoryTable().get(dbNodeName);
+
+      if (omDirInfo != null) {
+        lastKnownParentId = omDirInfo.getObjectID();
+      } else if (!elements.hasNext()) {
+        // reached last path component. Check file exists for the given path.
+        OmKeyInfo omKeyInfo = OMFileRequest.getOmKeyInfoFromFileTable(false,
+                omMetadataMgr, dbNodeName, keyName);
+        if (omKeyInfo != null) {
+          return new OzoneFileStatus(omKeyInfo, scmBlockSize, false);
+        }
+      } else {
+        // Missing intermediate directory and just return null;
+        // key not found in DB
+        return null;
+      }
+    }
+
+    if (omDirInfo != null) {
+      OmKeyInfo omKeyInfo = getOmKeyInfo(volumeName, bucketName, omDirInfo,
+              keyName);
+      return new OzoneFileStatus(omKeyInfo, scmBlockSize, true);
+    }
+
+    // key not found in DB
+    return null;
+  }
+
+  /**
+   * Prepare OmKeyInfo from OmDirectoryInfo.
+   *
+   * @param volumeName volume name
+   * @param bucketName bucket name
+   * @param dirInfo    directory info
+   * @param keyName    user given key name
+   * @return OmKeyInfo object
+   */
+  @NotNull
+  public static OmKeyInfo getOmKeyInfo(String volumeName, String bucketName,
+      OmDirectoryInfo dirInfo, String keyName) {
+
+    OmKeyInfo.Builder builder = new OmKeyInfo.Builder();
+    builder.setParentObjectID(dirInfo.getParentObjectID());
+    builder.setKeyName(keyName);
+    builder.setAcls(dirInfo.getAcls());
+    builder.addAllMetadata(dirInfo.getMetadata());
+    builder.setVolumeName(volumeName);
+    builder.setBucketName(bucketName);
+    builder.setCreationTime(dirInfo.getCreationTime());
+    builder.setModificationTime(dirInfo.getModificationTime());
+    builder.setObjectID(dirInfo.getObjectID());
+    builder.setUpdateID(dirInfo.getUpdateID());
+    builder.setFileName(dirInfo.getName());
+    builder.setReplicationType(HddsProtos.ReplicationType.RATIS);
+    builder.setReplicationFactor(HddsProtos.ReplicationFactor.ONE);
+    builder.setOmKeyLocationInfos(Collections.singletonList(
+            new OmKeyLocationInfoGroup(0, new ArrayList<>())));
+    return builder.build();
+  }
+
+  /**
+   * Returns absolute path.
+   *
+   * @param prefixName prefix path
+   * @param fileName   file name
+   * @return absolute path
+   */
+  @NotNull
+  public static String getAbsolutePath(String prefixName, String fileName) {
+    if (Strings.isNullOrEmpty(prefixName)) {
+      return fileName;
+    }
+    return prefixName.concat(OzoneConsts.OZONE_URI_DELIMITER).concat(fileName);
+  }
 }

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 24/29: HDDS-4973. [FSO] Missed to cleanup new FileTables in OMRequests (#2035)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit ad4eb7583377f3f74f395e43fa607f94e4c273e1
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Sat Mar 13 11:34:03 2021 +0530

    HDDS-4973. [FSO] Missed to cleanup new FileTables in OMRequests (#2035)
---
 .../hadoop/ozone/om/request/file/OMFileCreateRequestV1.java       | 3 +--
 .../hadoop/ozone/om/request/key/OMAllocateBlockRequestV1.java     | 3 +--
 .../apache/hadoop/ozone/om/request/key/OMKeyCreateRequestV1.java  | 3 +--
 .../apache/hadoop/ozone/om/request/key/OMKeyRenameRequestV1.java  | 3 +--
 .../request/s3/multipart/S3InitiateMultipartUploadRequestV1.java  | 3 +--
 .../hadoop/ozone/om/response/file/OMFileCreateResponseV1.java     | 8 ++++++++
 .../hadoop/ozone/om/response/key/OMAllocateBlockResponseV1.java   | 8 ++++++++
 .../hadoop/ozone/om/response/key/OMKeyCreateResponseV1.java       | 8 ++++++++
 .../hadoop/ozone/om/response/key/OMKeyRenameResponseV1.java       | 8 ++++++++
 .../s3/multipart/S3InitiateMultipartUploadResponseV1.java         | 8 ++++++++
 10 files changed, 45 insertions(+), 10 deletions(-)

diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
index f35c9a5..5afaf66 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
@@ -31,7 +31,6 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponse;
 import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponseV1;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileResponse;
@@ -226,7 +225,7 @@ public class OMFileCreateRequestV1 extends OMFileCreateRequest {
       exception = ex;
       omMetrics.incNumCreateFileFails();
       omResponse.setCmdType(Type.CreateFile);
-      omClientResponse = new OMFileCreateResponse(createErrorOMResponse(
+      omClientResponse = new OMFileCreateResponseV1(createErrorOMResponse(
             omResponse, exception));
     } finally {
       addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestV1.java
index a6a2558..a4d581c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestV1.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.key.OMAllocateBlockResponse;
 import org.apache.hadoop.ozone.om.response.key.OMAllocateBlockResponseV1;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AllocateBlockRequest;
@@ -167,7 +166,7 @@ public class OMAllocateBlockRequestV1 extends OMAllocateBlockRequest {
     } catch (IOException ex) {
       omMetrics.incNumBlockAllocateCallFails();
       exception = ex;
-      omClientResponse = new OMAllocateBlockResponse(createErrorOMResponse(
+      omClientResponse = new OMAllocateBlockResponseV1(createErrorOMResponse(
               omResponse, exception));
       LOG.error("Allocate Block failed. Volume:{}, Bucket:{}, OpenKey:{}. " +
               "Exception:{}", volumeName, bucketName, openKeyName, exception);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestV1.java
index a49c01e..c01c9ee 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestV1.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponseV1;
-import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateKeyResponse;
@@ -212,7 +211,7 @@ public class OMKeyCreateRequestV1 extends OMKeyCreateRequest {
       exception = ex;
       omMetrics.incNumKeyAllocateFails();
       omResponse.setCmdType(Type.CreateKey);
-      omClientResponse = new OMKeyCreateResponse(
+      omClientResponse = new OMKeyCreateResponseV1(
               createErrorOMResponse(omResponse, exception));
     } finally {
       addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestV1.java
index ba022c5..56dcd6b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestV1.java
@@ -37,7 +37,6 @@ import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.key.OMKeyRenameResponse;
 import org.apache.hadoop.ozone.om.response.key.OMKeyRenameResponseV1;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.*;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
@@ -197,7 +196,7 @@ public class OMKeyRenameRequestV1 extends OMKeyRenameRequest {
     } catch (IOException ex) {
       result = Result.FAILURE;
       exception = ex;
-      omClientResponse = new OMKeyRenameResponse(createErrorOMResponse(
+      omClientResponse = new OMKeyRenameResponseV1(createErrorOMResponse(
               omResponse, exception));
     } finally {
       addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestV1.java
index d472bc1..06b2426 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestV1.java
@@ -35,7 +35,6 @@ import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestV1;
 import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.s3.multipart.S3InitiateMultipartUploadResponse;
 import org.apache.hadoop.ozone.om.response.s3.multipart.S3InitiateMultipartUploadResponseV1;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateRequest;
@@ -217,7 +216,7 @@ public class S3InitiateMultipartUploadRequestV1
     } catch (IOException ex) {
       result = Result.FAILURE;
       exception = ex;
-      omClientResponse = new S3InitiateMultipartUploadResponse(
+      omClientResponse = new S3InitiateMultipartUploadResponseV1(
           createErrorOMResponse(omResponse, exception));
     } finally {
       addResponseToDoubleBuffer(transactionLogIndex, omClientResponse,
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseV1.java
index 7325def..beb0c99 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseV1.java
@@ -53,6 +53,14 @@ public class OMFileCreateResponseV1 extends OMFileCreateResponse {
     this.parentDirInfos = parentDirInfos;
   }
 
+  /**
+   * For when the request is not successful.
+   * For a successful request, the other constructor should be used.
+   */
+  public OMFileCreateResponseV1(@Nonnull OMResponse omResponse) {
+    super(omResponse);
+  }
+
   @Override
   public void addToDBBatch(OMMetadataManager omMetadataMgr,
                               BatchOperation batchOp) throws IOException {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponseV1.java
index 138cca1..d750457 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponseV1.java
@@ -43,6 +43,14 @@ public class OMAllocateBlockResponseV1 extends OMAllocateBlockResponse {
     super(omResponse, omKeyInfo, clientID, omBucketInfo);
   }
 
+  /**
+   * For when the request is not successful.
+   * For a successful request, the other constructor should be used.
+   */
+  public OMAllocateBlockResponseV1(@Nonnull OMResponse omResponse) {
+    super(omResponse);
+  }
+
   @Override
   public void addToDBBatch(OMMetadataManager omMetadataManager,
       BatchOperation batchOperation) throws IOException {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponseV1.java
index 59c7edf..04237d7 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponseV1.java
@@ -45,4 +45,12 @@ public class OMKeyCreateResponseV1 extends OMFileCreateResponseV1 {
     super(omResponse, omKeyInfo, parentDirInfos, openKeySessionID,
             omBucketInfo);
   }
+
+  /**
+   * For when the request is not successful.
+   * For a successful request, the other constructor should be used.
+   */
+  public OMKeyCreateResponseV1(@Nonnull OMResponse omResponse) {
+    super(omResponse);
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponseV1.java
index 7a9b159..4437aa3 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponseV1.java
@@ -46,6 +46,14 @@ public class OMKeyRenameResponseV1 extends OMKeyRenameResponse {
     this.isRenameDirectory = isRenameDirectory;
   }
 
+  /**
+   * For when the request is not successful.
+   * For a successful request, the other constructor should be used.
+   */
+  public OMKeyRenameResponseV1(@Nonnull OMResponse omResponse) {
+    super(omResponse);
+  }
+
   @Override
   public void addToDBBatch(OMMetadataManager omMetadataManager,
                            BatchOperation batchOperation) throws IOException {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponseV1.java
index ff3e63f..4c3f00f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponseV1.java
@@ -54,6 +54,14 @@ public class S3InitiateMultipartUploadResponseV1 extends
     this.parentDirInfos = parentDirInfos;
   }
 
+  /**
+   * For when the request is not successful.
+   * For a successful request, the other constructor should be used.
+   */
+  public S3InitiateMultipartUploadResponseV1(@Nonnull OMResponse omResponse) {
+    super(omResponse);
+  }
+
   @Override
   public void addToDBBatch(OMMetadataManager omMetadataManager,
       BatchOperation batchOperation) throws IOException {

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 09/29: HDDS-2942. Putkey : create key table entries for intermediate directories in the key path (#1764)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 765b00b2046cf41913710320f3b45bc255b48d00
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Mon Jan 11 20:03:55 2021 +0530

    HDDS-2942. Putkey : create key table entries for intermediate directories in the key path (#1764)
---
 .../apache/hadoop/ozone/om/TestObjectStoreV1.java  | 215 +++++++++++++++++++++
 .../om/ratis/utils/OzoneManagerRatisUtils.java     |   4 +
 .../om/request/file/OMFileCreateRequestV1.java     |   3 +-
 .../ozone/om/request/key/OMKeyCommitRequestV1.java |   5 +-
 .../ozone/om/request/key/OMKeyCreateRequest.java   |  22 ++-
 .../OMKeyCreateRequestV1.java}                     | 148 ++++++--------
 .../om/response/file/OMFileCreateResponseV1.java   |   3 +-
 .../om/response/key/OMAllocateBlockResponseV1.java |   1 -
 .../om/response/key/OMKeyCommitResponseV1.java     |   2 -
 ...kResponseV1.java => OMKeyCreateResponseV1.java} |  42 ++--
 .../om/request/key/TestOMKeyCreateRequest.java     |  53 ++---
 .../om/request/key/TestOMKeyCreateRequestV1.java   | 129 +++++++++++++
 .../key/TestOMAllocateBlockResponseV1.java         |   5 +-
 .../om/response/key/TestOMKeyCommitResponseV1.java |   3 +-
 ...ponseV1.java => TestOMKeyCreateResponseV1.java} |  77 +++-----
 15 files changed, 501 insertions(+), 211 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
new file mode 100644
index 0000000..c6ae4ca
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
@@ -0,0 +1,215 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.util.StringUtils;
+import org.junit.Assert;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.UUID;
+
+public class TestObjectStoreV1 {
+
+  private static MiniOzoneCluster cluster = null;
+  private static OzoneConfiguration conf;
+  private static String clusterId;
+  private static String scmId;
+  private static String omId;
+
+  @Rule
+  public Timeout timeout = new Timeout(240000);
+
+  /**
+   * Create a MiniDFSCluster for testing.
+   * <p>
+   *
+   * @throws IOException
+   */
+  @BeforeClass
+  public static void init() throws Exception {
+    conf = new OzoneConfiguration();
+    clusterId = UUID.randomUUID().toString();
+    scmId = UUID.randomUUID().toString();
+    omId = UUID.randomUUID().toString();
+    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    cluster = MiniOzoneCluster.newBuilder(conf)
+            .setClusterId(clusterId)
+            .setScmId(scmId)
+            .setOmId(omId)
+            .build();
+    cluster.waitForClusterToBeReady();
+  }
+
+  @Test
+  public void testCreateKey() throws Exception {
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    String parent = "a/b/c/";
+    String file = "key" + RandomStringUtils.randomNumeric(5);
+    String key = parent + file;
+
+    OzoneClient client = cluster.getClient();
+
+    ObjectStore objectStore = client.getObjectStore();
+    objectStore.createVolume(volumeName);
+
+    OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
+    Assert.assertTrue(ozoneVolume.getName().equals(volumeName));
+    ozoneVolume.createBucket(bucketName);
+
+    OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
+    Assert.assertTrue(ozoneBucket.getName().equals(bucketName));
+
+    Table<String, OmKeyInfo> openKeyTable =
+            cluster.getOzoneManager().getMetadataManager().getOpenKeyTable();
+
+    // before file creation
+    verifyKeyInFileTable(openKeyTable, file, 0, true);
+
+    String data = "random data";
+    OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(key,
+            data.length(), ReplicationType.RATIS, ReplicationFactor.ONE,
+            new HashMap<>());
+
+    OmDirectoryInfo dirPathC = getDirInfo(volumeName, bucketName, parent);
+    Assert.assertNotNull("Failed to find dir path: a/b/c", dirPathC);
+
+    // after file creation
+    verifyKeyInOpenFileTable(openKeyTable, file, dirPathC.getObjectID(),
+            false);
+
+    ozoneOutputStream.write(data.getBytes(), 0, data.length());
+    ozoneOutputStream.close();
+
+    Table<String, OmKeyInfo> keyTable =
+            cluster.getOzoneManager().getMetadataManager().getKeyTable();
+
+    // After closing the file. File entry should be removed from openFileTable
+    // and it should be added to fileTable.
+    verifyKeyInFileTable(keyTable, file, dirPathC.getObjectID(), false);
+    verifyKeyInOpenFileTable(openKeyTable, file, dirPathC.getObjectID(),
+            true);
+
+    ozoneBucket.deleteKey(key);
+
+    // after key delete
+    verifyKeyInFileTable(keyTable, file, dirPathC.getObjectID(), true);
+    verifyKeyInOpenFileTable(openKeyTable, file, dirPathC.getObjectID(),
+            true);
+  }
+
+  private OmDirectoryInfo getDirInfo(String volumeName, String bucketName,
+      String parentKey) throws Exception {
+    OMMetadataManager omMetadataManager =
+            cluster.getOzoneManager().getMetadataManager();
+    long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName,
+            omMetadataManager);
+    String[] pathComponents = StringUtils.split(parentKey, '/');
+    long parentId = bucketId;
+    OmDirectoryInfo dirInfo = null;
+    for (int indx = 0; indx < pathComponents.length; indx++) {
+      String pathElement = pathComponents[indx];
+      String dbKey = omMetadataManager.getOzonePathKey(parentId,
+              pathElement);
+      dirInfo =
+              omMetadataManager.getDirectoryTable().get(dbKey);
+      parentId = dirInfo.getObjectID();
+    }
+    return dirInfo;
+  }
+
+  private void verifyKeyInFileTable(Table<String, OmKeyInfo> fileTable,
+      String fileName, long parentID, boolean isEmpty) throws IOException {
+    TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> iterator
+            = fileTable.iterator();
+
+    if (isEmpty) {
+      Assert.assertTrue("Table is not empty!", fileTable.isEmpty());
+    } else {
+      Assert.assertFalse("Table is empty!", fileTable.isEmpty());
+      while (iterator.hasNext()) {
+        Table.KeyValue<String, OmKeyInfo> next = iterator.next();
+        Assert.assertEquals("Invalid Key: " + next.getKey(),
+                parentID + "/" + fileName, next.getKey());
+        OmKeyInfo omKeyInfo = next.getValue();
+        Assert.assertEquals("Invalid Key", fileName,
+                omKeyInfo.getFileName());
+        Assert.assertEquals("Invalid Key", fileName,
+                omKeyInfo.getKeyName());
+        Assert.assertEquals("Invalid Key", parentID,
+                omKeyInfo.getParentObjectID());
+      }
+    }
+  }
+
+  private void verifyKeyInOpenFileTable(Table<String, OmKeyInfo> openFileTable,
+      String fileName, long parentID, boolean isEmpty) throws IOException {
+    TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> iterator
+            = openFileTable.iterator();
+
+    if (isEmpty) {
+      Assert.assertTrue("Table is not empty!", openFileTable.isEmpty());
+    } else {
+      Assert.assertFalse("Table is empty!", openFileTable.isEmpty());
+      while (iterator.hasNext()) {
+        Table.KeyValue<String, OmKeyInfo> next = iterator.next();
+        // used startsWith because the key format is,
+        // <parentID>/fileName/<clientID> and clientID is not visible.
+        Assert.assertTrue("Invalid Key: " + next.getKey(),
+                next.getKey().startsWith(parentID + "/" + fileName));
+        OmKeyInfo omKeyInfo = next.getValue();
+        Assert.assertEquals("Invalid Key", fileName,
+                omKeyInfo.getFileName());
+        Assert.assertEquals("Invalid Key", fileName,
+                omKeyInfo.getKeyName());
+        Assert.assertEquals("Invalid Key", parentID,
+                omKeyInfo.getParentObjectID());
+      }
+    }
+  }
+
+  /**
+   * Shutdown MiniDFSCluster.
+   */
+  @AfterClass
+  public static void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index 4702181..d1ca182 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.ozone.om.request.key.OMAllocateBlockRequestV1;
 import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequestV1;
 import org.apache.hadoop.ozone.om.request.key.OMKeyCreateRequest;
+import org.apache.hadoop.ozone.om.request.key.OMKeyCreateRequestV1;
 import org.apache.hadoop.ozone.om.request.key.OMKeyDeleteRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyDeleteRequestV1;
 import org.apache.hadoop.ozone.om.request.key.OMKeyPurgeRequest;
@@ -140,6 +141,9 @@ public final class OzoneManagerRatisUtils {
       }
       return new OMAllocateBlockRequest(omRequest);
     case CreateKey:
+      if (omLayoutVersionV1) {
+        return new OMKeyCreateRequestV1(omRequest);
+      }
       return new OMKeyCreateRequest(omRequest);
     case CommitKey:
       if (omLayoutVersionV1) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
index e38908a..f35c9a5 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
@@ -217,7 +217,8 @@ public class OMFileCreateRequestV1 extends OMFileCreateRequest {
           .setOpenVersion(openVersion).build())
           .setCmdType(Type.CreateFile);
       omClientResponse = new OMFileCreateResponseV1(omResponse.build(),
-              omFileInfo, missingParentInfos, clientID, omBucketInfo.copyObject());
+              omFileInfo, missingParentInfos, clientID,
+              omBucketInfo.copyObject());
 
       result = Result.SUCCESS;
     } catch (IOException ex) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java
index 7d99119..64991de 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.ozone.om.OMMetrics;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
@@ -91,7 +90,6 @@ public class OMKeyCommitRequestV1 extends OMKeyCommitRequest {
 
     IOException exception = null;
     OmKeyInfo omKeyInfo = null;
-    OmVolumeArgs omVolumeArgs = null;
     OmBucketInfo omBucketInfo = null;
     OMClientResponse omClientResponse = null;
     boolean bucketLockAcquired = false;
@@ -168,8 +166,7 @@ public class OMKeyCommitRequestV1 extends OMKeyCommitRequest {
       omBucketInfo.incrUsedBytes(correctedSpace);
 
       omClientResponse = new OMKeyCommitResponseV1(omResponse.build(),
-              omKeyInfo, dbFileKey, dbOpenFileKey, omVolumeArgs,
-              omBucketInfo.copyObject());
+              omKeyInfo, dbFileKey, dbOpenFileKey, omBucketInfo.copyObject());
 
       result = Result.SUCCESS;
     } catch (IOException ex) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
index 55f4990..68c0c36 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
@@ -349,24 +349,34 @@ public class OMKeyCreateRequest extends OMKeyRequest {
         OMAction.ALLOCATE_KEY, auditMap, exception,
         getOmRequest().getUserInfo()));
 
+    logResult(createKeyRequest, omMetrics, exception, result,
+            numMissingParents);
+
+    return omClientResponse;
+  }
+
+  protected void logResult(CreateKeyRequest createKeyRequest,
+      OMMetrics omMetrics, IOException exception, Result result,
+       int numMissingParents) {
     switch (result) {
     case SUCCESS:
       // Missing directories are created immediately, counting that here.
       // The metric for the key is incremented as part of the key commit.
       omMetrics.incNumKeys(numMissingParents);
-      LOG.debug("Key created. Volume:{}, Bucket:{}, Key:{}", volumeName,
-          bucketName, keyName);
+      LOG.debug("Key created. Volume:{}, Bucket:{}, Key:{}",
+              createKeyRequest.getKeyArgs().getVolumeName(),
+              createKeyRequest.getKeyArgs().getBucketName(),
+              createKeyRequest.getKeyArgs().getKeyName());
       break;
     case FAILURE:
       LOG.error("Key creation failed. Volume:{}, Bucket:{}, Key{}. " +
-          "Exception:{}", volumeName, bucketName, keyName, exception);
+          "Exception:{}", createKeyRequest.getKeyArgs().getVolumeName(),
+              createKeyRequest.getKeyArgs().getBucketName(),
+              createKeyRequest.getKeyArgs().getKeyName(), exception);
       break;
     default:
       LOG.error("Unrecognized Result for OMKeyCreateRequest: {}",
           createKeyRequest);
     }
-
-    return omClientResponse;
   }
-
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestV1.java
similarity index 66%
copy from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
copy to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestV1.java
index e38908a..416e462 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestV1.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.ozone.om.request.file;
+package org.apache.hadoop.ozone.om.request.key;
 
 import com.google.common.base.Optional;
 import org.apache.hadoop.ozone.audit.OMAction;
@@ -29,15 +29,15 @@ import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestV1;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponse;
-import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponseV1;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponseV1;
+import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateKeyResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
@@ -51,16 +51,19 @@ import java.util.List;
 import java.util.Map;
 import java.util.stream.Collectors;
 
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS;
+import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH;
 
 /**
- * Handles create file request layout version1.
+ * Handles CreateKey request layout version1.
  */
-public class OMFileCreateRequestV1 extends OMFileCreateRequest {
-
+public class OMKeyCreateRequestV1 extends OMKeyCreateRequest {
   private static final Logger LOG =
-      LoggerFactory.getLogger(OMFileCreateRequestV1.class);
-  public OMFileCreateRequestV1(OMRequest omRequest) {
+          LoggerFactory.getLogger(OMKeyCreateRequestV1.class);
+
+  public OMKeyCreateRequestV1(OMRequest omRequest) {
     super(omRequest);
   }
 
@@ -69,61 +72,42 @@ public class OMFileCreateRequestV1 extends OMFileCreateRequest {
   public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
       long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
 
-    CreateFileRequest createFileRequest = getOmRequest().getCreateFileRequest();
-    KeyArgs keyArgs = createFileRequest.getKeyArgs();
+    OzoneManagerProtocolProtos.CreateKeyRequest createKeyRequest =
+            getOmRequest().getCreateKeyRequest();
+
+    OzoneManagerProtocolProtos.KeyArgs keyArgs = createKeyRequest.getKeyArgs();
     Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
 
     String volumeName = keyArgs.getVolumeName();
     String bucketName = keyArgs.getBucketName();
     String keyName = keyArgs.getKeyName();
 
-    // if isRecursive is true, file would be created even if parent
-    // directories does not exist.
-    boolean isRecursive = createFileRequest.getIsRecursive();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("File create for : " + volumeName + "/" + bucketName + "/"
-          + keyName + ":" + isRecursive);
-    }
-
-    // if isOverWrite is true, file would be over written.
-    boolean isOverWrite = createFileRequest.getIsOverwrite();
-
     OMMetrics omMetrics = ozoneManager.getMetrics();
-    omMetrics.incNumCreateFile();
+    omMetrics.incNumKeyAllocates();
 
     OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-
-    boolean acquiredLock = false;
-
     OmBucketInfo omBucketInfo = null;
     final List<OmKeyLocationInfo> locations = new ArrayList<>();
-    List<OmDirectoryInfo> missingParentInfos;
-    int numKeysCreated = 0;
 
+    boolean acquireLock = false;
     OMClientResponse omClientResponse = null;
-    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
-        getOmRequest());
+    OzoneManagerProtocolProtos.OMResponse.Builder omResponse =
+            OmResponseUtil.getOMResponseBuilder(getOmRequest());
     IOException exception = null;
-    Result result = null;
+    Result result;
+    List<OmDirectoryInfo> missingParentInfos;
+    int numKeysCreated = 0;
     try {
       keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
       volumeName = keyArgs.getVolumeName();
       bucketName = keyArgs.getBucketName();
 
-      if (keyName.length() == 0) {
-        // Check if this is the root of the filesystem.
-        throw new OMException("Can not write to directory: " + keyName,
-                OMException.ResultCodes.NOT_A_FILE);
-      }
-
       // check Acl
       checkKeyAcls(ozoneManager, volumeName, bucketName, keyName,
-          IAccessAuthorizer.ACLType.CREATE, OzoneObj.ResourceType.KEY);
-
-      // acquire lock
-      acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
-          volumeName, bucketName);
+              IAccessAuthorizer.ACLType.CREATE, OzoneObj.ResourceType.KEY);
 
+      acquireLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
+              volumeName, bucketName);
       validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
 
       OmKeyInfo dbFileInfo = null;
@@ -144,12 +128,14 @@ public class OMFileCreateRequestV1 extends OMFileCreateRequest {
         }
       }
 
-      // check if the file or directory already existed in OM
-      checkDirectoryResult(keyName, isOverWrite,
-              pathInfoV1.getDirectoryResult());
-
-      if (!isRecursive) {
-        checkAllParentsExist(keyArgs, pathInfoV1);
+      // Check if a file or directory exists with same key name.
+      if (pathInfoV1.getDirectoryResult() == DIRECTORY_EXISTS) {
+        throw new OMException("Cannot write to " +
+                "directory. createIntermediateDirs behavior is enabled and " +
+                "hence / has special interpretation: " + keyName, NOT_A_FILE);
+      } else if (pathInfoV1.getDirectoryResult() == FILE_EXISTS_IN_GIVENPATH) {
+        throw new OMException("Can not create file: " + keyName +
+                " as there is already file in the given path", NOT_A_FILE);
       }
 
       // add all missing parents to dir table
@@ -162,7 +148,7 @@ public class OMFileCreateRequestV1 extends OMFileCreateRequest {
 
       // do open key
       OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().get(
-          omMetadataManager.getBucketKey(volumeName, bucketName));
+              omMetadataManager.getBucketKey(volumeName, bucketName));
 
       OmKeyInfo omFileInfo = prepareFileInfo(omMetadataManager, keyArgs,
               dbFileInfo, keyArgs.getDataSize(), locations,
@@ -172,15 +158,15 @@ public class OMFileCreateRequestV1 extends OMFileCreateRequest {
               ozoneManager.isRatisEnabled());
 
       long openVersion = omFileInfo.getLatestVersionLocations().getVersion();
-      long clientID = createFileRequest.getClientID();
+      long clientID = createKeyRequest.getClientID();
       String dbOpenFileName = omMetadataManager.getOpenFileName(
               pathInfoV1.getLastKnownParentId(), pathInfoV1.getLeafNodeName(),
               clientID);
 
       // Append new blocks
       List<OmKeyLocationInfo> newLocationList = keyArgs.getKeyLocationsList()
-          .stream().map(OmKeyLocationInfo::getFromProtobuf)
-          .collect(Collectors.toList());
+              .stream().map(OmKeyLocationInfo::getFromProtobuf)
+              .collect(Collectors.toList());
       omFileInfo.appendNewBlocks(newLocationList, false);
 
       omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
@@ -210,51 +196,39 @@ public class OMFileCreateRequestV1 extends OMFileCreateRequest {
 
       // Prepare response. Sets user given full key name in the 'keyName'
       // attribute in response object.
-      int clientVersion = getOmRequest().getVersion();
-      omResponse.setCreateFileResponse(CreateFileResponse.newBuilder()
-          .setKeyInfo(omFileInfo.getProtobuf(keyName, clientVersion))
-          .setID(clientID)
-          .setOpenVersion(openVersion).build())
-          .setCmdType(Type.CreateFile);
-      omClientResponse = new OMFileCreateResponseV1(omResponse.build(),
-              omFileInfo, missingParentInfos, clientID, omBucketInfo.copyObject());
+      omResponse.setCreateKeyResponse(CreateKeyResponse.newBuilder()
+              .setKeyInfo(omFileInfo.getProtobuf(keyName))
+              .setID(clientID)
+              .setOpenVersion(openVersion).build())
+              .setCmdType(Type.CreateKey);
+      omClientResponse = new OMKeyCreateResponseV1(omResponse.build(),
+              omFileInfo, missingParentInfos, clientID,
+              omBucketInfo.copyObject());
 
       result = Result.SUCCESS;
     } catch (IOException ex) {
       result = Result.FAILURE;
       exception = ex;
-      omMetrics.incNumCreateFileFails();
-      omResponse.setCmdType(Type.CreateFile);
-      omClientResponse = new OMFileCreateResponse(createErrorOMResponse(
-            omResponse, exception));
+      omMetrics.incNumKeyAllocateFails();
+      omResponse.setCmdType(Type.CreateKey);
+      omClientResponse = new OMKeyCreateResponse(
+              createErrorOMResponse(omResponse, exception));
     } finally {
       addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
-          omDoubleBufferHelper);
-      if (acquiredLock) {
+              omDoubleBufferHelper);
+      if (acquireLock) {
         omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
-            bucketName);
+                bucketName);
       }
     }
 
     // Audit Log outside the lock
     auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(
-        OMAction.CREATE_FILE, auditMap, exception,
-        getOmRequest().getUserInfo()));
-
-    switch (result) {
-    case SUCCESS:
-      omMetrics.incNumKeys(numKeysCreated);
-      LOG.debug("File created. Volume:{}, Bucket:{}, Key:{}", volumeName,
-          bucketName, keyName);
-      break;
-    case FAILURE:
-      LOG.error("File create failed. Volume:{}, Bucket:{}, Key{}.",
-          volumeName, bucketName, keyName, exception);
-      break;
-    default:
-      LOG.error("Unrecognized Result for OMFileCreateRequest: {}",
-          createFileRequest);
-    }
+            OMAction.ALLOCATE_KEY, auditMap, exception,
+            getOmRequest().getUserInfo()));
+
+    logResult(createKeyRequest, omMetrics, exception, result,
+            numKeysCreated);
 
     return omClientResponse;
   }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseV1.java
index ccaaa6b..7325def 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseV1.java
@@ -32,12 +32,13 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE;
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
 
 /**
  * Response for create file request layout version V1.
  */
-@CleanupTableInfo(cleanupTables = OPEN_FILE_TABLE)
+@CleanupTableInfo(cleanupTables = {DIRECTORY_TABLE, OPEN_FILE_TABLE})
 public class OMFileCreateResponseV1 extends OMFileCreateResponse {
 
   private List<OmDirectoryInfo> parentDirInfos;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponseV1.java
index ef8b639..138cca1 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponseV1.java
@@ -22,7 +22,6 @@ import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponseV1.java
index c0840e3..5f0a337 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponseV1.java
@@ -22,7 +22,6 @@ import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
@@ -43,7 +42,6 @@ public class OMKeyCommitResponseV1 extends OMKeyCommitResponse {
   public OMKeyCommitResponseV1(@Nonnull OMResponse omResponse,
                                @Nonnull OmKeyInfo omKeyInfo,
                                String ozoneKeyName, String openKeyName,
-                               @Nonnull OmVolumeArgs omVolumeArgs,
                                @Nonnull OmBucketInfo omBucketInfo) {
     super(omResponse, omKeyInfo, ozoneKeyName, openKeyName,
             omBucketInfo);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponseV1.java
similarity index 51%
copy from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponseV1.java
copy to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponseV1.java
index ef8b639..59c7edf 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponseV1.java
@@ -18,43 +18,31 @@
 
 package org.apache.hadoop.ozone.om.response.key;
 
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponseV1;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 
 import javax.annotation.Nonnull;
-import java.io.IOException;
+import java.util.List;
 
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE;
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
 
 /**
- * Response for AllocateBlock request layout version V1.
+ * Response for CreateKey request layout version V1.
  */
-@CleanupTableInfo(cleanupTables = {OPEN_FILE_TABLE})
-public class OMAllocateBlockResponseV1 extends OMAllocateBlockResponse {
-
-  public OMAllocateBlockResponseV1(@Nonnull OMResponse omResponse,
-      @Nonnull OmKeyInfo omKeyInfo, long clientID,
-      @Nonnull OmBucketInfo omBucketInfo) {
-    super(omResponse, omKeyInfo, clientID, omBucketInfo);
-  }
-
-  @Override
-  public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
-
-    OMFileRequest.addToOpenFileTable(omMetadataManager, batchOperation,
-            getOmKeyInfo(), getClientID());
-
-    // update bucket usedBytes.
-    omMetadataManager.getBucketTable().putWithBatch(batchOperation,
-            omMetadataManager.getBucketKey(getOmKeyInfo().getVolumeName(),
-                    getOmKeyInfo().getBucketName()), getOmBucketInfo());
+@CleanupTableInfo(cleanupTables = {DIRECTORY_TABLE, OPEN_FILE_TABLE})
+public class OMKeyCreateResponseV1 extends OMFileCreateResponseV1 {
+
+  public OMKeyCreateResponseV1(@Nonnull OMResponse omResponse,
+                               @Nonnull OmKeyInfo omKeyInfo,
+                               @Nonnull List<OmDirectoryInfo> parentDirInfos,
+                               long openKeySessionID,
+                               @Nonnull OmBucketInfo omBucketInfo) {
+    super(omResponse, omKeyInfo, parentDirInfos, openKeySessionID,
+            omBucketInfo);
   }
 }
-
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
index 7269957..3df6f38 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
@@ -74,7 +74,7 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
         doPreExecute(createKeyRequest(false, 0));
 
     OMKeyCreateRequest omKeyCreateRequest =
-        new OMKeyCreateRequest(modifiedOmRequest);
+            getOMKeyCreateRequest(modifiedOmRequest);
 
     // Add volume and bucket entries to DB.
     addVolumeAndBucketToDB(volumeName, bucketName,
@@ -82,8 +82,7 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
 
     long id = modifiedOmRequest.getCreateKeyRequest().getClientID();
 
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, id);
+    String openKey = getOpenKey(id);
 
     // Before calling
     OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
@@ -138,7 +137,7 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
         doPreExecute(createKeyRequest(true, partNumber));
 
     OMKeyCreateRequest omKeyCreateRequest =
-        new OMKeyCreateRequest(modifiedOmRequest);
+            getOMKeyCreateRequest(modifiedOmRequest);
 
     // Add volume and bucket entries to DB.
     addVolumeAndBucketToDB(volumeName, bucketName,
@@ -178,7 +177,7 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
         doPreExecute(createKeyRequest(false, 0));
 
     OMKeyCreateRequest omKeyCreateRequest =
-        new OMKeyCreateRequest(modifiedOmRequest);
+            getOMKeyCreateRequest(modifiedOmRequest);
 
 
     long id = modifiedOmRequest.getCreateKeyRequest().getClientID();
@@ -217,13 +216,12 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
             false, 0));
 
     OMKeyCreateRequest omKeyCreateRequest =
-        new OMKeyCreateRequest(modifiedOmRequest);
+            getOMKeyCreateRequest(modifiedOmRequest);
 
 
     long id = modifiedOmRequest.getCreateKeyRequest().getClientID();
 
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, id);
+    String openKey = getOpenKey(id);
 
     TestOMRequestUtils.addVolumeToDB(volumeName, OzoneConsts.OZONE,
         omMetadataManager);
@@ -248,8 +246,6 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
 
   }
 
-
-
   /**
    * This method calls preExecute and verify the modified request.
    * @param originalOMRequest
@@ -259,7 +255,7 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
   private OMRequest doPreExecute(OMRequest originalOMRequest) throws Exception {
 
     OMKeyCreateRequest omKeyCreateRequest =
-        new OMKeyCreateRequest(originalOMRequest);
+            getOMKeyCreateRequest(originalOMRequest);
 
     OMRequest modifiedOmRequest =
         omKeyCreateRequest.preExecute(ozoneManager);
@@ -349,7 +345,7 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
   @Test
   public void testKeyCreateWithFileSystemPathsEnabled() throws Exception {
 
-    OzoneConfiguration configuration = new OzoneConfiguration();
+    OzoneConfiguration configuration = getOzoneConfiguration();
     configuration.setBoolean(OZONE_OM_ENABLE_FILESYSTEM_PATHS, true);
     when(ozoneManager.getConfiguration()).thenReturn(configuration);
     when(ozoneManager.getEnableFileSystemPaths()).thenReturn(true);
@@ -367,8 +363,7 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
     createAndCheck(keyName);
 
     // Commit openKey entry.
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
-        keyName.substring(1), 0L, RATIS, THREE, omMetadataManager);
+    addToKeyTable(keyName);
 
     // Now create another file in same dir path.
     keyName = "/a/b/c/file2";
@@ -430,10 +425,15 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
 
   }
 
+  protected void addToKeyTable(String keyName) throws Exception {
+    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
+        keyName.substring(1), 0L, RATIS, THREE, omMetadataManager);
+  }
+
 
   private void checkNotAValidPath(String keyName) {
     OMRequest omRequest = createKeyRequest(false, 0, keyName);
-    OMKeyCreateRequest omKeyCreateRequest = new OMKeyCreateRequest(omRequest);
+    OMKeyCreateRequest omKeyCreateRequest = getOMKeyCreateRequest(omRequest);
 
     try {
       omKeyCreateRequest.preExecute(ozoneManager);
@@ -450,11 +450,11 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
   private void checkNotAFile(String keyName) throws Exception {
     OMRequest omRequest = createKeyRequest(false, 0, keyName);
 
-    OMKeyCreateRequest omKeyCreateRequest = new OMKeyCreateRequest(omRequest);
+    OMKeyCreateRequest omKeyCreateRequest = getOMKeyCreateRequest(omRequest);
 
     omRequest = omKeyCreateRequest.preExecute(ozoneManager);
 
-    omKeyCreateRequest = new OMKeyCreateRequest(omRequest);
+    omKeyCreateRequest = getOMKeyCreateRequest(omRequest);
 
     OMClientResponse omClientResponse =
         omKeyCreateRequest.validateAndUpdateCache(ozoneManager,
@@ -468,11 +468,11 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
   private void createAndCheck(String keyName) throws Exception {
     OMRequest omRequest = createKeyRequest(false, 0, keyName);
 
-    OMKeyCreateRequest omKeyCreateRequest = new OMKeyCreateRequest(omRequest);
+    OMKeyCreateRequest omKeyCreateRequest = getOMKeyCreateRequest(omRequest);
 
     omRequest = omKeyCreateRequest.preExecute(ozoneManager);
 
-    omKeyCreateRequest = new OMKeyCreateRequest(omRequest);
+    omKeyCreateRequest = getOMKeyCreateRequest(omRequest);
 
     OMClientResponse omClientResponse =
         omKeyCreateRequest.validateAndUpdateCache(ozoneManager,
@@ -483,7 +483,7 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
     checkCreatedPaths(omKeyCreateRequest, omRequest, keyName);
   }
 
-  private void checkCreatedPaths(OMKeyCreateRequest omKeyCreateRequest,
+  protected void checkCreatedPaths(OMKeyCreateRequest omKeyCreateRequest,
       OMRequest omRequest, String keyName) throws Exception {
     keyName = omKeyCreateRequest.validateAndNormalizeKey(true, keyName);
     // Check intermediate directories created or not.
@@ -497,9 +497,7 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
     Assert.assertNotNull(omKeyInfo);
   }
 
-
-
-  private void checkIntermediatePaths(Path keyPath) throws Exception {
+  protected long checkIntermediatePaths(Path keyPath) throws Exception {
     // Check intermediate paths are created
     keyPath = keyPath.getParent();
     while(keyPath != null) {
@@ -508,6 +506,15 @@ public class TestOMKeyCreateRequest extends TestOMKeyRequest {
               keyPath.toString())));
       keyPath = keyPath.getParent();
     }
+    return -1;
+  }
+
+  protected String getOpenKey(long id) throws IOException {
+    return omMetadataManager.getOpenKey(volumeName, bucketName,
+            keyName, id);
   }
 
+  protected OMKeyCreateRequest getOMKeyCreateRequest(OMRequest omRequest) {
+    return new OMKeyCreateRequest(omRequest);
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestV1.java
new file mode 100644
index 0000000..83c640d
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestV1.java
@@ -0,0 +1,129 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.key;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Assert;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Iterator;
+
+/**
+ * Tests OMKeyCreateRequestV1 class.
+ */
+public class TestOMKeyCreateRequestV1 extends TestOMKeyCreateRequest {
+
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    // omLayoutVersionV1 flag will be set while invoking OzoneManager#start()
+ * and it's not invoked in this test. Hence it is explicitly setting
+    // this configuration to populate prefix tables.
+    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
+    return config;
+  }
+
+  protected void addToKeyTable(String keyName) throws Exception {
+    Path keyPath = Paths.get(keyName);
+    long parentId = checkIntermediatePaths(keyPath);
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    OmKeyInfo omKeyInfo =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, fileName,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE,
+                    parentId + 1,
+                    parentId, 100, Time.now());
+    TestOMRequestUtils.addFileToKeyTable(false, false,
+            fileName, omKeyInfo, -1, 50, omMetadataManager);
+  }
+
+  protected void checkCreatedPaths(OMKeyCreateRequest omKeyCreateRequest,
+      OMRequest omRequest, String keyName) throws Exception {
+    keyName = omKeyCreateRequest.validateAndNormalizeKey(true, keyName);
+    // Check intermediate directories created or not.
+    Path keyPath = Paths.get(keyName);
+    long parentID = checkIntermediatePaths(keyPath);
+
+    // Check open key entry
+    String fileName = keyPath.getFileName().toString();
+    String openKey = omMetadataManager.getOpenFileName(parentID, fileName,
+            omRequest.getCreateKeyRequest().getClientID());
+    OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
+    Assert.assertNotNull(omKeyInfo);
+  }
+
+  protected long checkIntermediatePaths(Path keyPath) throws Exception {
+    // Check intermediate paths are created
+    keyPath = keyPath.getParent(); // skip the file name
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long lastKnownParentId = omBucketInfo.getObjectID();
+
+    Iterator<Path> elements = keyPath.iterator();
+    StringBuilder fullKeyPath = new StringBuilder(bucketKey);
+    while (elements.hasNext()) {
+      String fileName = elements.next().toString();
+      fullKeyPath.append(OzoneConsts.OM_KEY_PREFIX);
+      fullKeyPath.append(fileName);
+      String dbNodeName = omMetadataManager.getOzonePathKey(
+              lastKnownParentId, fileName);
+      OmDirectoryInfo omDirInfo = omMetadataManager.getDirectoryTable().
+              get(dbNodeName);
+
+      Assert.assertNotNull("Parent key path:" + fullKeyPath +
+              " doesn't exist", omDirInfo);
+      lastKnownParentId = omDirInfo.getObjectID();
+    }
+
+    return lastKnownParentId;
+  }
+
+  protected String getOpenKey(long id) throws IOException {
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    if (omBucketInfo != null) {
+      return omMetadataManager.getOpenFileName(omBucketInfo.getObjectID(),
+              keyName, id);
+    } else {
+      return omMetadataManager.getOpenFileName(1000, keyName, id);
+    }
+  }
+
+  protected OMKeyCreateRequest getOMKeyCreateRequest(OMRequest omRequest) {
+    return new OMKeyCreateRequestV1(omRequest);
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseV1.java
index e105a37..92b3efe 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseV1.java
@@ -24,7 +24,6 @@ import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
@@ -64,8 +63,8 @@ public class TestOMAllocateBlockResponseV1
 
   @NotNull
   protected OMAllocateBlockResponse getOmAllocateBlockResponse(
-          OmKeyInfo omKeyInfo, OmVolumeArgs omVolumeArgs,
-          OmBucketInfo omBucketInfo, OMResponse omResponse) {
+          OmKeyInfo omKeyInfo, OmBucketInfo omBucketInfo,
+          OMResponse omResponse) {
     return new OMAllocateBlockResponseV1(omResponse, omKeyInfo, clientID,
             omBucketInfo);
   }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
index 1e59ce8..4d68a4b 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
@@ -42,8 +42,7 @@ public class TestOMKeyCommitResponseV1 extends TestOMKeyCommitResponse {
           OzoneManagerProtocolProtos.OMResponse omResponse, String openKey,
           String ozoneKey) {
     Assert.assertNotNull(omBucketInfo);
-    return new OMKeyCommitResponseV1(
-            omResponse, omKeyInfo, ozoneKey, openKey, omVolumeArgs,
+    return new OMKeyCommitResponseV1(omResponse, omKeyInfo, ozoneKey, openKey,
             omBucketInfo);
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseV1.java
similarity index 60%
copy from hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
copy to hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseV1.java
index 1e59ce8..e51a06b 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseV1.java
@@ -19,61 +19,31 @@
 package org.apache.hadoop.ozone.om.response.key;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.apache.hadoop.util.Time;
 import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
 
 /**
- * Tests OMKeyCommitResponse layout version V1.
+ * Tests OMKeyCreateResponseV1.
  */
-public class TestOMKeyCommitResponseV1 extends TestOMKeyCommitResponse {
-
-  @NotNull
-  protected OMKeyCommitResponse getOmKeyCommitResponse(
-          OmVolumeArgs omVolumeArgs, OmKeyInfo omKeyInfo,
-          OzoneManagerProtocolProtos.OMResponse omResponse, String openKey,
-          String ozoneKey) {
-    Assert.assertNotNull(omBucketInfo);
-    return new OMKeyCommitResponseV1(
-            omResponse, omKeyInfo, ozoneKey, openKey, omVolumeArgs,
-            omBucketInfo);
-  }
+public class TestOMKeyCreateResponseV1 extends TestOMKeyCreateResponse {
 
   @NotNull
   @Override
-  protected OmKeyInfo getOmKeyInfo() {
-    Assert.assertNotNull(omBucketInfo);
-    return TestOMRequestUtils.createOmKeyInfo(volumeName,
-            omBucketInfo.getBucketName(), keyName, replicationType,
-            replicationFactor,
-            omBucketInfo.getObjectID() + 1,
-            omBucketInfo.getObjectID(), 100, Time.now());
-  }
-
-  @NotNull
-  @Override
-  protected void addKeyToOpenKeyTable() throws Exception {
-    Assert.assertNotNull(omBucketInfo);
-    long parentID = omBucketInfo.getObjectID();
-    long objectId = parentID + 10;
-
-    OmKeyInfo omKeyInfoV1 =
-            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
-                    HddsProtos.ReplicationType.RATIS,
-                    HddsProtos.ReplicationFactor.ONE, objectId, parentID, 100,
-                    Time.now());
-
-    String fileName = OzoneFSUtils.getFileName(keyName);
-    TestOMRequestUtils.addFileToKeyTable(true, false,
-            fileName, omKeyInfoV1, clientID, txnLogId, omMetadataManager);
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    // omLayoutVersionV1 flag will be set while invoking OzoneManager#start()
+  // and it's not invoked in this test. Hence it is explicitly setting
+    // this configuration to populate prefix tables.
+    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
+    return config;
   }
 
   @NotNull
@@ -86,21 +56,20 @@ public class TestOMKeyCommitResponseV1 extends TestOMKeyCommitResponse {
 
   @NotNull
   @Override
-  protected String getOzoneKey() {
+  protected OmKeyInfo getOmKeyInfo() {
     Assert.assertNotNull(omBucketInfo);
-    return omMetadataManager.getOzonePathKey(omBucketInfo.getObjectID(),
-            keyName);
+    return TestOMRequestUtils.createOmKeyInfo(volumeName,
+            omBucketInfo.getBucketName(), keyName, replicationType,
+            replicationFactor,
+            omBucketInfo.getObjectID() + 1,
+            omBucketInfo.getObjectID(), 100, Time.now());
   }
 
   @NotNull
-  @Override
-  protected OzoneConfiguration getOzoneConfiguration() {
-    OzoneConfiguration config = super.getOzoneConfiguration();
-    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
-    // omLayoutVersionV1 flag will be set while invoking OzoneManager#start()
-    // and its not invoked in this test. Hence it is explicitly setting
-    // this configuration to populate prefix tables.
-    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
-    return config;
+  protected OMKeyCreateResponse getOmKeyCreateResponse(OmKeyInfo keyInfo,
+      OmBucketInfo bucketInfo, OMResponse response) {
+
+    return new OMKeyCreateResponseV1(response, keyInfo, null, clientID,
+            bucketInfo);
   }
 }

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 01/29: HDDS-2949: mkdir : store directory entries in a separate table (#1404)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 7565c97c27b1e7044638cb096487d6d7a2b5a716
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Fri Oct 2 00:45:19 2020 +0530

    HDDS-2949: mkdir : store directory entries in a separate table (#1404)
---
 .../common/src/main/resources/ozone-default.xml    |  10 +
 .../org/apache/hadoop/ozone/om/OMConfigKeys.java   |   7 +
 .../hadoop/ozone/om/helpers/OmDirectoryInfo.java   | 266 +++++++++
 .../hadoop/ozone/om/helpers/OzoneFSUtils.java      |  15 +
 .../apache/hadoop/fs/ozone/TestOzoneDirectory.java | 207 +++++++
 .../src/main/proto/OmClientProtocol.proto          |  11 +
 .../apache/hadoop/ozone/om/OMMetadataManager.java  |  17 +
 .../hadoop/ozone/om/OmMetadataManagerImpl.java     |  26 +-
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |  14 +
 .../ozone/om/codec/OmDirectoryInfoCodec.java       |  60 ++
 .../om/ratis/utils/OzoneManagerRatisUtils.java     |  18 +
 .../om/request/file/OMDirectoryCreateRequest.java  |   3 +
 ...equest.java => OMDirectoryCreateRequestV1.java} | 324 +++++-----
 .../ozone/om/request/file/OMFileRequest.java       | 198 +++++++
 .../response/file/OMDirectoryCreateResponseV1.java | 103 ++++
 .../ozone/om/request/TestOMRequestUtils.java       |  37 ++
 .../file/TestOMDirectoryCreateRequestV1.java       | 649 +++++++++++++++++++++
 .../file/TestOMDirectoryCreateResponseV1.java      |  88 +++
 18 files changed, 1864 insertions(+), 189 deletions(-)

diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 8a5ebb5..89e07de 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -2760,4 +2760,14 @@
       during OzoneManager init/SCM bootstrap.
     </description>
   </property>
+
+  <property>
+    <name>ozone.om.layout.version</name>
+    <tag>OZONE, OM</tag>
+    <value>V0</value>
+    <description>Temporary workaround for OM upgrade and will be replaced once
+      upgrade HDDS-3698 story reaches consensus. Defaulting to 'V0' so that
+      existing unit test cases won't be affected. New OM version should be 'V1'
+    </description>
+  </property>
 </configuration>
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index 2eead63..34a1064 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -244,4 +244,11 @@ public final class OMConfigKeys {
       "ozone.fs.trash.checkpoint.interval";
 
   public static final long  OZONE_FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT = 0;
+
+  // TODO: Temporary workaround for OM upgrade path and will be replaced once
+  //  upgrade HDDS-3698 story reaches consensus. Defaulting to 'V0' so that
+  //  existing unit test cases won't be affected. New OM version should be 'V1'.
+  public static final String OZONE_OM_LAYOUT_VERSION =
+          "ozone.om.layout.version";
+  public static final String OZONE_OM_LAYOUT_VERSION_DEFAULT = "V0";
 }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java
new file mode 100644
index 0000000..4c82047
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java
@@ -0,0 +1,266 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+
+import java.util.*;
+
+/**
+ * This class represents the directory information by keeping each component
+ * in the user given path and a pointer to its parent directory element in the
+ * path. Also, it stores directory node related metadata details.
+ */
+public class OmDirectoryInfo extends WithObjectID {
+  private long parentObjectID; // pointer to parent directory
+
+  private String name; // directory name
+
+  private long creationTime;
+  private long modificationTime;
+
+  private List<OzoneAcl> acls;
+
+  public OmDirectoryInfo(Builder builder) {
+    this.name = builder.name;
+    this.acls = builder.acls;
+    this.metadata = builder.metadata;
+    this.objectID = builder.objectID;
+    this.updateID = builder.updateID;
+    this.parentObjectID = builder.parentObjectID;
+    this.creationTime = builder.creationTime;
+    this.modificationTime = builder.modificationTime;
+  }
+
+  /**
+   * Returns new builder class that builds an OmDirectoryInfo.
+   *
+   * @return Builder
+   */
+  public static OmDirectoryInfo.Builder newBuilder() {
+    return new OmDirectoryInfo.Builder();
+  }
+
+  /**
+   * Builder for Directory Info.
+   */
+  public static class Builder {
+    private long parentObjectID; // pointer to parent directory
+
+    private long objectID;
+    private long updateID;
+
+    private String name;
+
+    private long creationTime;
+    private long modificationTime;
+
+    private List<OzoneAcl> acls;
+    private Map<String, String> metadata;
+
+    public Builder() {
+      //Default values
+      this.acls = new LinkedList<>();
+      this.metadata = new HashMap<>();
+    }
+
+    public Builder setParentObjectID(long parentObjectId) {
+      this.parentObjectID = parentObjectId;
+      return this;
+    }
+
+    public Builder setObjectID(long objectId) {
+      this.objectID = objectId;
+      return this;
+    }
+
+    public Builder setUpdateID(long updateId) {
+      this.updateID = updateId;
+      return this;
+    }
+
+    public Builder setName(String dirName) {
+      this.name = dirName;
+      return this;
+    }
+
+    public Builder setCreationTime(long newCreationTime) {
+      this.creationTime = newCreationTime;
+      return this;
+    }
+
+    public Builder setModificationTime(long newModificationTime) {
+      this.modificationTime = newModificationTime;
+      return this;
+    }
+
+    public Builder setAcls(List<OzoneAcl> listOfAcls) {
+      if (listOfAcls != null) {
+        this.acls.addAll(listOfAcls);
+      }
+      return this;
+    }
+
+    public Builder addAcl(OzoneAcl ozoneAcl) {
+      if (ozoneAcl != null) {
+        this.acls.add(ozoneAcl);
+      }
+      return this;
+    }
+
+    public Builder addMetadata(String key, String value) {
+      metadata.put(key, value);
+      return this;
+    }
+
+    public Builder addAllMetadata(Map<String, String> additionalMetadata) {
+      if (additionalMetadata != null) {
+        metadata.putAll(additionalMetadata);
+      }
+      return this;
+    }
+
+    public OmDirectoryInfo build() {
+      return new OmDirectoryInfo(this);
+    }
+  }
+
+  @Override
+  public String toString() {
+    return getPath() + ":" + getObjectID();
+  }
+
+  public long getParentObjectID() {
+    return parentObjectID;
+  }
+
+  public String getPath() {
+    return getParentObjectID() + OzoneConsts.OM_KEY_PREFIX + getName();
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  public long getCreationTime() {
+    return creationTime;
+  }
+
+  public long getModificationTime() {
+    return modificationTime;
+  }
+
+  public List<OzoneAcl> getAcls() {
+    return acls;
+  }
+
+  /**
+   * Creates DirectoryInfo protobuf from OmDirectoryInfo.
+   */
+  public OzoneManagerProtocolProtos.DirectoryInfo getProtobuf() {
+    OzoneManagerProtocolProtos.DirectoryInfo.Builder pib =
+            OzoneManagerProtocolProtos.DirectoryInfo.newBuilder().setName(name)
+                    .setCreationTime(creationTime)
+                    .setModificationTime(modificationTime)
+                    .addAllMetadata(KeyValueUtil.toProtobuf(metadata))
+                    .setObjectID(objectID)
+                    .setUpdateID(updateID)
+                    .setParentID(parentObjectID);
+    if (acls != null) {
+      pib.addAllAcls(OzoneAclUtil.toProtobuf(acls));
+    }
+    return pib.build();
+  }
+
+  /**
+   * Parses DirectoryInfo protobuf and creates OmDirectoryInfo.
+   * @param dirInfo
+   * @return instance of OmDirectoryInfo
+   */
+  public static OmDirectoryInfo getFromProtobuf(
+          OzoneManagerProtocolProtos.DirectoryInfo dirInfo) {
+    OmDirectoryInfo.Builder opib = OmDirectoryInfo.newBuilder()
+            .setName(dirInfo.getName())
+            .setCreationTime(dirInfo.getCreationTime())
+            .setModificationTime(dirInfo.getModificationTime())
+            .setAcls(OzoneAclUtil.fromProtobuf(dirInfo.getAclsList()));
+    if (dirInfo.getMetadataList() != null) {
+      opib.addAllMetadata(KeyValueUtil
+              .getFromProtobuf(dirInfo.getMetadataList()));
+    }
+    if (dirInfo.hasObjectID()) {
+      opib.setObjectID(dirInfo.getObjectID());
+    }
+    if (dirInfo.hasParentID()) {
+      opib.setParentObjectID(dirInfo.getParentID());
+    }
+    if (dirInfo.hasUpdateID()) {
+      opib.setUpdateID(dirInfo.getUpdateID());
+    }
+    return opib.build();
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    OmDirectoryInfo omDirInfo = (OmDirectoryInfo) o;
+    return creationTime == omDirInfo.creationTime &&
+            modificationTime == omDirInfo.modificationTime &&
+            name.equals(omDirInfo.name) &&
+            Objects.equals(metadata, omDirInfo.metadata) &&
+            Objects.equals(acls, omDirInfo.acls) &&
+            objectID == omDirInfo.objectID &&
+            updateID == omDirInfo.updateID &&
+            parentObjectID == omDirInfo.parentObjectID;
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(objectID, parentObjectID, name);
+  }
+
+  /**
+   * Return a new copy of the object.
+   */
+  public OmDirectoryInfo copyObject() {
+    OmDirectoryInfo.Builder builder = new Builder()
+            .setName(name)
+            .setCreationTime(creationTime)
+            .setModificationTime(modificationTime)
+            .setParentObjectID(parentObjectID)
+            .setObjectID(objectID)
+            .setUpdateID(updateID);
+
+    acls.forEach(acl -> builder.addAcl(new OzoneAcl(acl.getType(),
+            acl.getName(), (BitSet) acl.getAclBitSet().clone(),
+            acl.getAclScope())));
+
+    if (metadata != null) {
+      metadata.forEach((k, v) -> builder.addMetadata(k, v));
+    }
+
+    return builder.build();
+  }
+}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
index d1491ed..96df56f 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.om.helpers;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.util.StringUtils;
 
+import javax.annotation.Nonnull;
 import java.nio.file.Paths;
 
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
@@ -116,4 +117,18 @@ public final class OzoneFSUtils {
     }
     return true;
   }
+
+  /**
+   * The function returns leaf node name from the given absolute path. For
+   * example, the given key path '/a/b/c/d/e/file1' then it returns leaf node
+   * name 'file1'.
+   */
+  public static String getFileName(@Nonnull String keyName) {
+    java.nio.file.Path fileName = Paths.get(keyName).getFileName();
+    if (fileName != null) {
+      return fileName.toString();
+    }
+    // failed to convert the path key
+    return keyName;
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneDirectory.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneDirectory.java
new file mode 100644
index 0000000..87e9f09
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneDirectory.java
@@ -0,0 +1,207 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.TestDataUtil;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.util.StringUtils;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.concurrent.TimeoutException;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
+import static org.junit.Assert.fail;
+
+/**
+ * Test verifies the entries and operations in directory table.
+ */
+public class TestOzoneDirectory {
+
+  @Rule
+  public Timeout timeout = new Timeout(300000);
+
+  private static final Logger LOG =
+          LoggerFactory.getLogger(TestOzoneDirectory.class);
+
+  private MiniOzoneCluster cluster;
+  private FileSystem fs;
+  private OzoneFileSystem o3fs;
+  private String volumeName;
+  private String bucketName;
+
+  @Test(timeout = 300_000)
+  public void testMultiLevelDirs() throws Exception {
+    setupOzoneFileSystem();
+    // Op 1. create dir -> /d1/d2/d3/d4/
+    // Op 2. create dir -> /d1/d2/d3/d4/d5
+    // Op 3. create dir -> /d1/d2/d3/d4/d6
+    Path parent = new Path("/d1/d2/d3/d4/");
+    fs.mkdirs(parent);
+
+    OMMetadataManager omMgr = cluster.getOzoneManager().getMetadataManager();
+    OmBucketInfo omBucketInfo = omMgr.getBucketTable().get(
+            omMgr.getBucketKey(volumeName, bucketName));
+    Assert.assertNotNull("Failed to find bucketInfo", omBucketInfo);
+
+    ArrayList<String> dirKeys = new ArrayList<>();
+    long d1ObjectID = verifyDirKey(omBucketInfo.getObjectID(), "d1", "/d1",
+            dirKeys, omMgr);
+    long d2ObjectID = verifyDirKey(d1ObjectID, "d2", "/d1/d2", dirKeys,
+            omMgr);
+    long d3ObjectID = verifyDirKey(d2ObjectID, "d3", "/d1/d2/d3",
+            dirKeys, omMgr);
+    long d4ObjectID = verifyDirKey(d3ObjectID, "d4", "/d1/d2/d3/d4",
+            dirKeys, omMgr);
+
+    Assert.assertEquals("Wrong OM numKeys metrics",
+            4, cluster.getOzoneManager().getMetrics().getNumKeys());
+
+    // verify entries in directory table
+    TableIterator<String, ? extends
+            Table.KeyValue<String, OmDirectoryInfo>> iterator =
+            omMgr.getDirectoryTable().iterator();
+    iterator.seekToFirst();
+    int count = dirKeys.size();
+    Assert.assertEquals("Unexpected directory table entries!", 4, count);
+    while (iterator.hasNext()) {
+      count--;
+      Table.KeyValue<String, OmDirectoryInfo> value = iterator.next();
+      verifyKeyFormat(value.getKey(), dirKeys);
+    }
+    Assert.assertEquals("Unexpected directory table entries!", 0, count);
+
+    // verify entries in key table
+    TableIterator<String, ? extends
+            Table.KeyValue<String, OmKeyInfo>> keyTableItr =
+            omMgr.getKeyTable().iterator();
+    while (keyTableItr.hasNext()) {
+      fail("Shouldn't add any entries in KeyTable!");
+    }
+
+    // create sub-dirs under same parent
+    Path subDir5 = new Path("/d1/d2/d3/d4/d5");
+    fs.mkdirs(subDir5);
+    Path subDir6 = new Path("/d1/d2/d3/d4/d6");
+    fs.mkdirs(subDir6);
+    long d5ObjectID = verifyDirKey(d4ObjectID, "d5",
+            "/d1/d2/d3/d4/d5", dirKeys, omMgr);
+    long d6ObjectID = verifyDirKey(d4ObjectID, "d6",
+            "/d1/d2/d3/d4/d6", dirKeys, omMgr);
+    Assert.assertTrue("Wrong objectIds for sub-dirs[" + d5ObjectID +
+                    "/d5, " + d6ObjectID + "/d6] of same parent!",
+            d5ObjectID != d6ObjectID);
+
+    Assert.assertEquals("Wrong OM numKeys metrics",
+            6, cluster.getOzoneManager().getMetrics().getNumKeys());
+  }
+
+  /**
+   * Verify key name format and the DB key existence in the expected dirKeys
+   * list.
+   *
+   * @param key     table keyName
+   * @param dirKeys expected keyName
+   */
+  private void verifyKeyFormat(String key, ArrayList<String> dirKeys) {
+    String[] keyParts = StringUtils.split(key,
+            OzoneConsts.OM_KEY_PREFIX.charAt(0));
+    Assert.assertEquals("Invalid KeyName", 2, keyParts.length);
+    boolean removed = dirKeys.remove(key);
+    Assert.assertTrue("Key:" + key + " doesn't exists in directory table!",
+            removed);
+  }
+
+  long verifyDirKey(long parentId, String dirKey, String absolutePath,
+                    ArrayList<String> dirKeys, OMMetadataManager omMgr)
+          throws Exception {
+    String dbKey = parentId + "/" + dirKey;
+    dirKeys.add(dbKey);
+    OmDirectoryInfo dirInfo = omMgr.getDirectoryTable().get(dbKey);
+    Assert.assertNotNull("Failed to find " + absolutePath +
+            " using dbKey: " + dbKey, dirInfo);
+    Assert.assertEquals("Parent Id mismatches", parentId,
+            dirInfo.getParentObjectID());
+    Assert.assertEquals("Mismatches directory name", dirKey,
+            dirInfo.getName());
+    Assert.assertTrue("Mismatches directory creation time param",
+            dirInfo.getCreationTime() > 0);
+    Assert.assertEquals("Mismatches directory modification time param",
+            dirInfo.getCreationTime(), dirInfo.getModificationTime());
+    Assert.assertEquals("Wrong representation!",
+            dbKey + ":" + dirInfo.getObjectID(), dirInfo.toString());
+    return dirInfo.getObjectID();
+  }
+
+  private void setupOzoneFileSystem()
+          throws IOException, TimeoutException, InterruptedException {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setInt(FS_TRASH_INTERVAL_KEY, 1);
+    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    cluster = MiniOzoneCluster.newBuilder(conf)
+            .setNumDatanodes(3)
+            .build();
+    cluster.waitForClusterToBeReady();
+    // create a volume and a bucket to be used by OzoneFileSystem
+    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster);
+    volumeName = bucket.getVolumeName();
+    bucketName = bucket.getName();
+
+    String rootPath = String.format("%s://%s.%s/",
+            OzoneConsts.OZONE_URI_SCHEME, bucket.getName(),
+            bucket.getVolumeName());
+
+    // Set the fs.defaultFS and start the filesystem
+    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
+    // Set the number of keys to be processed during batch operate.
+    conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5);
+    fs = FileSystem.get(conf);
+  }
+
+  @After
+  public void tearDown() {
+    IOUtils.closeQuietly(fs);
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+}
\ No newline at end of file
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index 7269c9a..68dd184 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -776,6 +776,17 @@ message KeyInfo {
     optional uint64 updateID = 15;
 }
 
+message DirectoryInfo {
+    required string name = 1;
+    required uint64 creationTime = 2;
+    required uint64 modificationTime = 3;
+    repeated hadoop.hdds.KeyValue metadata = 4;
+    repeated OzoneAclInfo acls = 5;
+    required uint64 objectID = 6;
+    required uint64 updateID = 7;
+    required uint64 parentID = 8;
+}
+
 message RepeatedKeyInfo {
     repeated KeyInfo keyInfo = 1;
 }
diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
index e0749c7..7efe0a3 100644
--- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
+++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.common.BlockGroup;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
@@ -375,6 +376,12 @@ public interface OMMetadataManager extends DBStoreHAManager {
       String bucketName, String prefix) throws IOException;
 
   /**
+   * Gets the DirectoryTable.
+   * @return Table.
+   */
+  Table<String, OmDirectoryInfo> getDirectoryTable();
+
+  /**
    * Return table mapped to the specified table name.
    * @param tableName
    * @return Table
@@ -398,4 +405,14 @@ public interface OMMetadataManager extends DBStoreHAManager {
 
   TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
       getKeyIterator();
+
+  /**
+   * Given a parent object ID and a path component name, return the
+   * corresponding DB directory key.
+   *
+   * @param parentObjectId - parent object Id
+   * @param pathComponentName   - path component name
+   * @return DB directory key as String.
+   */
+  String getOzonePathKey(long parentObjectId, String pathComponentName);
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index 7d6a43b..28add3b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.common.BlockGroup;
 import org.apache.hadoop.hdds.utils.TransactionInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmBucketInfoCodec;
+import org.apache.hadoop.ozone.om.codec.OmDirectoryInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmKeyInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmMultipartKeyInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmPrefixInfoCodec;
@@ -60,6 +61,7 @@ import org.apache.hadoop.ozone.om.codec.UserVolumeInfoCodec;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
@@ -126,6 +128,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
    * |----------------------------------------------------------------------|
    * |  multipartInfoTable| /volumeName/bucketName/keyName/uploadId ->...   |
    * |----------------------------------------------------------------------|
+   * |  directoryTable    | parentId/directoryName -> DirectoryInfo         |
    * |----------------------------------------------------------------------|
    * |  transactionInfoTable | #TRANSACTIONINFO -> OMTransactionInfo        |
    * |----------------------------------------------------------------------|
@@ -141,6 +144,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
   public static final String S3_SECRET_TABLE = "s3SecretTable";
   public static final String DELEGATION_TOKEN_TABLE = "dTokenTable";
   public static final String PREFIX_TABLE = "prefixTable";
+  public static final String DIRECTORY_TABLE = "directoryTable";
   public static final String TRANSACTION_INFO_TABLE =
       "transactionInfoTable";
 
@@ -159,6 +163,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
   private Table s3SecretTable;
   private Table dTokenTable;
   private Table prefixTable;
+  private Table dirTable;
   private Table transactionInfoTable;
   private boolean isRatisEnabled;
   private boolean ignorePipelineinKey;
@@ -244,6 +249,11 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
   }
 
   @Override
+  public Table<String, OmDirectoryInfo> getDirectoryTable() {
+    return dirTable;
+  }
+
+  @Override
   public Table<String, OmMultipartKeyInfo> getMultipartInfoTable() {
     return multipartInfoTable;
   }
@@ -335,6 +345,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
         .addTable(DELEGATION_TOKEN_TABLE)
         .addTable(S3_SECRET_TABLE)
         .addTable(PREFIX_TABLE)
+        .addTable(DIRECTORY_TABLE)
         .addTable(TRANSACTION_INFO_TABLE)
         .addCodec(OzoneTokenIdentifier.class, new TokenIdentifierCodec())
         .addCodec(OmKeyInfo.class, new OmKeyInfoCodec(true))
@@ -346,7 +357,8 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
         .addCodec(OmMultipartKeyInfo.class, new OmMultipartKeyInfoCodec())
         .addCodec(S3SecretValue.class, new S3SecretValueCodec())
         .addCodec(OmPrefixInfo.class, new OmPrefixInfoCodec())
-        .addCodec(TransactionInfo.class, new TransactionInfoCodec());
+        .addCodec(TransactionInfo.class, new TransactionInfoCodec())
+        .addCodec(OmDirectoryInfo.class, new OmDirectoryInfoCodec());
   }
 
   /**
@@ -400,6 +412,10 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
         OmPrefixInfo.class);
     checkTableStatus(prefixTable, PREFIX_TABLE);
 
+    dirTable = this.store.getTable(DIRECTORY_TABLE, String.class,
+            OmDirectoryInfo.class);
+    checkTableStatus(dirTable, DIRECTORY_TABLE);
+
     transactionInfoTable = this.store.getTable(TRANSACTION_INFO_TABLE,
         String.class, TransactionInfo.class);
     checkTableStatus(transactionInfoTable, TRANSACTION_INFO_TABLE);
@@ -1165,4 +1181,12 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
     return tableMap.keySet();
   }
 
+  @Override
+  public String getOzonePathKey(long parentObjectId, String pathComponentName) {
+    StringBuilder builder = new StringBuilder();
+    builder.append(parentObjectId);
+    builder.append(OM_KEY_PREFIX).append(pathComponentName);
+    return builder.toString();
+  }
+
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index d56499b..780321a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -216,6 +216,8 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HANDLER_COUNT_DEF
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_LAYOUT_VERSION;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_LAYOUT_VERSION_DEFAULT;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_METRICS_SAVE_INTERVAL;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_METRICS_SAVE_INTERVAL_DEFAULT;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_USER_MAX_VOLUME;
@@ -1106,6 +1108,10 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
       omRatisServer.start();
     }
 
+    // TODO: Temporary workaround for OM upgrade path and will be replaced once
+    //  upgrade HDDS-3698 story reaches consensus.
+    getOMLayoutVersion();
+
     metadataManager.start(configuration);
     startSecretManagerIfNecessary();
 
@@ -3664,6 +3670,14 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
         OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT);
   }
 
+  private void getOMLayoutVersion() {
+    String version = configuration.getTrimmed(OZONE_OM_LAYOUT_VERSION,
+            OZONE_OM_LAYOUT_VERSION_DEFAULT);
+    boolean omLayoutVersionV1 =
+            StringUtils.equalsIgnoreCase(version, "V1");
+    OzoneManagerRatisUtils.setOmLayoutVersionV1(omLayoutVersionV1);
+  }
+
   /**
    * Create volume which is required for S3Gateway operations.
    * @throws IOException
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OmDirectoryInfoCodec.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OmDirectoryInfoCodec.java
new file mode 100644
index 0000000..ba592a9
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OmDirectoryInfoCodec.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.codec;
+
+import com.google.common.base.Preconditions;
+import com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.hadoop.hdds.utils.db.Codec;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DirectoryInfo;
+
+import java.io.IOException;
+
+/**
+ * Codec to encode OmDirectoryInfo as byte array.
+ */
+public class OmDirectoryInfoCodec implements Codec<OmDirectoryInfo> {
+
+  @Override
+  public byte[] toPersistedFormat(OmDirectoryInfo object) throws IOException {
+    Preconditions
+            .checkNotNull(object, "Null object can't be converted " +
+                    "to byte array.");
+    return object.getProtobuf().toByteArray();
+  }
+
+  @Override
+  public OmDirectoryInfo fromPersistedFormat(byte[] rawData)
+          throws IOException {
+    Preconditions
+            .checkNotNull(rawData,
+                    "Null byte array can't converted to real object.");
+    try {
+      return OmDirectoryInfo.getFromProtobuf(DirectoryInfo.parseFrom(rawData));
+    } catch (InvalidProtocolBufferException e) {
+      throw new IllegalArgumentException(
+              "Can't encode the the raw data from the byte array", e);
+    }
+  }
+
+  @Override
+  public OmDirectoryInfo copyObject(OmDirectoryInfo object) {
+    return object.copyObject();
+  }
+}
+
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index 14657c6..c3fc994 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.ozone.om.request.bucket.acl.OMBucketAddAclRequest;
 import org.apache.hadoop.ozone.om.request.bucket.acl.OMBucketRemoveAclRequest;
 import org.apache.hadoop.ozone.om.request.bucket.acl.OMBucketSetAclRequest;
 import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest;
+import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestV1;
 import org.apache.hadoop.ozone.om.request.file.OMFileCreateRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeysDeleteRequest;
 import org.apache.hadoop.ozone.om.request.key.OMAllocateBlockRequest;
@@ -79,8 +80,22 @@ import java.nio.file.Path;
  */
 public final class OzoneManagerRatisUtils {
 
+  // TODO: Temporary workaround for OM upgrade path and will be replaced once
+  //  upgrade HDDS-3698 story reaches consensus.
+  private static boolean omLayoutVersionV1 = true;
+
   private OzoneManagerRatisUtils() {
   }
+
+  /**
+   * Sets layout version.
+   *
+   * @param layoutVersionV1 om layout version
+   */
+  public static void setOmLayoutVersionV1(boolean layoutVersionV1) {
+    OzoneManagerRatisUtils.omLayoutVersionV1 = layoutVersionV1;
+  }
+
   /**
    * Create OMClientRequest which encapsulates the OMRequest.
    * @param omRequest
@@ -129,6 +144,9 @@ public final class OzoneManagerRatisUtils {
     case RenameKeys:
       return new OMKeysRenameRequest(omRequest);
     case CreateDirectory:
+      if (omLayoutVersionV1) {
+        return new OMDirectoryCreateRequestV1(omRequest);
+      }
       return new OMDirectoryCreateRequest(omRequest);
     case CreateFile:
       return new OMFileCreateRequest(omRequest);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
index c14ca93..2be9efd 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
@@ -361,4 +361,7 @@ public class OMDirectoryCreateRequest extends OMKeyRequest {
         .setUpdateID(objectId);
   }
 
+  static long getMaxNumOfRecursiveDirs() {
+    return MAX_NUM_OF_RECURSIVE_DIRS;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java
similarity index 55%
copy from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
copy to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java
index c14ca93..8b0727a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java
@@ -18,109 +18,64 @@
 
 package org.apache.hadoop.ozone.om.request.file;
 
-import java.io.IOException;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-
 import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
 import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
-import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.audit.AuditLogger;
 import org.apache.hadoop.ozone.audit.OMAction;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OMMetrics;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.file.OMDirectoryCreateResponse;
+import org.apache.hadoop.ozone.om.response.file.OMDirectoryCreateResponseV1;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CreateDirectoryRequest;
+        .CreateDirectoryRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CreateDirectoryResponse;
+        .CreateDirectoryResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .KeyArgs;
+        .KeyArgs;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
+        .OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
+        .OMResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .Status;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+        .Status;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KEY_NAME;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
-import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS_IN_GIVENPATH;
-import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH;
-import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.NONE;
-import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS;
+import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.*;
+
 /**
- * Handle create directory request.
+ * Handle create directory request. It will add path components to the directory
+ * table and maintains file system semantics.
  */
-public class OMDirectoryCreateRequest extends OMKeyRequest {
+public class OMDirectoryCreateRequestV1 extends OMDirectoryCreateRequest {
 
   private static final Logger LOG =
-      LoggerFactory.getLogger(OMDirectoryCreateRequest.class);
-
-  // The maximum number of directories which can be created through a single
-  // transaction (recursive directory creations) is 2^8 - 1 as only 8
-  // bits are set aside for this in ObjectID.
-  private static final long MAX_NUM_OF_RECURSIVE_DIRS = 255;
-
-  /**
-   * Stores the result of request execution in
-   * OMClientRequest#validateAndUpdateCache.
-   */
-  public enum Result {
-    SUCCESS, // The request was executed successfully
-
-    DIRECTORY_ALREADY_EXISTS, // Directory key already exists in DB
+      LoggerFactory.getLogger(OMDirectoryCreateRequestV1.class);
 
-    FAILURE // The request failed and exception was thrown
-  }
-
-  public OMDirectoryCreateRequest(OMRequest omRequest) {
+  public OMDirectoryCreateRequestV1(OMRequest omRequest) {
     super(omRequest);
   }
 
   @Override
-  public OMRequest preExecute(OzoneManager ozoneManager) {
-    CreateDirectoryRequest createDirectoryRequest =
-        getOmRequest().getCreateDirectoryRequest();
-    Preconditions.checkNotNull(createDirectoryRequest);
-
-    KeyArgs.Builder newKeyArgs = createDirectoryRequest.getKeyArgs()
-        .toBuilder().setModificationTime(Time.now());
-
-    CreateDirectoryRequest.Builder newCreateDirectoryRequest =
-        createDirectoryRequest.toBuilder().setKeyArgs(newKeyArgs);
-
-    return getOmRequest().toBuilder().setCreateDirectoryRequest(
-        newCreateDirectoryRequest).setUserInfo(getUserInfo()).build();
-
-  }
-
-  @Override
   public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
       long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
 
@@ -131,6 +86,7 @@ public class OMDirectoryCreateRequest extends OMKeyRequest {
     String volumeName = keyArgs.getVolumeName();
     String bucketName = keyArgs.getBucketName();
     String keyName = keyArgs.getKeyName();
+    int numKeysCreated = 0;
 
     OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
         getOmRequest());
@@ -147,8 +103,7 @@ public class OMDirectoryCreateRequest extends OMKeyRequest {
     IOException exception = null;
     OMClientResponse omClientResponse = null;
     Result result = Result.FAILURE;
-    List<OmKeyInfo> missingParentInfos;
-    int numMissingParents = 0;
+    List<OmDirectoryInfo> missingParentInfos;
 
     try {
       keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
@@ -175,48 +130,50 @@ public class OMDirectoryCreateRequest extends OMKeyRequest {
 
       // Need to check if any files exist in the given path, if they exist we
       // cannot create a directory with the given key.
-      OMFileRequest.OMPathInfo omPathInfo =
-          OMFileRequest.verifyFilesInPath(omMetadataManager, volumeName,
+      // Verify the path against directory table
+      OMFileRequest.OMPathInfoV1 omPathInfo =
+          OMFileRequest.verifyDirectoryKeysInPath(omMetadataManager, volumeName,
               bucketName, keyName, keyPath);
       OMFileRequest.OMDirectoryResult omDirectoryResult =
           omPathInfo.getDirectoryResult();
 
-      OmKeyInfo dirKeyInfo = null;
       if (omDirectoryResult == FILE_EXISTS ||
           omDirectoryResult == FILE_EXISTS_IN_GIVENPATH) {
-        throw new OMException("Unable to create directory: " +keyName
-            + " in volume/bucket: " + volumeName + "/" + bucketName,
+        throw new OMException("Unable to create directory: " + keyName
+            + " in volume/bucket: " + volumeName + "/" + bucketName + " as " +
+                "file:" + omPathInfo.getFileExistsInPath() + " already exists",
             FILE_ALREADY_EXISTS);
       } else if (omDirectoryResult == DIRECTORY_EXISTS_IN_GIVENPATH ||
           omDirectoryResult == NONE) {
-        List<String> missingParents = omPathInfo.getMissingParents();
-        long baseObjId = ozoneManager.getObjectIdFromTxId(trxnLogIndex);
-        List<OzoneAcl> inheritAcls = omPathInfo.getAcls();
-
-        dirKeyInfo = createDirectoryKeyInfoWithACL(keyName,
-            keyArgs, baseObjId,
-            OzoneAclUtil.fromProtobuf(keyArgs.getAclsList()), trxnLogIndex);
-
-        missingParentInfos = getAllParentInfo(ozoneManager, keyArgs,
-            missingParents, inheritAcls, trxnLogIndex);
-
-        numMissingParents = missingParentInfos.size();
-        OMFileRequest.addKeyTableCacheEntries(omMetadataManager, volumeName,
-            bucketName, Optional.of(dirKeyInfo),
-            Optional.of(missingParentInfos), trxnLogIndex);
-        result = Result.SUCCESS;
-        omClientResponse = new OMDirectoryCreateResponse(omResponse.build(),
-            dirKeyInfo, missingParentInfos, result);
+
+        // prepare all missing parents
+        missingParentInfos = OMDirectoryCreateRequestV1.getAllParentDirInfo(
+                ozoneManager, keyArgs, omPathInfo, trxnLogIndex);
+        // prepare leafNode dir
+        OmDirectoryInfo dirInfo = createDirectoryInfoWithACL(
+                omPathInfo.getLeafNodeName(),
+                keyArgs, omPathInfo.getLeafNodeObjectId(),
+                omPathInfo.getLastKnownParentId(), trxnLogIndex,
+                OzoneAclUtil.fromProtobuf(keyArgs.getAclsList()));
+        OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager,
+                Optional.of(dirInfo), Optional.of(missingParentInfos),
+                trxnLogIndex);
+
+        // total number of keys created.
+        numKeysCreated = missingParentInfos.size() + 1;
+
+        result = OMDirectoryCreateRequest.Result.SUCCESS;
+        omClientResponse = new OMDirectoryCreateResponseV1(omResponse.build(),
+                dirInfo, missingParentInfos, result);
       } else {
-        // omDirectoryResult == DIRECTORY_EXITS
         result = Result.DIRECTORY_ALREADY_EXISTS;
         omResponse.setStatus(Status.DIRECTORY_ALREADY_EXISTS);
-        omClientResponse = new OMDirectoryCreateResponse(omResponse.build(),
+        omClientResponse = new OMDirectoryCreateResponseV1(omResponse.build(),
             result);
       }
     } catch (IOException ex) {
       exception = ex;
-      omClientResponse = new OMDirectoryCreateResponse(
+      omClientResponse = new OMDirectoryCreateResponseV1(
           createErrorOMResponse(omResponse, exception), result);
     } finally {
       addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
@@ -230,135 +187,126 @@ public class OMDirectoryCreateRequest extends OMKeyRequest {
     auditLog(auditLogger, buildAuditMessage(OMAction.CREATE_DIRECTORY,
         auditMap, exception, userInfo));
 
-    logResult(createDirectoryRequest, keyArgs, omMetrics, result,
-        exception, numMissingParents);
+    logResult(createDirectoryRequest, keyArgs, omMetrics, numKeysCreated,
+            result, exception);
 
     return omClientResponse;
   }
 
+  private void logResult(CreateDirectoryRequest createDirectoryRequest,
+                         KeyArgs keyArgs, OMMetrics omMetrics, int numKeys,
+                         Result result,
+                         IOException exception) {
+
+    String volumeName = keyArgs.getVolumeName();
+    String bucketName = keyArgs.getBucketName();
+    String keyName = keyArgs.getKeyName();
+
+    switch (result) {
+    case SUCCESS:
+      omMetrics.incNumKeys(numKeys);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Directory created. Volume:{}, Bucket:{}, Key:{}",
+            volumeName, bucketName, keyName);
+      }
+      break;
+    case DIRECTORY_ALREADY_EXISTS:
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Directory already exists. Volume:{}, Bucket:{}, Key{}",
+            volumeName, bucketName, keyName, exception);
+      }
+      break;
+    case FAILURE:
+      omMetrics.incNumCreateDirectoryFails();
+      LOG.error("Directory creation failed. Volume:{}, Bucket:{}, Key{}. " +
+          "Exception:{}", volumeName, bucketName, keyName, exception);
+      break;
+    default:
+      LOG.error("Unrecognized Result for OMDirectoryCreateRequest: {}",
+          createDirectoryRequest);
+    }
+  }
+
   /**
-   * Construct OmKeyInfo for every parent directory in missing list.
+   * Construct OmDirectoryInfo for every parent directory in missing list.
    * @param ozoneManager
    * @param keyArgs
-   * @param missingParents list of parent directories to be created
-   * @param inheritAcls ACLs to be assigned to each new parent dir
+   * @param pathInfo list of parent directories to be created and their ACLs
    * @param trxnLogIndex
    * @return
    * @throws IOException
    */
-  public static List<OmKeyInfo> getAllParentInfo(OzoneManager ozoneManager,
-      KeyArgs keyArgs, List<String> missingParents, List<OzoneAcl> inheritAcls,
-      long trxnLogIndex) throws IOException {
+  public static List<OmDirectoryInfo> getAllParentDirInfo(
+          OzoneManager ozoneManager, KeyArgs keyArgs,
+          OMFileRequest.OMPathInfoV1 pathInfo, long trxnLogIndex)
+          throws IOException {
     OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-    List<OmKeyInfo> missingParentInfos = new ArrayList<>();
+    List<OmDirectoryInfo> missingParentInfos = new ArrayList<>();
 
     // The base id is left shifted by 8 bits for creating space to
     // create (2^8 - 1) object ids in every request.
     // maxObjId represents the largest object id allocation possible inside
     // the transaction.
     long baseObjId = ozoneManager.getObjectIdFromTxId(trxnLogIndex);
-    long maxObjId = baseObjId + MAX_NUM_OF_RECURSIVE_DIRS;
-    long objectCount = 1; // baseObjID is used by the leaf directory
+    long maxObjId = baseObjId + getMaxNumOfRecursiveDirs();
+    long objectCount = 1;
 
     String volumeName = keyArgs.getVolumeName();
     String bucketName = keyArgs.getBucketName();
     String keyName = keyArgs.getKeyName();
 
+    long lastKnownParentId = pathInfo.getLastKnownParentId();
+    List<String> missingParents = pathInfo.getMissingParents();
+    List<OzoneAcl> inheritAcls = pathInfo.getAcls();
     for (String missingKey : missingParents) {
       long nextObjId = baseObjId + objectCount;
       if (nextObjId > maxObjId) {
         throw new OMException("Too many directories in path. Exceeds limit of "
-            + MAX_NUM_OF_RECURSIVE_DIRS + ". Unable to create directory: "
+            + getMaxNumOfRecursiveDirs() + ". Unable to create directory: "
             + keyName + " in volume/bucket: " + volumeName + "/" + bucketName,
             INVALID_KEY_NAME);
       }
 
-      LOG.debug("missing parent {} getting added to KeyTable", missingKey);
-      // what about keyArgs for parent directories? TODO
-      OmKeyInfo parentKeyInfo = createDirectoryKeyInfoWithACL(
-          missingKey, keyArgs, nextObjId, inheritAcls, trxnLogIndex);
+      LOG.debug("missing parent {} getting added to DirectoryTable",
+              missingKey);
+      OmDirectoryInfo dirInfo = createDirectoryInfoWithACL(missingKey,
+              keyArgs, nextObjId, lastKnownParentId, trxnLogIndex, inheritAcls);
       objectCount++;
 
-      missingParentInfos.add(parentKeyInfo);
-      omMetadataManager.getKeyTable().addCacheEntry(
-          new CacheKey<>(omMetadataManager.getOzoneKey(volumeName,
-              bucketName, parentKeyInfo.getKeyName())),
-          new CacheValue<>(Optional.of(parentKeyInfo),
-              trxnLogIndex));
-    }
-
-    return missingParentInfos;
-  }
+      missingParentInfos.add(dirInfo);
 
-  private void logResult(CreateDirectoryRequest createDirectoryRequest,
-      KeyArgs keyArgs, OMMetrics omMetrics, Result result,
-      IOException exception, int numMissingParents) {
-
-    String volumeName = keyArgs.getVolumeName();
-    String bucketName = keyArgs.getBucketName();
-    String keyName = keyArgs.getKeyName();
-
-    switch (result) {
-    case SUCCESS:
-      // Count for the missing parents plus the directory being created.
-      omMetrics.incNumKeys(numMissingParents + 1);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Directory created. Volume:{}, Bucket:{}, Key:{}",
-            volumeName, bucketName, keyName);
-      }
-      break;
-    case DIRECTORY_ALREADY_EXISTS:
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Directory already exists. Volume:{}, Bucket:{}, Key{}",
-            volumeName, bucketName, keyName, exception);
-      }
-      break;
-    case FAILURE:
-      omMetrics.incNumCreateDirectoryFails();
-      LOG.error("Directory creation failed. Volume:{}, Bucket:{}, Key{}. " +
-          "Exception:{}", volumeName, bucketName, keyName, exception);
-      break;
-    default:
-      LOG.error("Unrecognized Result for OMDirectoryCreateRequest: {}",
-          createDirectoryRequest);
+      // updating id for the next sub-dir
+      lastKnownParentId = nextObjId;
     }
+    pathInfo.setLastKnownParentId(lastKnownParentId);
+    pathInfo.setLeafNodeObjectId(baseObjId + objectCount);
+    return missingParentInfos;
   }
 
   /**
-   * fill in a KeyInfo for a new directory entry in OM database.
+   * Fill in a DirectoryInfo for a new directory entry in OM database.
    * without initializing ACLs from the KeyArgs - used for intermediate
    * directories which get created internally/recursively during file
    * and directory create.
-   * @param keyName
+   * @param dirName
    * @param keyArgs
    * @param objectId
-   * @param transactionIndex
-   * @return the OmKeyInfo structure
+   * @param parentObjectId
+   * @param inheritAcls
+   * @return the OmDirectoryInfo structure
    */
-  public static OmKeyInfo createDirectoryKeyInfoWithACL(
-      String keyName, KeyArgs keyArgs, long objectId,
-      List<OzoneAcl> inheritAcls, long transactionIndex) {
-    return dirKeyInfoBuilderNoACL(keyName, keyArgs, objectId)
-        .setAcls(inheritAcls).setUpdateID(transactionIndex).build();
+  public static OmDirectoryInfo createDirectoryInfoWithACL(
+          String dirName, KeyArgs keyArgs, long objectId,
+          long parentObjectId, long transactionIndex,
+          List<OzoneAcl> inheritAcls) {
+
+    return OmDirectoryInfo.newBuilder()
+            .setName(dirName)
+            .setCreationTime(keyArgs.getModificationTime())
+            .setModificationTime(keyArgs.getModificationTime())
+            .setObjectID(objectId)
+            .setUpdateID(transactionIndex)
+            .setParentObjectID(parentObjectId)
+            .setAcls(inheritAcls).build();
   }
-
-  private static OmKeyInfo.Builder dirKeyInfoBuilderNoACL(String keyName,
-      KeyArgs keyArgs, long objectId) {
-    String dirName = OzoneFSUtils.addTrailingSlashIfNeeded(keyName);
-
-    return new OmKeyInfo.Builder()
-        .setVolumeName(keyArgs.getVolumeName())
-        .setBucketName(keyArgs.getBucketName())
-        .setKeyName(dirName)
-        .setOmKeyLocationInfos(Collections.singletonList(
-            new OmKeyLocationInfoGroup(0, new ArrayList<>())))
-        .setCreationTime(keyArgs.getModificationTime())
-        .setModificationTime(keyArgs.getModificationTime())
-        .setDataSize(0)
-        .setReplicationType(HddsProtos.ReplicationType.RATIS)
-        .setReplicationFactor(HddsProtos.ReplicationFactor.ONE)
-        .setObjectID(objectId)
-        .setUpdateID(objectId);
-  }
-
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index f020f12..d5543ba 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -21,14 +21,19 @@ package org.apache.hadoop.ozone.om.request.file;
 import java.io.IOException;
 import java.nio.file.Path;
 import java.util.ArrayList;
+import java.util.Iterator;
 import java.util.List;
 
 import com.google.common.base.Optional;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -127,6 +132,169 @@ public final class OMFileRequest {
   }
 
   /**
+   * Verify any dir/key exist in the given path in the specified
+   * volume/bucket by iterating through directory table.
+   *
+   * @param omMetadataManager OM Metadata manager
+   * @param volumeName        volume name
+   * @param bucketName        bucket name
+   * @param keyName           key name
+   * @param keyPath           path
+   * @return OMPathInfoV1 path info object
+   * @throws IOException on DB failure
+   */
+  public static OMPathInfoV1 verifyDirectoryKeysInPath(
+          @Nonnull OMMetadataManager omMetadataManager,
+          @Nonnull String volumeName,
+          @Nonnull String bucketName, @Nonnull String keyName,
+          @Nonnull Path keyPath) throws IOException {
+
+    String leafNodeName = OzoneFSUtils.getFileName(keyName);
+    List<String> missing = new ArrayList<>();
+
+    // Found no files/ directories in the given path.
+    OMDirectoryResult result = OMDirectoryResult.NONE;
+
+    Iterator<Path> elements = keyPath.iterator();
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    // by default, inherit bucket ACLs
+    List<OzoneAcl> inheritAcls = omBucketInfo.getAcls();
+
+    long lastKnownParentId = omBucketInfo.getObjectID();
+    String dbDirName = ""; // absolute path for trace logs
+    // for better logging
+    StringBuilder fullKeyPath = new StringBuilder(bucketKey);
+    while (elements.hasNext()) {
+      String fileName = elements.next().toString();
+      fullKeyPath.append(OzoneConsts.OM_KEY_PREFIX);
+      fullKeyPath.append(fileName);
+      if (missing.size() > 0) {
+        // Add all the sub-dirs to the missing list except the leaf element.
+        // For example, /vol1/buck1/a/b/c/d/e/f/file1.txt.
+        // Assume /vol1/buck1/a/b/c exists, then add d, e, f into missing list.
+        if(elements.hasNext()){
+          // skips leaf node.
+          missing.add(fileName);
+        }
+        continue;
+      }
+
+      // For example, /vol1/buck1/a/b/c/d/e/f/file1.txt
+      // 1. Do lookup on directoryTable. If not exists goto next step.
+      // 2. Do look on keyTable. If not exists goto next step.
+      // 3. Add 'sub-dir' to missing parents list
+      String dbNodeName = omMetadataManager.getOzonePathKey(
+              lastKnownParentId, fileName);
+      OmDirectoryInfo omDirInfo = omMetadataManager.getDirectoryTable().
+              get(dbNodeName);
+      if (omDirInfo != null) {
+        dbDirName += omDirInfo.getName() + OzoneConsts.OZONE_URI_DELIMITER;
+        if (elements.hasNext()) {
+          result = OMDirectoryResult.DIRECTORY_EXISTS_IN_GIVENPATH;
+          lastKnownParentId = omDirInfo.getObjectID();
+          inheritAcls = omDirInfo.getAcls();
+          continue;
+        } else {
+          // Checked all the sub-dirs till the leaf node.
+          // Found a directory in the given path.
+          result = OMDirectoryResult.DIRECTORY_EXISTS;
+        }
+      } else {
+        // Get parentID from the lastKnownParent. For any files, directly under
+        // the bucket, the parent is the bucketID. Say, "/vol1/buck1/file1"
+        // TODO: Need to add UT for this case along with OMFileCreateRequest.
+        if (omMetadataManager.getKeyTable().isExist(dbNodeName)) {
+          if (elements.hasNext()) {
+            // Found a file in the given key name.
+            result = OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH;
+          } else {
+            // Checked all the sub-dirs till the leaf file.
+            // Found a file with the given key name.
+            result = OMDirectoryResult.FILE_EXISTS;
+          }
+          break; // Skip directory traversal as it hits key.
+        }
+
+        // Add to missing list, there is no such file/directory with given name.
+        if (elements.hasNext()) {
+          missing.add(fileName);
+        }
+      }
+    }
+
+    LOG.trace("verifyFiles/Directories in Path : " + "/" + volumeName
+            + "/" + bucketName + "/" + keyName + ":" + result);
+
+    if (result == OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH || result ==
+            OMDirectoryResult.FILE_EXISTS) {
+      return new OMPathInfoV1(leafNodeName, lastKnownParentId, missing,
+              result, inheritAcls, fullKeyPath.toString());
+    }
+
+    String dbDirKeyName = omMetadataManager.getOzoneDirKey(volumeName,
+            bucketName, dbDirName);
+    LOG.trace("Acls inherited from parent " + dbDirKeyName + " are : "
+            + inheritAcls);
+
+    return new OMPathInfoV1(leafNodeName, lastKnownParentId, missing,
+            result, inheritAcls);
+  }
+
+  /**
+   * Class to return the results from verifyDirectoryKeysInPath.
+   * Includes the list of missing intermediate directories and
+   * the directory search result code.
+   */
+  public static class OMPathInfoV1 extends OMPathInfo{
+    private String leafNodeName;
+    private long lastKnownParentId;
+    private long leafNodeObjectId;
+    private String fileExistsInPath;
+
+    public OMPathInfoV1(String leafNodeName, long lastKnownParentId,
+                        List missingParents, OMDirectoryResult result,
+                        List<OzoneAcl> aclList, String fileExistsInPath) {
+      super(missingParents, result, aclList);
+      this.leafNodeName = leafNodeName;
+      this.lastKnownParentId = lastKnownParentId;
+      this.fileExistsInPath = fileExistsInPath;
+    }
+
+    public OMPathInfoV1(String leafNodeName, long lastKnownParentId,
+                        List missingParents, OMDirectoryResult result,
+                        List<OzoneAcl> aclList) {
+      this(leafNodeName, lastKnownParentId, missingParents, result, aclList,
+              "");
+    }
+
+    public String getLeafNodeName() {
+      return leafNodeName;
+    }
+
+    public long getLeafNodeObjectId() {
+      return leafNodeObjectId;
+    }
+
+    public void setLeafNodeObjectId(long leafNodeObjectId) {
+      this.leafNodeObjectId = leafNodeObjectId;
+    }
+
+    public void setLastKnownParentId(long lastKnownParentId) {
+      this.lastKnownParentId = lastKnownParentId;
+    }
+
+    public long getLastKnownParentId() {
+      return lastKnownParentId;
+    }
+
+    public String getFileExistsInPath() {
+      return fileExistsInPath;
+    }
+  }
+
+  /**
    * Class to return the results from verifyFilesInPath.
    * Includes the list of missing intermediate directories and
    * the directory search result code.
@@ -224,4 +392,34 @@ public final class OMFileRequest {
           new CacheValue<>(keyInfo, index));
     }
   }
+
+  /**
+   * Adding directory info to the Table cache.
+   *
+   * @param omMetadataManager  OM Metdata Manager
+   * @param dirInfo            directory info
+   * @param missingParentInfos list of the parents to be added to DB
+   * @param trxnLogIndex       transaction log index
+   */
+  public static void addDirectoryTableCacheEntries(
+          OMMetadataManager omMetadataManager,
+          Optional<OmDirectoryInfo> dirInfo,
+          Optional<List<OmDirectoryInfo>> missingParentInfos,
+          long trxnLogIndex) {
+    for (OmDirectoryInfo subDirInfo : missingParentInfos.get()) {
+      omMetadataManager.getDirectoryTable().addCacheEntry(
+              new CacheKey<>(omMetadataManager.getOzonePathKey(
+                      subDirInfo.getParentObjectID(), subDirInfo.getName())),
+              new CacheValue<>(Optional.of(subDirInfo), trxnLogIndex));
+    }
+
+    if (dirInfo.isPresent()) {
+      omMetadataManager.getDirectoryTable().addCacheEntry(
+              new CacheKey<>(omMetadataManager.getOzonePathKey(
+                      dirInfo.get().getParentObjectID(),
+                      dirInfo.get().getName())),
+              new CacheValue<>(dirInfo, trxnLogIndex));
+    }
+  }
+
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponseV1.java
new file mode 100644
index 0000000..4e93fa7
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponseV1.java
@@ -0,0 +1,103 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.file;
+
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest.Result;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.annotation.Nonnull;
+import java.io.IOException;
+import java.util.List;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE;
+
+/**
+ * Response for create directory request.
+ */
+@CleanupTableInfo(cleanupTables = {DIRECTORY_TABLE})
+public class OMDirectoryCreateResponseV1 extends OMClientResponse {
+
+  public static final Logger LOG =
+      LoggerFactory.getLogger(OMDirectoryCreateResponseV1.class);
+
+  private OmDirectoryInfo dirInfo;
+  private List<OmDirectoryInfo> parentDirInfos;
+  private Result result;
+
+  public OMDirectoryCreateResponseV1(@Nonnull OMResponse omResponse,
+                                     @Nonnull OmDirectoryInfo dirInfo,
+                                     @Nonnull List<OmDirectoryInfo> pDirInfos,
+                                     @Nonnull Result result) {
+    super(omResponse);
+    this.dirInfo = dirInfo;
+    this.parentDirInfos = pDirInfos;
+    this.result = result;
+  }
+
+  /**
+   * For when the request is not successful or the directory already exists.
+   */
+  public OMDirectoryCreateResponseV1(@Nonnull OMResponse omResponse,
+                                     @Nonnull Result result) {
+    super(omResponse);
+    this.result = result;
+  }
+
+  @Override
+  protected void addToDBBatch(OMMetadataManager omMetadataManager,
+                              BatchOperation batchOperation)
+          throws IOException {
+    addToDirectoryTable(omMetadataManager, batchOperation);
+  }
+
+  private void addToDirectoryTable(OMMetadataManager omMetadataManager,
+                                BatchOperation batchOperation)
+          throws IOException {
+    if (dirInfo != null) {
+      if (parentDirInfos != null) {
+        for (OmDirectoryInfo parentDirInfo : parentDirInfos) {
+          String parentKey = omMetadataManager
+                  .getOzonePathKey(parentDirInfo.getParentObjectID(),
+                          parentDirInfo.getName());
+          LOG.debug("putWithBatch parent : dir {} info : {}", parentKey,
+                  parentDirInfo);
+          omMetadataManager.getDirectoryTable()
+                  .putWithBatch(batchOperation, parentKey, parentDirInfo);
+        }
+      }
+
+      String dirKey = omMetadataManager.getOzonePathKey(
+              dirInfo.getParentObjectID(), dirInfo.getName());
+      omMetadataManager.getDirectoryTable().putWithBatch(batchOperation, dirKey,
+              dirInfo);
+    } else {
+      // When directory already exists, we don't add it to cache. And it is
+      // not an error, in this case dirKeyInfo will be null.
+      LOG.debug("Response Status is OK, dirKeyInfo is null in " +
+              "OMDirectoryCreateResponseV1");
+    }
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
index ff1f9c3..a6ca2cf 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
@@ -39,6 +39,8 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -232,6 +234,25 @@ public final class TestOMRequestUtils {
   }
 
   /**
+   * Add dir key entry to DirectoryTable.
+   *
+   * @throws Exception
+   */
+  public static void addDirKeyToDirTable(boolean addToCache,
+                                         OmDirectoryInfo omDirInfo,
+                                         long trxnLogIndex,
+                                         OMMetadataManager omMetadataManager)
+          throws Exception {
+    String ozoneKey = omDirInfo.getPath();
+    if (addToCache) {
+      omMetadataManager.getDirectoryTable().addCacheEntry(
+              new CacheKey<>(ozoneKey),
+              new CacheValue<>(Optional.of(omDirInfo), trxnLogIndex));
+    }
+    omMetadataManager.getDirectoryTable().put(ozoneKey, omDirInfo);
+  }
+
+  /**
    * Create OmKeyInfo.
    */
   public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName,
@@ -242,6 +263,22 @@ public final class TestOMRequestUtils {
   }
 
   /**
+   * Create OmDirectoryInfo.
+   */
+  public static OmDirectoryInfo createOmDirectoryInfo(String keyName,
+                                                      long objectID,
+                                                      long parentObjID) {
+    return new OmDirectoryInfo.Builder()
+            .setName(keyName)
+            .setCreationTime(Time.now())
+            .setModificationTime(Time.now())
+            .setObjectID(objectID)
+            .setParentObjectID(parentObjID)
+            .setUpdateID(objectID)
+            .build();
+  }
+
+  /**
    * Create OmKeyInfo.
    */
   public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName,
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java
new file mode 100644
index 0000000..77cf74b
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java
@@ -0,0 +1,649 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.file;
+
+import com.google.common.base.Optional;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.audit.AuditLogger;
+import org.apache.hadoop.ozone.audit.AuditMessage;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OMMetrics;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.ResolvedBucket;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.OMClientRequest;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateDirectoryRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.jetbrains.annotations.NotNull;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.mockito.Mockito;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.UUID;
+
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.when;
+
+/**
+ * Test OM directory create request V1 layout version.
+ */
+public class TestOMDirectoryCreateRequestV1 {
+
+  @Rule
+  public TemporaryFolder folder = new TemporaryFolder();
+
+  private OzoneManager ozoneManager;
+  private OMMetrics omMetrics;
+  private OMMetadataManager omMetadataManager;
+  private AuditLogger auditLogger;
+  // Just setting ozoneManagerDoubleBuffer which does nothing.
+  private OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper =
+          ((response, transactionIndex) -> {
+            return null;
+          });
+
+  @Before
+  public void setup() throws Exception {
+    ozoneManager = Mockito.mock(OzoneManager.class);
+    omMetrics = OMMetrics.create();
+    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
+    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
+            folder.newFolder().getAbsolutePath());
+    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
+    when(ozoneManager.getMetrics()).thenReturn(omMetrics);
+    when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
+    auditLogger = Mockito.mock(AuditLogger.class);
+    when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
+    Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
+    when(ozoneManager.resolveBucketLink(any(KeyArgs.class),
+            any(OMClientRequest.class)))
+            .thenReturn(new ResolvedBucket(Pair.of("", ""), Pair.of("", "")));
+  }
+
+  @After
+  public void stop() {
+    omMetrics.unRegister();
+    Mockito.framework().clearInlineMocks();
+  }
+
+  @Test
+  public void testPreExecute() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    String keyName = "a/b/c";
+
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            keyName);
+    OMDirectoryCreateRequestV1 omDirectoryCreateRequestV1 =
+            new OMDirectoryCreateRequestV1(omRequest);
+
+    OMRequest modifiedOmRequest =
+            omDirectoryCreateRequestV1.preExecute(ozoneManager);
+
+    // As in preExecute, we modify original request.
+    Assert.assertNotEquals(omRequest, modifiedOmRequest);
+  }
+
+  @Test
+  public void testValidateAndUpdateCache() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 3);
+
+    // Add volume and bucket entries to DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketID = omBucketInfo.getObjectID();
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            keyName);
+    OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+            new OMDirectoryCreateRequestV1(omRequest);
+
+    OMRequest modifiedOmRequest = omDirCreateRequestV1.preExecute(ozoneManager);
+
+    omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+    OMClientResponse omClientResponse =
+            omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertTrue(omClientResponse.getOMResponse().getStatus()
+            == OzoneManagerProtocolProtos.Status.OK);
+    verifyDirectoriesInDB(dirs, bucketID);
+  }
+
+  @Test
+  public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 3);
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            keyName);
+    OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+            new OMDirectoryCreateRequestV1(omRequest);
+
+    OMRequest modifiedOmRequest = omDirCreateRequestV1.preExecute(ozoneManager);
+
+    omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+    OMClientResponse omClientResponse =
+            omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(VOLUME_NOT_FOUND,
+            omClientResponse.getOMResponse().getStatus());
+
+    // Key should not exist in DB
+    Assert.assertTrue("Unexpected directory entries!",
+            omMetadataManager.getDirectoryTable().isEmpty());
+
+  }
+
+  @Test
+  public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 3);
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            keyName);
+    OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+            new OMDirectoryCreateRequestV1(omRequest);
+
+    OMRequest modifiedOmRequest = omDirCreateRequestV1.preExecute(ozoneManager);
+
+    omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+    TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager);
+
+    OMClientResponse omClientResponse =
+            omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertTrue(omClientResponse.getOMResponse().getStatus()
+            == OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND);
+
+    // Key should not exist in DB
+    Assert.assertTrue("Unexpected directory entries!",
+            omMetadataManager.getDirectoryTable().isEmpty());
+  }
+
+  @Test
+  public void testValidateAndUpdateCacheWithSubDirectoryInPath()
+          throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 3);
+
+    // Add volume and bucket entries to DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketID = omBucketInfo.getObjectID();
+    int objID = 100;
+
+    //1. Create root
+    OmDirectoryInfo omDirInfo =
+            TestOMRequestUtils.createOmDirectoryInfo(dirs.get(0), objID++,
+                    bucketID);
+    TestOMRequestUtils.addDirKeyToDirTable(true, omDirInfo, 5000,
+            omMetadataManager);
+    //2. Create sub-directory under root
+    omDirInfo = TestOMRequestUtils.createOmDirectoryInfo(dirs.get(1), objID++,
+            omDirInfo.getObjectID());
+    TestOMRequestUtils.addDirKeyToDirTable(true, omDirInfo, 5000,
+            omMetadataManager);
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            keyName);
+    OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+            new OMDirectoryCreateRequestV1(omRequest);
+
+    OMRequest modifiedOmRequest = omDirCreateRequestV1.preExecute(ozoneManager);
+
+    omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+    OMClientResponse omClientResponse =
+            omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertTrue(omClientResponse.getOMResponse().getStatus()
+            == OzoneManagerProtocolProtos.Status.OK);
+
+    // Key should exist in DB and cache.
+    verifyDirectoriesInDB(dirs, bucketID);
+  }
+
+  @Test
+  public void testValidateAndUpdateCacheWithDirectoryAlreadyExists()
+          throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 3);
+
+    // Add volume and bucket entries to DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketID = omBucketInfo.getObjectID();
+
+    // bucketID is the parent
+    long parentID = bucketID;
+
+    // add all the directories into DirectoryTable
+    for (int indx = 0; indx < dirs.size(); indx++) {
+      long objID = 100 + indx;
+      long txnID = 5000 + indx;
+      // for index=0, parentID is bucketID
+      OmDirectoryInfo omDirInfo = TestOMRequestUtils.createOmDirectoryInfo(
+              dirs.get(indx), objID, parentID);
+      TestOMRequestUtils.addDirKeyToDirTable(false, omDirInfo,
+              txnID, omMetadataManager);
+
+      parentID = omDirInfo.getObjectID();
+    }
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            keyName);
+    OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+            new OMDirectoryCreateRequestV1(omRequest);
+
+    OMRequest modifiedOmRequest = omDirCreateRequestV1.preExecute(ozoneManager);
+
+    omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+    OMClientResponse omClientResponse =
+            omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertTrue(omClientResponse.getOMResponse().getStatus()
+            == OzoneManagerProtocolProtos.Status.DIRECTORY_ALREADY_EXISTS);
+
+    Assert.assertEquals("Wrong OM numKeys metrics",
+            0, ozoneManager.getMetrics().getNumKeys());
+
+    // Key should exist in DB and doesn't added to cache.
+    verifyDirectoriesInDB(dirs, bucketID);
+    verifyDirectoriesNotInCache(dirs, bucketID);
+  }
+
+  /**
+   * Case: File exists with the same name as the requested directory.
+   * Say, requested to createDir '/a/b/c' and there is a file exists with
+   * same name.
+   */
+  @Test
+  public void testValidateAndUpdateCacheWithFilesInPath() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 3);
+
+    // Add volume and bucket entries to DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long parentID = omBucketInfo.getObjectID();
+
+    // add all the parent directories into DirectoryTable. This won't create
+    // the leaf node and this will be used in CreateDirectoryReq.
+    for (int indx = 0; indx < dirs.size() - 1; indx++) {
+      long objID = 100 + indx;
+      long txnID = 5000 + indx;
+      // for index=0, parentID is bucketID
+      OmDirectoryInfo omDirInfo = TestOMRequestUtils.createOmDirectoryInfo(
+              dirs.get(indx), objID, parentID);
+      TestOMRequestUtils.addDirKeyToDirTable(false, omDirInfo,
+              txnID, omMetadataManager);
+
+      parentID = omDirInfo.getObjectID();
+    }
+
+    long objID = parentID + 100;
+    long txnID = 50000;
+
+    // Add a file into the FileTable, this is to simulate "file exists" check.
+    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
+            bucketName, keyName, HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.THREE, objID++);
+    String ozoneFileName = parentID + "/" + dirs.get(dirs.size() - 1);
+    omMetadataManager.getKeyTable().addCacheEntry(new CacheKey<>(ozoneFileName),
+            new CacheValue<>(Optional.of(omKeyInfo), ++txnID));
+    omMetadataManager.getKeyTable().put(ozoneFileName, omKeyInfo);
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            keyName);
+    OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+            new OMDirectoryCreateRequestV1(omRequest);
+
+    OMRequest modifiedOmRequest =
+            omDirCreateRequestV1.preExecute(ozoneManager);
+
+    omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+    OMClientResponse omClientResponse =
+            omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertTrue(omClientResponse.getOMResponse().getStatus()
+            == OzoneManagerProtocolProtos.Status.FILE_ALREADY_EXISTS);
+
+    Assert.assertEquals("Wrong OM numKeys metrics",
+            0, ozoneManager.getMetrics().getNumKeys());
+
+    // The conflicting file must still exist in the KeyTable.
+    Assert.assertNotNull(omMetadataManager.getKeyTable().get(ozoneFileName));
+    // Only the pre-created parent directories should be present.
+    Assert.assertEquals("Wrong directories count!", 3,
+            omMetadataManager.getDirectoryTable().getEstimatedKeyCount());
+  }
+
+
+  /**
+   * Case: File exists in the given path.
+   * Say, requested to createDir '/a/b/c/d' and there is a file '/a/b' exists
+   * in the given path.
+   */
+  @Test
+  public void testValidateAndUpdateCacheWithFileExistsInGivenPath()
+          throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 3);
+
+    // Add volume and bucket entries to DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long parentID = omBucketInfo.getObjectID();
+
+    long objID = parentID + 100;
+    long txnID = 5000;
+
+    // for index=0, parentID is bucketID
+    OmDirectoryInfo omDirInfo = TestOMRequestUtils.createOmDirectoryInfo(
+            dirs.get(0), objID++, parentID);
+    TestOMRequestUtils.addDirKeyToDirTable(true, omDirInfo,
+            txnID, omMetadataManager);
+    parentID = omDirInfo.getObjectID();
+
+    // Add a key in the second level, simulating a file at '/a/b'.
+    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
+            bucketName, keyName, HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.THREE, objID++);
+    String ozoneKey = parentID + "/" + dirs.get(1);
+    omMetadataManager.getKeyTable().addCacheEntry(new CacheKey<>(ozoneKey),
+            new CacheValue<>(Optional.of(omKeyInfo), ++txnID));
+    omMetadataManager.getKeyTable().put(ozoneKey, omKeyInfo);
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            keyName);
+    OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+            new OMDirectoryCreateRequestV1(omRequest);
+
+    OMRequest modifiedOmRequest =
+            omDirCreateRequestV1.preExecute(ozoneManager);
+
+    omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+    OMClientResponse omClientResponse =
+            omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    // assertEquals (instead of assertTrue on '==') reports both the
+    // expected and the actual status on failure.
+    Assert.assertEquals("Invalid response code",
+            OzoneManagerProtocolProtos.Status.FILE_ALREADY_EXISTS,
+            omClientResponse.getOMResponse().getStatus());
+
+    Assert.assertEquals("Wrong OM numKeys metrics",
+            0, ozoneManager.getMetrics().getNumKeys());
+
+    // The conflicting file must still exist in the KeyTable.
+    Assert.assertNotNull(omMetadataManager.getKeyTable().get(ozoneKey));
+    // Only the pre-created first-level directory should be present; the
+    // failed request must not have added any new directories.
+    Assert.assertEquals("Wrong directories count!",
+            1, omMetadataManager.getDirectoryTable().getEstimatedKeyCount());
+  }
+
+  @Test
+  public void testCreateDirectoryUptoLimitOfMaxLevels255() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 255);
+
+    // Seed volume and bucket entries in the DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+    OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().get(
+            omMetadataManager.getBucketKey(volumeName, bucketName));
+    long bucketID = bucketInfo.getObjectID();
+
+    // Build and pre-execute the create-directory request for the deep path.
+    OMRequest rawRequest = createDirectoryRequest(volumeName, bucketName,
+            OzoneFSUtils.addTrailingSlashIfNeeded(keyName));
+    OMRequest preExecutedRequest =
+            new OMDirectoryCreateRequestV1(rawRequest).preExecute(ozoneManager);
+    OMDirectoryCreateRequestV1 dirCreateRequest =
+            new OMDirectoryCreateRequestV1(preExecutedRequest);
+
+    Assert.assertEquals(0L, omMetrics.getNumKeys());
+    OMClientResponse response = dirCreateRequest.validateAndUpdateCache(
+            ozoneManager, 100L, ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
+            response.getOMResponse().getStatus());
+
+    // Every level of the path must have been persisted, and the key
+    // metric must reflect the number of directories created.
+    verifyDirectoriesInDB(dirs, bucketID);
+
+    Assert.assertEquals(dirs.size(), omMetrics.getNumKeys());
+  }
+
+  @Test
+  public void testCreateDirectoryExceedLimitOfMaxLevels255() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    // Depth 256 exceeds the maximum allowed path level of 255.
+    String keyName = createDirKey(dirs, 256);
+
+    // Add volume and bucket entries to DB. (The bucketID lookup that was
+    // here was dead code: the request is rejected, so no directories are
+    // ever verified against the bucket.)
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            OzoneFSUtils.addTrailingSlashIfNeeded(keyName));
+    OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+            new OMDirectoryCreateRequestV1(omRequest);
+
+    OMRequest modifiedOmRequest = omDirCreateRequestV1.preExecute(ozoneManager);
+
+    omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+    Assert.assertEquals(0L, omMetrics.getNumKeys());
+    OMClientResponse omClientResponse =
+            omDirCreateRequestV1.validateAndUpdateCache(ozoneManager,
+                    100L, ozoneManagerDoubleBufferHelper);
+
+    // The request must be rejected and leave no state behind.
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.INVALID_KEY_NAME,
+            omClientResponse.getOMResponse().getStatus());
+
+    Assert.assertEquals("Unexpected directories!", 0,
+            omMetadataManager.getDirectoryTable().getEstimatedKeyCount());
+
+    Assert.assertEquals(0, omMetrics.getNumKeys());
+  }
+
+  @Test
+  public void testCreateDirectoryOMMetric() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 3);
+
+    // Seed volume and bucket entries in the DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    long bucketID =
+            omMetadataManager.getBucketTable().get(bucketKey).getObjectID();
+
+    OMRequest rawRequest = createDirectoryRequest(volumeName, bucketName,
+            OzoneFSUtils.addTrailingSlashIfNeeded(keyName));
+    OMDirectoryCreateRequestV1 dirCreateRequest =
+            new OMDirectoryCreateRequestV1(
+                    new OMDirectoryCreateRequestV1(rawRequest)
+                            .preExecute(ozoneManager));
+
+    // numKeys starts at zero and must grow by the number of dirs created.
+    Assert.assertEquals(0L, omMetrics.getNumKeys());
+    OMClientResponse response = dirCreateRequest.validateAndUpdateCache(
+            ozoneManager, 100L, ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
+            response.getOMResponse().getStatus());
+
+    verifyDirectoriesInDB(dirs, bucketID);
+
+    Assert.assertEquals(dirs.size(), omMetrics.getNumKeys());
+  }
+
+
+  @NotNull
+  private String createDirKey(List<String> dirs, int depth) {
+    // Builds a random path of (depth + 1) components, e.g. "a/b/c/d",
+    // appending each component name to {@code dirs} as it goes.
+    StringBuilder keyName =
+            new StringBuilder(RandomStringUtils.randomAlphabetic(5));
+    dirs.add(keyName.toString());
+    for (int i = 0; i < depth; i++) {
+      String dirName = RandomStringUtils.randomAlphabetic(5);
+      dirs.add(dirName);
+      keyName.append('/').append(dirName);
+    }
+    return keyName.toString();
+  }
+
+  /**
+   * Asserts that every directory in {@code dirs} exists in the
+   * DirectoryTable, walking down level by level with the bucket as the
+   * root parent.
+   */
+  private void verifyDirectoriesInDB(List<String> dirs, long bucketID)
+          throws IOException {
+    // bucketID is the parent of the first directory level.
+    long parentID = bucketID;
+    for (String dirName : dirs) {
+      // Removed the dead 'dbKey = ""' initializer; declare at first use.
+      String dbKey = omMetadataManager.getOzonePathKey(parentID, dirName);
+      OmDirectoryInfo omDirInfo =
+              omMetadataManager.getDirectoryTable().get(dbKey);
+      Assert.assertNotNull("Invalid directory!", omDirInfo);
+      Assert.assertEquals("Invalid directory!", dirName, omDirInfo.getName());
+      Assert.assertEquals("Invalid dir path!",
+              parentID + "/" + dirName, omDirInfo.getPath());
+      // Descend: the current directory becomes the next level's parent.
+      parentID = omDirInfo.getObjectID();
+    }
+  }
+
+  /**
+   * Asserts that none of the given directories are present in the
+   * DirectoryTable cache.
+   *
+   * NOTE(review): parentID is never advanced here (the dirs are expected
+   * to be absent, so their objectIDs are unknown); every lookup therefore
+   * uses the bucket as the parent, and only the first path component forms
+   * a fully qualified key. Confirm this matches the intent.
+   */
+  private void verifyDirectoriesNotInCache(List<String> dirs, long bucketID)
+          throws IOException {
+    // bucketID is the parent for every lookup (see NOTE above).
+    long parentID = bucketID;
+    for (String dirName : dirs) {
+      // Removed the dead 'dbKey = ""' initializer; declare at first use.
+      String dbKey = omMetadataManager.getOzonePathKey(parentID, dirName);
+      CacheValue<OmDirectoryInfo> omDirInfoCacheValue =
+              omMetadataManager.getDirectoryTable()
+                      .getCacheValue(new CacheKey<>(dbKey));
+      Assert.assertNull("Unexpected directory!", omDirInfoCacheValue);
+    }
+  }
+
+  /**
+   * Create OMRequest which encapsulates CreateDirectory request.
+   *
+   * @param volumeName name of the volume that will hold the directory
+   * @param bucketName name of the bucket that will hold the directory
+   * @param keyName    full directory path relative to the bucket root
+   * @return OMRequest of type CreateDirectory with a random client id
+   */
+  private OMRequest createDirectoryRequest(String volumeName, String bucketName,
+                                           String keyName) {
+    return OMRequest.newBuilder().setCreateDirectoryRequest(
+            CreateDirectoryRequest.newBuilder().setKeyArgs(
+                    KeyArgs.newBuilder().setVolumeName(volumeName)
+                            .setBucketName(bucketName).setKeyName(keyName)))
+            .setCmdType(OzoneManagerProtocolProtos.Type.CreateDirectory)
+            .setClientId(UUID.randomUUID().toString()).build();
+  }
+
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponseV1.java
new file mode 100644
index 0000000..0a1114a
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponseV1.java
@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.file;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestV1;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.util.ArrayList;
+import java.util.UUID;
+
+/**
+ * Tests OMDirectoryCreateResponseV1 new layout version.
+ */
+public class TestOMDirectoryCreateResponseV1 {
+  @Rule
+  public TemporaryFolder folder = new TemporaryFolder();
+
+  private OMMetadataManager omMetadataManager;
+  private BatchOperation batchOperation;
+
+  /** Creates a fresh on-disk metadata store and an open batch per test. */
+  @Before
+  public void setup() throws Exception {
+    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
+    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
+        folder.newFolder().getAbsolutePath());
+    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
+    batchOperation = omMetadataManager.getStore().initBatchOperation();
+  }
+
+  @Test
+  public void testAddToDBBatch() throws Exception {
+    // Removed unused volumeName/bucketName locals: the directory entry is
+    // keyed purely by (parentID, keyName).
+    String keyName = UUID.randomUUID().toString();
+
+    long parentID = 100;
+    OmDirectoryInfo omDirInfo =
+            TestOMRequestUtils.createOmDirectoryInfo(keyName, 500, parentID);
+
+    OMResponse omResponse = OMResponse.newBuilder().setCreateDirectoryResponse(
+        OzoneManagerProtocolProtos.CreateDirectoryResponse.getDefaultInstance())
+            .setStatus(OzoneManagerProtocolProtos.Status.OK)
+            .setCmdType(OzoneManagerProtocolProtos.Type.CreateDirectory)
+            .build();
+
+    OMDirectoryCreateResponseV1 omDirectoryCreateResponseV1 =
+        new OMDirectoryCreateResponseV1(omResponse, omDirInfo,
+            new ArrayList<>(), OMDirectoryCreateRequestV1.Result.SUCCESS);
+
+    omDirectoryCreateResponseV1.addToDBBatch(omMetadataManager, batchOperation);
+
+    // Do manual commit and see whether addToBatch is successful or not.
+    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+
+    // The directory must now be readable from the DirectoryTable.
+    Assert.assertNotNull(omMetadataManager.getDirectoryTable().get(
+            omMetadataManager.getOzonePathKey(parentID, keyName)));
+  }
+}

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 13/29: HDDS-4486. Feature Config: Make proper enableFSPaths and OFS optimized flag combinations (#1848)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit ffdd1a8ff594085e60f71a96214079462456257b
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Thu Jan 28 15:50:48 2021 +0530

    HDDS-4486. Feature Config: Make proper enableFSPaths and OFS optimized flag combinations (#1848)
---
 .../apache/hadoop/fs/ozone/TestOzoneDirectory.java |   4 +-
 .../hadoop/fs/ozone/TestOzoneFileInterfacesV1.java |   6 +-
 .../apache/hadoop/fs/ozone/TestOzoneFileOps.java   |   5 +-
 .../hadoop/fs/ozone/TestOzoneFileSystem.java       |  13 ++-
 .../hadoop/fs/ozone/TestOzoneFileSystemV1.java     |  66 +++---------
 .../hadoop/ozone/client/rpc/TestReadRetries.java   |   9 +-
 .../ozone/freon/TestHadoopDirTreeGeneratorV1.java  |   4 +-
 .../apache/hadoop/ozone/om/TestObjectStoreV1.java  |   3 +-
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |   8 +-
 .../hadoop/ozone/om/OmMetadataManagerImpl.java     |   4 +-
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |  17 ++-
 .../om/ratis/utils/OzoneManagerRatisUtils.java     |  35 ++++---
 .../om/request/bucket/OMBucketCreateRequest.java   |  28 +++--
 .../ozone/om/request/TestOMRequestUtils.java       |  42 ++++++++
 .../ozone/om/request/bucket/TestBucketRequest.java |   1 +
 .../request/bucket/TestOMBucketCreateRequest.java  |   2 +-
 .../bucket/TestOMBucketCreateRequestV1.java        | 114 +++++++++++++++++++++
 .../file/TestOMDirectoryCreateRequestV1.java       |   3 +-
 .../om/request/file/TestOMFileCreateRequestV1.java |   4 +-
 .../request/key/TestOMAllocateBlockRequestV1.java  |   4 +-
 .../om/request/key/TestOMKeyCommitRequestV1.java   |   4 +-
 .../om/request/key/TestOMKeyCreateRequestV1.java   |   4 +-
 .../om/request/key/TestOMKeyDeleteRequestV1.java   |   4 +-
 .../response/file/TestOMFileCreateResponseV1.java  |   4 +-
 .../key/TestOMAllocateBlockResponseV1.java         |   4 +-
 .../om/response/key/TestOMKeyCommitResponseV1.java |   4 +-
 .../om/response/key/TestOMKeyCreateResponseV1.java |   4 +-
 .../om/response/key/TestOMKeyDeleteResponseV1.java |   4 +-
 .../fs/ozone/BasicOzoneClientAdapterImpl.java      |  15 ++-
 .../hadoop/fs/ozone/BasicOzoneFileSystem.java      |  10 +-
 .../ozone/BasicRootedOzoneClientAdapterImpl.java   |   5 +-
 .../apache/hadoop/fs/ozone/OzoneClientAdapter.java |   2 +-
 32 files changed, 288 insertions(+), 148 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneDirectory.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneDirectory.java
index 56c6177..22ed13e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneDirectory.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneDirectory.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.After;
 import org.junit.Assert;
@@ -149,7 +150,8 @@ public class TestOzoneDirectory {
           throws IOException, TimeoutException, InterruptedException {
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.setInt(FS_TRASH_INTERVAL_KEY, 1);
-    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    TestOMRequestUtils.configureFSOptimizedPaths(conf,
+            true, OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1);
     cluster = MiniOzoneCluster.newBuilder(conf)
             .setNumDatanodes(3)
             .build();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfacesV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfacesV1.java
index 93473be..d716457 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfacesV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfacesV1.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.ozone;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.jetbrains.annotations.NotNull;
 import org.junit.Ignore;
 import org.junit.Test;
@@ -44,9 +45,8 @@ public class TestOzoneFileInterfacesV1 extends TestOzoneFileInterfaces {
   @Override
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
-            enableFileSystemPaths);
-    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    TestOMRequestUtils.configureFSOptimizedPaths(conf,
+            enableFileSystemPaths, OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1);
     return conf;
   }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java
index d097268..12dd51e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.After;
 import org.junit.Before;
@@ -73,8 +74,8 @@ public class TestOzoneFileOps {
           throws IOException, TimeoutException, InterruptedException {
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.setInt(FS_TRASH_INTERVAL_KEY, 1);
-    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
-    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, false);
+    TestOMRequestUtils.configureFSOptimizedPaths(conf,
+            true, OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1);
     cluster = MiniOzoneCluster.newBuilder(conf)
             .setNumDatanodes(3)
             .build();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
index 09118b9..7e91576 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.TrashPolicyOzone;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 
@@ -114,6 +115,7 @@ public class TestOzoneFileSystem {
       }
     }
   }
+
   /**
    * Set a timeout for each test.
    */
@@ -124,6 +126,8 @@ public class TestOzoneFileSystem {
       LoggerFactory.getLogger(TestOzoneFileSystem.class);
 
   @SuppressWarnings("checkstyle:VisibilityModifier")
+  protected static boolean isBucketFSOptimized = false;
+  @SuppressWarnings("checkstyle:VisibilityModifier")
   protected static boolean enabledFileSystemPaths;
   @SuppressWarnings("checkstyle:VisibilityModifier")
   protected static boolean omRatisEnabled;
@@ -148,8 +152,13 @@ public class TestOzoneFileSystem {
     conf.setInt(OMConfigKeys.OZONE_FS_TRASH_INTERVAL_KEY, 1);
     conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, omRatisEnabled);
     conf.setBoolean(OZONE_ACL_ENABLED, true);
-    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
-            enabledFileSystemPaths);
+    if (isBucketFSOptimized) {
+      TestOMRequestUtils.configureFSOptimizedPaths(conf,
+              enabledFileSystemPaths, OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1);
+    } else {
+      conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+              enabledFileSystemPaths);
+    }
     cluster = MiniOzoneCluster.newBuilder(conf)
             .setNumDatanodes(3)
             .build();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
index e574e94..ffeb5a3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
@@ -18,26 +18,15 @@
 
 package org.apache.hadoop.fs.ozone;
 
-import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
-import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.TestDataUtil;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.junit.Assert;
 import org.junit.After;
-import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Rule;
@@ -51,9 +40,9 @@ import org.slf4j.LoggerFactory;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
 
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.fail;
@@ -66,49 +55,24 @@ import static org.junit.Assert.fail;
 @RunWith(Parameterized.class)
 public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
 
-  @Ignore("TODO:HDDS-2939")
-  @BeforeClass
-  public static void init() throws Exception {
+  @Parameterized.Parameters
+  public static Collection<Object[]> data() {
+    return Arrays.asList(
+            new Object[]{true, true},
+            new Object[]{true, false},
+            new Object[]{false, true},
+            new Object[]{false, false});
+  }
 
+  @BeforeClass
+  public static void init() {
+    isBucketFSOptimized = true;
   }
 
   public TestOzoneFileSystemV1(boolean setDefaultFs, boolean enableOMRatis) {
     super(setDefaultFs, enableOMRatis);
   }
 
-  @Before
-  public void setup() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setInt(FS_TRASH_INTERVAL_KEY, 1);
-    conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, omRatisEnabled);
-    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
-            enabledFileSystemPaths);
-    if (enabledFileSystemPaths) {
-      conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
-    }
-    cluster = MiniOzoneCluster.newBuilder(conf)
-            .setNumDatanodes(3)
-            .build();
-    cluster.waitForClusterToBeReady();
-
-    // create a volume and a bucket to be used by OzoneFileSystem
-    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster);
-    volumeName = bucket.getVolumeName();
-    bucketName = bucket.getName();
-
-    String rootPath = String.format("%s://%s.%s/",
-            OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName);
-
-    // Set the fs.defaultFS and start the filesystem
-    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
-    // Set the number of keys to be processed during batch operate.
-    conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5);
-
-    fs = FileSystem.get(conf);
-    trash = new Trash(conf);
-    o3fs = (OzoneFileSystem) fs;
-  }
-
   @After
   @Override
   public void cleanup() {
@@ -119,10 +83,6 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
       LOG.info("Failed to cleanup DB tables.", e);
       fail("Failed to cleanup DB tables." + e.getMessage());
     }
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-    IOUtils.closeQuietly(fs);
   }
 
   /**
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
index a7dee52..d729ad3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 
 import org.junit.After;
 import org.junit.Assert;
@@ -102,7 +103,9 @@ public class TestReadRetries {
 
   @Parameterized.Parameters
   public static Collection<Object[]> data() {
-    return Arrays.asList(new Object[]{"V0"}, new Object[]{"V1"});
+    return Arrays.asList(
+            new Object[]{OMConfigKeys.OZONE_OM_LAYOUT_VERSION_DEFAULT},
+            new Object[]{OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1});
   }
 
   /**
@@ -113,8 +116,8 @@ public class TestReadRetries {
   public void init() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1);
-    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true);
-    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, layoutVersion);
+    TestOMRequestUtils.configureFSOptimizedPaths(conf,
+            true, layoutVersion);
     cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(3)
         .setScmId(SCM_ID)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGeneratorV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGeneratorV1.java
index 99d4f26..01a73bc 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGeneratorV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGeneratorV1.java
@@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.freon;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 
 /**
  * Test for HadoopDirTreeGenerator layout version V1.
@@ -26,7 +27,8 @@ public class TestHadoopDirTreeGeneratorV1 extends TestHadoopDirTreeGenerator {
 
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    TestOMRequestUtils.configureFSOptimizedPaths(conf,
+            true, OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1);
     return conf;
   }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
index d343e2c..d09020e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreV1.java
@@ -88,7 +88,8 @@ public class TestObjectStoreV1 {
     clusterId = UUID.randomUUID().toString();
     scmId = UUID.randomUUID().toString();
     omId = UUID.randomUUID().toString();
-    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    TestOMRequestUtils.configureFSOptimizedPaths(conf,
+            true, OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1);
     cluster = MiniOzoneCluster.newBuilder(conf)
             .setClusterId(clusterId)
             .setScmId(scmId)
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 055ab13..604f7d2 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -657,7 +657,7 @@ public class KeyManagerImpl implements KeyManager {
         bucketName);
     OmKeyInfo value = null;
     try {
-      if (OzoneManagerRatisUtils.isOmLayoutVersionV1()) {
+      if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
         value = getOmKeyInfoV1(volumeName, bucketName, keyName);
       } else {
         value = getOmKeyInfo(volumeName, bucketName, keyName);
@@ -1802,7 +1802,7 @@ public class KeyManagerImpl implements KeyManager {
     String bucketName = args.getBucketName();
     String keyName = args.getKeyName();
 
-    if (OzoneManagerRatisUtils.isOmLayoutVersionV1()) {
+    if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
       return getOzoneFileStatusV1(volumeName, bucketName, keyName,
               args.getSortDatanodes(), clientAddress, false);
     }
@@ -2072,7 +2072,7 @@ public class KeyManagerImpl implements KeyManager {
     String bucketName = args.getBucketName();
     String keyName = args.getKeyName();
     OzoneFileStatus fileStatus;
-    if (OzoneManagerRatisUtils.isOmLayoutVersionV1()) {
+    if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
       fileStatus = getOzoneFileStatusV1(volumeName, bucketName, keyName,
               args.getSortDatanodes(), clientAddress, false);
     } else {
@@ -2180,7 +2180,7 @@ public class KeyManagerImpl implements KeyManager {
       return fileStatusList;
     }
 
-    if (OzoneManagerRatisUtils.isOmLayoutVersionV1()) {
+    if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
       return listStatusV1(args, recursive, startKey, numEntries, clientAddress);
     }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index 7042d67..4a55108 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -240,7 +240,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
 
   @Override
   public Table<String, OmKeyInfo> getKeyTable() {
-    if (OzoneManagerRatisUtils.isOmLayoutVersionV1()) {
+    if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
       return fileTable;
     }
     return keyTable;
@@ -253,7 +253,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
 
   @Override
   public Table<String, OmKeyInfo> getOpenKeyTable() {
-    if (OzoneManagerRatisUtils.isOmLayoutVersionV1()) {
+    if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
       return openFileTable;
     }
     return openKeyTable;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 302b6e7..2b4484e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -218,6 +218,7 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_F
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_LAYOUT_VERSION;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_LAYOUT_VERSION_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_METRICS_SAVE_INTERVAL;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_METRICS_SAVE_INTERVAL_DEFAULT;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_USER_MAX_VOLUME;
@@ -1109,7 +1110,8 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
     }
 
     // TODO: Temporary workaround for OM upgrade path and will be replaced once
-    //  upgrade HDDS-3698 story reaches consensus.
+    //  upgrade HDDS-3698 story reaches consensus. Instead of cluster level
+    //  configuration, OM needs to check this property on every bucket level.
     getOMLayoutVersion();
 
     metadataManager.start(configuration);
@@ -3670,14 +3672,19 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
         OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT);
   }
 
-  private void getOMLayoutVersion() {
+  public String getOMLayoutVersion() {
     String version = configuration.getTrimmed(OZONE_OM_LAYOUT_VERSION,
             OZONE_OM_LAYOUT_VERSION_DEFAULT);
-    boolean omLayoutVersionV1 =
-            StringUtils.equalsIgnoreCase(version, "V1");
-    OzoneManagerRatisUtils.setOmLayoutVersionV1(omLayoutVersionV1);
+    boolean omLayoutVersionV1 = StringUtils.equalsIgnoreCase(version,
+            OZONE_OM_LAYOUT_VERSION_V1);
     LOG.info("Configured {}={} and enabled:{} optimized OM FS operations",
             OZONE_OM_LAYOUT_VERSION, version, omLayoutVersionV1);
+
+    boolean isBucketFSOptimized =
+            omLayoutVersionV1 && getEnableFileSystemPaths();
+    OzoneManagerRatisUtils.setBucketFSOptimized(isBucketFSOptimized);
+
+    return version;
   }
 
   /**
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index d1ca182..93e49f0 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -88,18 +88,19 @@ public final class OzoneManagerRatisUtils {
 
   // TODO: Temporary workaround for OM upgrade path and will be replaced once
   //  upgrade HDDS-3698 story reaches consensus.
-  private static boolean omLayoutVersionV1 = false;
+  private static boolean isBucketFSOptimized = false;
 
   private OzoneManagerRatisUtils() {
   }
 
   /**
-   * Sets layout version.
+   * Sets enabled/disabled file system optimized path property. A true value
+   * represents enabled, false represents disabled.
    *
-   * @param layoutVersionV1 om layout version
+   * @param enabledFSO enabled/disabled file system optimized
    */
-  public static void setOmLayoutVersionV1(boolean layoutVersionV1) {
-    OzoneManagerRatisUtils.omLayoutVersionV1 = layoutVersionV1;
+  public static void setBucketFSOptimized(boolean enabledFSO) {
+    OzoneManagerRatisUtils.isBucketFSOptimized = enabledFSO;
   }
 
   /**
@@ -136,41 +137,41 @@ public final class OzoneManagerRatisUtils {
     case SetBucketProperty:
       return new OMBucketSetPropertyRequest(omRequest);
     case AllocateBlock:
-      if (omLayoutVersionV1) {
+      if (isBucketFSOptimized()) {
         return new OMAllocateBlockRequestV1(omRequest);
       }
       return new OMAllocateBlockRequest(omRequest);
     case CreateKey:
-      if (omLayoutVersionV1) {
+      if (isBucketFSOptimized()) {
         return new OMKeyCreateRequestV1(omRequest);
       }
       return new OMKeyCreateRequest(omRequest);
     case CommitKey:
-      if (omLayoutVersionV1) {
+      if (isBucketFSOptimized()) {
         return new OMKeyCommitRequestV1(omRequest);
       }
       return new OMKeyCommitRequest(omRequest);
     case DeleteKey:
-      if (omLayoutVersionV1) {
+      if (isBucketFSOptimized()) {
         return new OMKeyDeleteRequestV1(omRequest);
       }
       return new OMKeyDeleteRequest(omRequest);
     case DeleteKeys:
       return new OMKeysDeleteRequest(omRequest);
     case RenameKey:
-      if (omLayoutVersionV1) {
+      if (isBucketFSOptimized()) {
         return new OMKeyRenameRequestV1(omRequest);
       }
       return new OMKeyRenameRequest(omRequest);
     case RenameKeys:
       return new OMKeysRenameRequest(omRequest);
     case CreateDirectory:
-      if (omLayoutVersionV1) {
+      if (isBucketFSOptimized()) {
         return new OMDirectoryCreateRequestV1(omRequest);
       }
       return new OMDirectoryCreateRequest(omRequest);
     case CreateFile:
-      if (omLayoutVersionV1) {
+      if (isBucketFSOptimized()) {
         return new OMFileCreateRequestV1(omRequest);
       }
       return new OMFileCreateRequest(omRequest);
@@ -296,11 +297,13 @@ public final class OzoneManagerRatisUtils {
   }
 
   /**
-   * Returns layout version flag represents V1.
-   * @return
+   * Returns enabled/disabled file system optimized path property. A true value
+   * represents FSO path is enabled, false represents disabled.
+   *
+   * @return true or false.
    */
-  public static boolean isOmLayoutVersionV1() {
-    return omLayoutVersionV1;
+  public static boolean isBucketFSOptimized() {
+    return isBucketFSOptimized;
   }
 
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
index 2fb27c5..76c4e42 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
@@ -26,7 +26,7 @@ import java.util.Map;
 
 import com.google.common.base.Optional;
 
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -159,7 +159,7 @@ public class OMBucketCreateRequest extends OMClientRequest {
     OmBucketInfo omBucketInfo = OmBucketInfo.getFromProtobuf(bucketInfo);
 
     // Add layout version V1 to bucket info
-    addLayoutVersionToBucket(ozoneManager, omBucketInfo);
+    addFSOptimizedBucketDetails(ozoneManager, omBucketInfo);
 
     AuditLogger auditLogger = ozoneManager.getAuditLogger();
     OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
@@ -364,20 +364,30 @@ public class OMBucketCreateRequest extends OMClientRequest {
 
   }
 
-  private void addLayoutVersionToBucket(OzoneManager ozoneManager,
-                                        OmBucketInfo omBucketInfo) {
+  /**
+   * OM can support FS optimization only if both flags are TRUE
+   * (enableFSOptimized=true && enableFSPaths=true) and will write table key
+   * entries in NEW_FORMAT(prefix separated format using objectID). All the
+   * other cases, it will
+   * write table key entries in OLD_FORMAT(existing format).
+   *
+   * @param ozoneManager ozone manager
+   * @param omBucketInfo bucket information
+   */
+  private void addFSOptimizedBucketDetails(OzoneManager ozoneManager,
+                                           OmBucketInfo omBucketInfo) {
     Map<String, String> metadata = omBucketInfo.getMetadata();
     if (metadata == null) {
       metadata = new HashMap<>();
     }
-    OzoneConfiguration configuration = ozoneManager.getConfiguration();
     // TODO: Many unit test cases has null config and done a simple null
     //  check now. It can be done later, to avoid massive test code changes.
-    if (configuration != null) {
-      String layOutVersion = configuration
-              .get(OMConfigKeys.OZONE_OM_LAYOUT_VERSION,
-                      OMConfigKeys.OZONE_OM_LAYOUT_VERSION_DEFAULT);
+    if(StringUtils.isNotBlank(ozoneManager.getOMLayoutVersion())){
+      String layOutVersion = ozoneManager.getOMLayoutVersion();
       metadata.put(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, layOutVersion);
+      boolean fsPathsEnabled = ozoneManager.getEnableFileSystemPaths();
+      metadata.put(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+              Boolean.toString(fsPathsEnabled));
       omBucketInfo.setMetadata(metadata);
     }
   }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
index 61fd676..eca06f9 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
@@ -26,6 +26,7 @@ import java.util.List;
 import java.util.UUID;
 
 import com.google.common.base.Optional;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
@@ -33,6 +34,7 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
@@ -430,6 +432,25 @@ public final class TestOMRequestUtils {
         .setClientId(UUID.randomUUID().toString()).build();
   }
 
+  public static OzoneManagerProtocolProtos.OMRequest createBucketRequestV1(
+          String bucketName, String volumeName, boolean isVersionEnabled,
+          OzoneManagerProtocolProtos.StorageTypeProto storageTypeProto) {
+    OzoneManagerProtocolProtos.BucketInfo bucketInfo =
+            OzoneManagerProtocolProtos.BucketInfo.newBuilder()
+                    .setBucketName(bucketName)
+                    .setVolumeName(volumeName)
+                    .setIsVersionEnabled(isVersionEnabled)
+                    .setStorageType(storageTypeProto)
+                    .addAllMetadata(getMetadataListV1()).build();
+    OzoneManagerProtocolProtos.CreateBucketRequest.Builder req =
+            OzoneManagerProtocolProtos.CreateBucketRequest.newBuilder();
+    req.setBucketInfo(bucketInfo);
+    return OzoneManagerProtocolProtos.OMRequest.newBuilder()
+            .setCreateBucketRequest(req)
+            .setCmdType(OzoneManagerProtocolProtos.Type.CreateBucket)
+            .setClientId(UUID.randomUUID().toString()).build();
+  }
+
   public static List< HddsProtos.KeyValue> getMetadataList() {
     List<HddsProtos.KeyValue> metadataList = new ArrayList<>();
     metadataList.add(HddsProtos.KeyValue.newBuilder().setKey("key1").setValue(
@@ -439,6 +460,20 @@ public final class TestOMRequestUtils {
     return metadataList;
   }
 
+  public static List< HddsProtos.KeyValue> getMetadataListV1() {
+    List<HddsProtos.KeyValue> metadataList = new ArrayList<>();
+    metadataList.add(HddsProtos.KeyValue.newBuilder().setKey("key1").setValue(
+            "value1").build());
+    metadataList.add(HddsProtos.KeyValue.newBuilder().setKey("key2").setValue(
+            "value2").build());
+    metadataList.add(HddsProtos.KeyValue.newBuilder().setKey(
+            OMConfigKeys.OZONE_OM_LAYOUT_VERSION).setValue(
+            OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1).build());
+    metadataList.add(HddsProtos.KeyValue.newBuilder().setKey(
+            OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS).setValue(
+            "false").build());
+    return metadataList;
+  }
 
   /**
    * Add user to user table.
@@ -927,4 +962,11 @@ public final class TestOMRequestUtils {
     }
     return parentId;
   }
+
+  public static void configureFSOptimizedPaths(Configuration conf,
+      boolean enableFileSystemPaths, String version) {
+    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+            enableFileSystemPaths);
+    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, version);
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestBucketRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestBucketRequest.java
index 7ae82f8..9381746 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestBucketRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestBucketRequest.java
@@ -74,6 +74,7 @@ public class TestBucketRequest {
     auditLogger = Mockito.mock(AuditLogger.class);
     when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
     Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
+    when(ozoneManager.getOMLayoutVersion()).thenReturn(null);
   }
 
   @After
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java
index 06e140b..3615ce4 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java
@@ -186,7 +186,7 @@ public class TestOMBucketCreateRequest extends TestBucketRequest {
 
   }
 
-  private void verifyRequest(OMRequest modifiedOmRequest,
+  protected void verifyRequest(OMRequest modifiedOmRequest,
       OMRequest originalRequest) {
     OzoneManagerProtocolProtos.BucketInfo original =
         originalRequest.getCreateBucketRequest().getBucketInfo();
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestV1.java
new file mode 100644
index 0000000..a3b6ff7
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestV1.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.om.request.bucket;
+
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.StorageTypeProto;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.UUID;
+
+import static org.mockito.Mockito.when;
+
+/**
+ * Tests OMBucketCreateRequest class, which handles CreateBucket request.
+ */
+public class TestOMBucketCreateRequestV1 extends TestOMBucketCreateRequest {
+
+  @Test
+  public void testValidateAndUpdateCacheWithFSOBucket() throws Exception {
+    when(ozoneManager.getOMLayoutVersion()).thenReturn(
+            OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1);
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+
+    OMBucketCreateRequest omBucketCreateRequest = doPreExecute(volumeName,
+        bucketName);
+
+    doValidateAndUpdateCache(volumeName, bucketName,
+        omBucketCreateRequest.getOmRequest());
+  }
+
+  private OMBucketCreateRequest doPreExecute(String volumeName,
+      String bucketName) throws Exception {
+    addCreateVolumeToTable(volumeName, omMetadataManager);
+    OMRequest originalRequest =
+        TestOMRequestUtils.createBucketRequestV1(bucketName, volumeName,
+                false, StorageTypeProto.SSD);
+
+    OMBucketCreateRequest omBucketCreateRequest =
+        new OMBucketCreateRequest(originalRequest);
+
+    OMRequest modifiedRequest = omBucketCreateRequest.preExecute(ozoneManager);
+    verifyRequest(modifiedRequest, originalRequest);
+    return new OMBucketCreateRequest(modifiedRequest);
+  }
+
+  private void doValidateAndUpdateCache(String volumeName, String bucketName,
+      OMRequest modifiedRequest) throws Exception {
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+
+    // As we have not still called validateAndUpdateCache, get() should
+    // return null.
+
+    Assert.assertNull(omMetadataManager.getBucketTable().get(bucketKey));
+    OMBucketCreateRequest omBucketCreateRequest =
+        new OMBucketCreateRequest(modifiedRequest);
+
+
+    OMClientResponse omClientResponse =
+        omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 1,
+            ozoneManagerDoubleBufferHelper);
+
+    // As now after validateAndUpdateCache it should add entry to cache, get
+    // should return non null value.
+    OmBucketInfo dbBucketInfo =
+        omMetadataManager.getBucketTable().get(bucketKey);
+    Assert.assertNotNull(omMetadataManager.getBucketTable().get(bucketKey));
+
+    // verify table data with actual request data.
+    OmBucketInfo bucketInfoFromProto = OmBucketInfo.getFromProtobuf(
+        modifiedRequest.getCreateBucketRequest().getBucketInfo());
+
+    Assert.assertEquals(bucketInfoFromProto.getCreationTime(),
+        dbBucketInfo.getCreationTime());
+    Assert.assertEquals(bucketInfoFromProto.getModificationTime(),
+        dbBucketInfo.getModificationTime());
+    Assert.assertEquals(bucketInfoFromProto.getAcls(),
+        dbBucketInfo.getAcls());
+    Assert.assertEquals(bucketInfoFromProto.getIsVersionEnabled(),
+        dbBucketInfo.getIsVersionEnabled());
+    Assert.assertEquals(bucketInfoFromProto.getStorageType(),
+        dbBucketInfo.getStorageType());
+    Assert.assertEquals(bucketInfoFromProto.getMetadata(),
+        dbBucketInfo.getMetadata());
+    Assert.assertEquals(bucketInfoFromProto.getEncryptionKeyInfo(),
+        dbBucketInfo.getEncryptionKeyInfo());
+
+    // verify OMResponse.
+    verifySuccessCreateBucketResponse(omClientResponse.getOMResponse());
+
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java
index f0f0320..454cfbb 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java
@@ -88,7 +88,8 @@ public class TestOMDirectoryCreateRequestV1 {
     OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
     ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
             folder.newFolder().getAbsolutePath());
-    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    TestOMRequestUtils.configureFSOptimizedPaths(ozoneConfiguration,
+            true, OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1);
     omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
     when(ozoneManager.getMetrics()).thenReturn(omMetrics);
     when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestV1.java
index 046ac90..2631c91 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestV1.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.ozone.om.request.file;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
@@ -183,11 +182,10 @@ public class TestOMFileCreateRequestV1 extends TestOMFileCreateRequest {
   @Override
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration config = super.getOzoneConfiguration();
-    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
     // omLayoutVersionV1 flag will be set while invoking OzoneManager#start()
     // and it's not invoked in this test. Hence it is explicitly setting
     // this configuration to populate prefix tables.
-    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
     return config;
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestV1.java
index 4e74979..5d21226 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestV1.java
@@ -23,7 +23,6 @@ package org.apache.hadoop.ozone.om.request.key;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
@@ -43,11 +42,10 @@ public class TestOMAllocateBlockRequestV1 extends TestOMAllocateBlockRequest {
   @Override
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration config = super.getOzoneConfiguration();
-    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
     // omLayoutVersionV1 flag will be set while invoking OzoneManager#start()
     // and it's not invoked in this test. Hence it is explicitly setting
     // this configuration to populate prefix tables.
-    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
     return config;
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestV1.java
index ed1e2bd..379dbcf 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestV1.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.ozone.om.request.key;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
@@ -87,11 +86,10 @@ public class TestOMKeyCommitRequestV1 extends TestOMKeyCommitRequest {
   @Override
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration config = super.getOzoneConfiguration();
-    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
     // omLayoutVersionV1 flag will be set while invoking OzoneManager#start()
     // and it's not invoked in this test. Hence it is explicitly setting
     // this configuration to populate prefix tables.
-    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
     return config;
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestV1.java
index 83c640d..b65443d 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestV1.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.ozone.om.request.key;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
@@ -47,11 +46,10 @@ public class TestOMKeyCreateRequestV1 extends TestOMKeyCreateRequest {
   @Override
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration config = super.getOzoneConfiguration();
-    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
     // omLayoutVersionV1 flag will be set while invoking OzoneManager#start()
     // and it's not invoked in this test. Hence it is explicitly setting
     // this configuration to populate prefix tables.
-    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
     return config;
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestV1.java
index 7527e78..2c43d51 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestV1.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.ozone.om.request.key;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
@@ -61,11 +60,10 @@ public class TestOMKeyDeleteRequestV1 extends TestOMKeyDeleteRequest {
   @Override
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration config = super.getOzoneConfiguration();
-    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
     // omLayoutVersionV1 flag will be set while invoking OzoneManager#start()
     // and its not invoked in this test. Hence it is explicitly setting
     // this configuration to populate prefix tables.
-    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
     return config;
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java
index bc4345e..e1549e1 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.ozone.om.response.file;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
@@ -68,11 +67,10 @@ public class TestOMFileCreateResponseV1 extends TestOMKeyCreateResponse {
   @Override
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration config = super.getOzoneConfiguration();
-    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
     // omLayoutVersionV1 flag will be set while invoking OzoneManager#start()
     // and its not invoked in this test. Hence it is explicitly setting
     // this configuration to populate prefix tables.
-    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
     return config;
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseV1.java
index 92b3efe..1626079 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseV1.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.ozone.om.response.key;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
@@ -73,11 +72,10 @@ public class TestOMAllocateBlockResponseV1
   @Override
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration config = super.getOzoneConfiguration();
-    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
     // omLayoutVersionV1 flag will be set while invoking OzoneManager#start()
     // and its not invoked in this test. Hence it is explicitly setting
     // this configuration to populate prefix tables.
-    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
     return config;
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
index 4d68a4b..7275f69 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.ozone.om.response.key;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
@@ -95,11 +94,10 @@ public class TestOMKeyCommitResponseV1 extends TestOMKeyCommitResponse {
   @Override
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration config = super.getOzoneConfiguration();
-    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
     // omLayoutVersionV1 flag will be set while invoking OzoneManager#start()
     // and its not invoked in this test. Hence it is explicitly setting
     // this configuration to populate prefix tables.
-    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
     return config;
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseV1.java
index e51a06b..6299639 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseV1.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.ozone.om.response.key;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
@@ -38,11 +37,10 @@ public class TestOMKeyCreateResponseV1 extends TestOMKeyCreateResponse {
   @Override
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration config = super.getOzoneConfiguration();
-    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
     // omLayoutVersionV1 flag will be set while invoking OzoneManager#start()
     // and its not invoked in this test. Hence it is explicitly setting
     // this configuration to populate prefix tables.
-    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
     return config;
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseV1.java
index d35c79e..d46fe72 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseV1.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.ozone.om.response.key;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
@@ -76,11 +75,10 @@ public class TestOMKeyDeleteResponseV1 extends TestOMKeyDeleteResponse {
   @Override
   protected OzoneConfiguration getOzoneConfiguration() {
     OzoneConfiguration config = super.getOzoneConfiguration();
-    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
     // omLayoutVersionV1 flag will be set while invoking OzoneManager#start()
     // and its not invoked in this test. Hence it is explicitly setting
     // this configuration to populate prefix tables.
-    OzoneManagerRatisUtils.setOmLayoutVersionV1(true);
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
     return config;
   }
 }
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
index b4153f0..150108c 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
@@ -547,7 +547,18 @@ public class BasicOzoneClientAdapterImpl implements OzoneClientAdapter {
   }
 
   @Override
-  public String getBucketLayoutVersion() {
-    return bucket.getMetadata().get(OMConfigKeys.OZONE_OM_LAYOUT_VERSION);
+  public boolean isFSOptimizedBucket() {
+    // layout version V1 represents optimized FS path
+    boolean layoutVersionEnabled =
+            StringUtils.equalsIgnoreCase(
+                    OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1,
+                    bucket.getMetadata()
+                            .get(OMConfigKeys.OZONE_OM_LAYOUT_VERSION));
+
+    boolean fsEnabled =
+            Boolean.parseBoolean(bucket.getMetadata()
+                    .get(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS));
+
+    return layoutVersionEnabled && fsEnabled;
   }
 }
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index ba6b7a4..1f3cf6e 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -42,7 +42,6 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.conf.StorageUnit;
 import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -327,9 +326,7 @@ public class BasicOzoneFileSystem extends FileSystem {
       return false;
     }
 
-    String layOutVersion = adapter.getBucketLayoutVersion();
-    if (layOutVersion != null &&
-            OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1.equals(layOutVersion)) {
+    if (adapter.isFSOptimizedBucket()) {
       return renameV1(srcPath, dstPath);
     }
 
@@ -509,10 +506,7 @@ public class BasicOzoneFileSystem extends FileSystem {
     statistics.incrementWriteOps(1);
     LOG.debug("Delete path {} - recursive {}", f, recursive);
 
-    String layOutVersion = adapter.getBucketLayoutVersion();
-    if (layOutVersion != null &&
-            OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1.equals(layOutVersion)) {
-
+    if (adapter.isFSOptimizedBucket()) {
       if (f.isRoot()) {
         LOG.warn("Cannot delete root directory.");
         return false;
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
index 84cba47..6fdac47 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
@@ -59,7 +59,6 @@ import org.apache.hadoop.ozone.client.OzoneKey;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
@@ -1049,8 +1048,8 @@ public class BasicRootedOzoneClientAdapterImpl
   }
 
   @Override
-  public String getBucketLayoutVersion() {
+  public boolean isFSOptimizedBucket() {
     // TODO: Need to refine this part.
-    return OMConfigKeys.OZONE_OM_LAYOUT_VERSION_DEFAULT;
+    return false;
   }
 }
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
index 4a4d91b..0258f69 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
@@ -77,5 +77,5 @@ public interface OzoneClientAdapter {
   FileStatusAdapter getFileStatus(String key, URI uri,
       Path qualifiedPath, String userName) throws IOException;
 
-  String getBucketLayoutVersion();
+  boolean isFSOptimizedBucket();
 }

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 05/29: HDDS-4358: Delete : make delete an atomic operation (#1607)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 4295d4e341e8656a1bae54dffcb936814a8db050
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Mon Dec 7 13:16:36 2020 +0530

    HDDS-4358: Delete : make delete an atomic operation (#1607)
---
 .../apache/hadoop/ozone/client/OzoneBucket.java    |  16 +-
 .../ozone/client/protocol/ClientProtocol.java      |   5 +-
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |   3 +-
 .../hadoop/ozone/om/exceptions/OMException.java    |   4 +-
 .../apache/hadoop/ozone/om/helpers/OmKeyArgs.java  |  16 +-
 ...OzoneManagerProtocolClientSideTranslatorPB.java |   3 +-
 .../hadoop/fs/ozone/TestOzoneFileSystem.java       |  21 ++-
 .../hadoop/fs/ozone/TestOzoneFileSystemV1.java     | 182 +++----------------
 .../src/main/proto/OmClientProtocol.proto          |   4 +
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  10 +-
 .../om/ratis/utils/OzoneManagerRatisUtils.java     |   4 +
 .../ozone/om/request/file/OMFileRequest.java       |  92 ++++++++++
 .../ozone/om/request/key/OMKeyDeleteRequestV1.java | 202 +++++++++++++++++++++
 .../ozone/om/request/key/OMKeyRenameRequestV1.java |   3 +-
 .../ozone/om/response/key/OMKeyDeleteResponse.java |   8 +
 ...eteResponse.java => OMKeyDeleteResponseV1.java} |  49 ++---
 .../ozone/om/request/TestOMRequestUtils.java       |   3 +
 .../om/request/key/TestOMKeyDeleteRequest.java     |  40 ++--
 .../om/request/key/TestOMKeyDeleteRequestV1.java   |  57 ++++++
 .../om/response/key/TestOMKeyDeleteResponse.java   |  77 ++++----
 .../om/response/key/TestOMKeyDeleteResponseV1.java |  70 +++++++
 .../fs/ozone/BasicOzoneClientAdapterImpl.java      |  26 ++-
 .../hadoop/fs/ozone/BasicOzoneFileSystem.java      |  26 ++-
 .../ozone/BasicRootedOzoneClientAdapterImpl.java   |  19 +-
 .../apache/hadoop/fs/ozone/OzoneClientAdapter.java |   4 +-
 25 files changed, 693 insertions(+), 251 deletions(-)

diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index f688a66..c1877b4 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
@@ -553,7 +553,21 @@ public class OzoneBucket extends WithMetadata {
    * @throws IOException
    */
   public void deleteKey(String key) throws IOException {
-    proxy.deleteKey(volumeName, name, key);
+    proxy.deleteKey(volumeName, name, key, false);
+  }
+
+  /**
+   * Ozone FS api to delete a directory. Sub directories will be deleted if
+   * recursive flag is true, otherwise it will be non-recursive.
+   *
+   * @param key       Name of the key to be deleted.
+   * @param recursive recursive deletion of all sub path keys if true,
+   *                  otherwise non-recursive
+   * @throws IOException
+   */
+  public void deleteDirectory(String key, boolean recursive)
+      throws IOException {
+    proxy.deleteKey(volumeName, name, key, recursive);
   }
 
   /**
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
index cc93355..4185e18 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
@@ -291,9 +291,12 @@ public interface ClientProtocol {
    * @param volumeName Name of the Volume
    * @param bucketName Name of the Bucket
    * @param keyName Name of the Key
+   * @param recursive recursive deletion of all sub path keys if true,
+   *                  otherwise non-recursive
    * @throws IOException
    */
-  void deleteKey(String volumeName, String bucketName, String keyName)
+  void deleteKey(String volumeName, String bucketName, String keyName,
+                 boolean recursive)
       throws IOException;
 
   /**
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 0974682..68251ba 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -757,7 +757,7 @@ public class RpcClient implements ClientProtocol {
 
   @Override
   public void deleteKey(
-      String volumeName, String bucketName, String keyName)
+      String volumeName, String bucketName, String keyName, boolean recursive)
       throws IOException {
     verifyVolumeName(volumeName);
     verifyBucketName(bucketName);
@@ -766,6 +766,7 @@ public class RpcClient implements ClientProtocol {
         .setVolumeName(volumeName)
         .setBucketName(bucketName)
         .setKeyName(keyName)
+        .setRecursive(recursive)
         .build();
     ozoneManagerClient.deleteKey(keyArgs);
   }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
index b676bca..bba97cd 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
@@ -233,7 +233,9 @@ public class OMException extends IOException {
 
     QUOTA_EXCEEDED,
 
-    QUOTA_ERROR
+    QUOTA_ERROR,
+
+    DIRECTORY_NOT_EMPTY
 
   }
 }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java
index c08c988..f8c7c23 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java
@@ -48,6 +48,7 @@ public final class OmKeyArgs implements Auditable {
   private boolean refreshPipeline;
   private boolean sortDatanodesInPipeline;
   private List<OzoneAcl> acls;
+  private boolean recursive;
 
   @SuppressWarnings("parameternumber")
   private OmKeyArgs(String volumeName, String bucketName, String keyName,
@@ -55,7 +56,7 @@ public final class OmKeyArgs implements Auditable {
       List<OmKeyLocationInfo> locationInfoList, boolean isMultipart,
       String uploadID, int partNumber,
       Map<String, String> metadataMap, boolean refreshPipeline,
-      List<OzoneAcl> acls, boolean sortDatanode) {
+      List<OzoneAcl> acls, boolean sortDatanode, boolean recursive) {
     this.volumeName = volumeName;
     this.bucketName = bucketName;
     this.keyName = keyName;
@@ -70,6 +71,7 @@ public final class OmKeyArgs implements Auditable {
     this.refreshPipeline = refreshPipeline;
     this.acls = acls;
     this.sortDatanodesInPipeline = sortDatanode;
+    this.recursive = recursive;
   }
 
   public boolean getIsMultipartKey() {
@@ -140,6 +142,10 @@ public final class OmKeyArgs implements Auditable {
     return sortDatanodesInPipeline;
   }
 
+  public boolean isRecursive() {
+    return recursive;
+  }
+
   @Override
   public Map<String, String> toAuditMap() {
     Map<String, String> auditMap = new LinkedHashMap<>();
@@ -198,6 +204,7 @@ public final class OmKeyArgs implements Auditable {
     private boolean refreshPipeline;
     private boolean sortDatanodesInPipeline;
     private List<OzoneAcl> acls;
+    private boolean recursive;
 
     public Builder setVolumeName(String volume) {
       this.volumeName = volume;
@@ -274,11 +281,16 @@ public final class OmKeyArgs implements Auditable {
       return this;
     }
 
+    public Builder setRecursive(boolean isRecursive) {
+      this.recursive = isRecursive;
+      return this;
+    }
+
     public OmKeyArgs build() {
       return new OmKeyArgs(volumeName, bucketName, keyName, dataSize, type,
           factor, locationInfoList, isMultipartKey, multipartUploadID,
           multipartUploadPartNumber, metadata, refreshPipeline, acls,
-          sortDatanodesInPipeline);
+          sortDatanodesInPipeline, recursive);
     }
 
   }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
index 43d72b9..a7f3a2d 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
@@ -746,7 +746,8 @@ public final class OzoneManagerProtocolClientSideTranslatorPB
     KeyArgs keyArgs = KeyArgs.newBuilder()
         .setVolumeName(args.getVolumeName())
         .setBucketName(args.getBucketName())
-        .setKeyName(args.getKeyName()).build();
+        .setKeyName(args.getKeyName())
+        .setRecursive(args.isRecursive()).build();
     req.setKeyArgs(keyArgs);
 
     OMRequest omRequest = createOMRequest(Type.DeleteKey)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
index 2c1e643..855484c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.fs.TrashPolicy;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
@@ -378,6 +379,24 @@ public class TestOzoneFileSystem {
       ContractTestUtils.touch(fs, child);
     }
 
+    // delete a dir with sub-file
+    try {
+      FileStatus[] parents = fs.listStatus(grandparent);
+      Assert.assertTrue(parents.length > 0);
+      fs.delete(parents[0].getPath(), false);
+      Assert.fail("Must throw exception as dir is not empty!");
+    } catch (PathIsNotEmptyDirectoryException pde) {
+      // expected
+    }
+
+    // delete a dir with sub-file
+    try {
+      fs.delete(grandparent, false);
+      Assert.fail("Must throw exception as dir is not empty!");
+    } catch (PathIsNotEmptyDirectoryException pde) {
+      // expected
+    }
+
     // Delete the grandparent, which should delete all keys.
     fs.delete(grandparent, true);
 
@@ -786,7 +805,7 @@ public class TestOzoneFileSystem {
 
     // Add a sub-directory '/b/a' to '/b'. This is to verify that rename
     // throws exception as new destin /b/a already exists.
-    final Path baPath = new Path(fs.getUri().toString() + "/b/a");
+    final Path baPath = new Path(fs.getUri().toString() + "/b/a/c");
     fs.mkdirs(baPath);
 
     Assert.assertFalse("New destin sub-path /b/a already exists",
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
index e2b7887..212080b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.fs.ozone;
 
 import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -30,19 +29,12 @@ import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.TestDataUtil;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.junit.Assert;
 import org.junit.After;
 import org.junit.Before;
@@ -59,8 +51,6 @@ import org.slf4j.LoggerFactory;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.Map;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
@@ -124,7 +114,7 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
   public void cleanup() {
     super.cleanup();
     try {
-      tableCleanup();
+      deleteRootDir();
     } catch (IOException e) {
       LOG.info("Failed to cleanup DB tables.", e);
       fail("Failed to cleanup DB tables." + e.getMessage());
@@ -381,153 +371,8 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
   }
 
   /**
-   * Case-5) If new destin '/dst/source' exists then throws exception.
-   * If destination is a directory then rename source as sub-path of it.
-   * <p>
-   * For example: rename /a to /b will lead to /b/a. This new path should
-   * not exist.
-   */
-  @Test
-  public void testRenameToNewSubDirShouldNotExist() throws Exception {
-    // Case-5.a) Rename directory from /a to /b.
-    // created /a
-    final Path aSourcePath = new Path(fs.getUri().toString() + "/a");
-    fs.mkdirs(aSourcePath);
-
-    // created /b
-    final Path bDestinPath = new Path(fs.getUri().toString() + "/b");
-    fs.mkdirs(bDestinPath);
-
-    // Add a sub-directory '/b/a' to '/b'. This is to verify that rename
-    // throws exception as new destin /b/a already exists.
-    final Path baPath = new Path(fs.getUri().toString() + "/b/a");
-    fs.mkdirs(baPath);
-
-    try {
-      fs.rename(aSourcePath, bDestinPath);
-      Assert.fail("Should fail as new destination dir exists!");
-    } catch (OMException ome) {
-      // expected as new sub-path /b/a already exists.
-      assertEquals(ome.getResult(), OMException.ResultCodes.KEY_ALREADY_EXISTS);
-    }
-
-    // Case-5.b) Rename file from /a/b/c/file1 to /a.
-    // Should be failed since /a/file1 exists.
-    final Path abcPath = new Path(fs.getUri().toString() + "/a/b/c");
-    fs.mkdirs(abcPath);
-    Path abcFile1 = new Path(abcPath, "/file1");
-    ContractTestUtils.touch(fs, abcFile1);
-
-    final Path aFile1 = new Path(fs.getUri().toString() + "/a/file1");
-    ContractTestUtils.touch(fs, aFile1);
-
-    final Path aDestinPath = new Path(fs.getUri().toString() + "/a");
-
-    try {
-      fs.rename(abcFile1, aDestinPath);
-      Assert.fail("Should fail as new destination file exists!");
-    } catch (OMException ome) {
-      // expected as new sub-path /b/a already exists.
-      assertEquals(ome.getResult(), OMException.ResultCodes.KEY_ALREADY_EXISTS);
-    }
-  }
-
-  /**
-   * Case-6) Rename directory to an existed file, should be failed.
-   */
-  @Test
-  public void testRenameDirToFile() throws Exception {
-    final String root = "/root";
-    Path rootPath = new Path(fs.getUri().toString() + root);
-    fs.mkdirs(rootPath);
-
-    Path file1Destin = new Path(fs.getUri().toString() + root + "/file1");
-    ContractTestUtils.touch(fs, file1Destin);
-    Path abcRootPath = new Path(fs.getUri().toString() + "/a/b/c");
-    fs.mkdirs(abcRootPath);
-    try {
-      fs.rename(abcRootPath, file1Destin);
-      Assert.fail("key already exists /root_dir/file1");
-    } catch (OMException ome) {
-      // expected
-      assertEquals(ome.getResult(), OMException.ResultCodes.KEY_ALREADY_EXISTS);
-    }
-  }
-
-  /**
    * Cleanup keyTable and directoryTable explicitly as FS delete operation
    * is not yet supported.
-   *
-   * @throws IOException DB failure
-   */
-  private void tableCleanup() throws IOException {
-    OMMetadataManager metadataMgr = cluster.getOzoneManager()
-            .getMetadataManager();
-    TableIterator<String, ? extends
-            Table.KeyValue<String, OmDirectoryInfo>> dirTableIterator =
-            metadataMgr.getDirectoryTable().iterator();
-    dirTableIterator.seekToFirst();
-    ArrayList <String> dirList = new ArrayList<>();
-    while (dirTableIterator.hasNext()) {
-      String key = dirTableIterator.key();
-      if (StringUtils.isNotBlank(key)) {
-        dirList.add(key);
-      }
-      dirTableIterator.next();
-    }
-
-    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmDirectoryInfo>>>
-            cacheIterator = metadataMgr.getDirectoryTable().cacheIterator();
-    while(cacheIterator.hasNext()){
-      cacheIterator.next();
-      cacheIterator.remove();
-    }
-
-    for (String dirKey : dirList) {
-      metadataMgr.getDirectoryTable().delete(dirKey);
-      Assert.assertNull("Unexpected entry!",
-              metadataMgr.getDirectoryTable().get(dirKey));
-    }
-
-    Assert.assertTrue("DirTable is not empty",
-            metadataMgr.getDirectoryTable().isEmpty());
-
-    Assert.assertFalse(metadataMgr.getDirectoryTable().cacheIterator()
-            .hasNext());
-
-    TableIterator<String, ? extends
-            Table.KeyValue<String, OmKeyInfo>> keyTableIterator =
-            metadataMgr.getKeyTable().iterator();
-    keyTableIterator.seekToFirst();
-    ArrayList <String> fileList = new ArrayList<>();
-    while (keyTableIterator.hasNext()) {
-      String key = keyTableIterator.key();
-      if (StringUtils.isNotBlank(key)) {
-        fileList.add(key);
-      }
-      keyTableIterator.next();
-    }
-
-    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>>
-            keyCacheIterator = metadataMgr.getKeyTable().cacheIterator();
-    while(keyCacheIterator.hasNext()){
-      keyCacheIterator.next();
-      keyCacheIterator.remove();
-    }
-
-    for (String fileKey : fileList) {
-      metadataMgr.getKeyTable().delete(fileKey);
-      Assert.assertNull("Unexpected entry!",
-              metadataMgr.getKeyTable().get(fileKey));
-    }
-
-    Assert.assertTrue("KeyTable is not empty",
-            metadataMgr.getKeyTable().isEmpty());
-
-    rootItemCount = 0;
-  }
-
-  /**
    * Fails if the (a) parent of dst does not exist or (b) parent is a file.
    */
   @Test
@@ -562,6 +407,31 @@ public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
     }
   }
 
+  /**
+   * Cleanup files and directories.
+   *
+   * @throws IOException DB failure
+   */
+  protected void deleteRootDir() throws IOException {
+    Path root = new Path("/");
+    FileStatus[] fileStatuses = fs.listStatus(root);
+
+    rootItemCount = 0; // reset to zero
+
+    if (fileStatuses == null) {
+      return;
+    }
+
+    for (FileStatus fStatus : fileStatuses) {
+      fs.delete(fStatus.getPath(), true);
+    }
+
+    fileStatuses = fs.listStatus(root);
+    if (fileStatuses != null) {
+      Assert.assertEquals("Delete root failed!", 0, fileStatuses.length);
+    }
+  }
+
   @Override
   @Test
   @Ignore("TODO:HDDS-2939")
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index c10a79d..d6b5dbd 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -321,6 +321,7 @@ enum Status {
 
     QUOTA_ERROR = 67;
 
+    DIRECTORY_NOT_EMPTY = 68;
 }
 
 /**
@@ -731,6 +732,9 @@ message KeyArgs {
 
     // This will be set by leader OM in HA and update the original request.
     optional FileEncryptionInfoProto fileEncryptionInfo = 15;
+
+    // This will be set when user performs delete directory recursively.
+    optional bool recursive = 16;
 }
 
 message KeyLocation {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 8aaca5f..46ffce8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -2462,7 +2462,8 @@ public class KeyManagerImpl implements KeyManager {
 
     while (iterator.hasNext() && numEntries - countEntries > 0) {
       OmDirectoryInfo dirInfo = iterator.value().getValue();
-      if (!isImmediateChild(dirInfo.getParentObjectID(), prefixKeyInDB)) {
+      if (!OMFileRequest.isImmediateChild(dirInfo.getParentObjectID(),
+              prefixKeyInDB)) {
         break;
       }
 
@@ -2497,7 +2498,8 @@ public class KeyManagerImpl implements KeyManager {
     while (iterator.hasNext() && numEntries - countEntries > 0) {
       OmKeyInfo keyInfo = iterator.value().getValue();
 
-      if (!isImmediateChild(keyInfo.getParentObjectID(), prefixKeyInDB)) {
+      if (!OMFileRequest.isImmediateChild(keyInfo.getParentObjectID(),
+              prefixKeyInDB)) {
         break;
       }
 
@@ -2512,10 +2514,6 @@ public class KeyManagerImpl implements KeyManager {
     return countEntries;
   }
 
-  private boolean isImmediateChild(long parentId, long ancestorId) {
-    return parentId == ancestorId;
-  }
-
   /**
    * Helper function for listStatus to find key in FileTableCache.
    */
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index 6142d87..97868d8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequestV1;
 import org.apache.hadoop.ozone.om.request.key.OMKeyCreateRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyDeleteRequest;
+import org.apache.hadoop.ozone.om.request.key.OMKeyDeleteRequestV1;
 import org.apache.hadoop.ozone.om.request.key.OMKeyPurgeRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyRenameRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyRenameRequestV1;
@@ -142,6 +143,9 @@ public final class OzoneManagerRatisUtils {
       }
       return new OMKeyCommitRequest(omRequest);
     case DeleteKey:
+      if (omLayoutVersionV1) {
+        return new OMKeyDeleteRequestV1(omRequest);
+      }
       return new OMKeyDeleteRequest(omRequest);
     case DeleteKeys:
       return new OMKeysDeleteRequest(omRequest);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index e7b43d6..fc9bab0 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -25,11 +25,14 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 
 import com.google.common.base.Optional;
 import com.google.common.base.Strings;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.OzoneAcl;
@@ -759,4 +762,93 @@ public final class OMFileRequest {
     }
     return toKeyParentDirStatus.getKeyInfo().getObjectID();
   }
+
+  /**
+   * Check if there are any sub path exist for the given user key path.
+   *
+   * @param omKeyInfo om key path
+   * @param metaMgr   OMMetadataManager
+   * @return true if there are any sub path, false otherwise
+   * @throws IOException DB exception
+   */
+  public static boolean hasChildren(OmKeyInfo omKeyInfo,
+      OMMetadataManager metaMgr) throws IOException {
+    return checkSubDirectoryExists(omKeyInfo, metaMgr) ||
+            checkSubFileExists(omKeyInfo, metaMgr);
+  }
+
+  private static boolean checkSubDirectoryExists(OmKeyInfo omKeyInfo,
+      OMMetadataManager metaMgr) throws IOException {
+    // Check all dirTable cache for any sub paths.
+    Table<String, OmDirectoryInfo> dirTable = metaMgr.getDirectoryTable();
+    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmDirectoryInfo>>>
+            cacheIter = dirTable.cacheIterator();
+
+    while (cacheIter.hasNext()) {
+      Map.Entry<CacheKey<String>, CacheValue<OmDirectoryInfo>> entry =
+              cacheIter.next();
+      OmDirectoryInfo cacheOmDirInfo = entry.getValue().getCacheValue();
+      if (cacheOmDirInfo == null) {
+        continue;
+      }
+      if (isImmediateChild(cacheOmDirInfo.getParentObjectID(),
+              omKeyInfo.getObjectID())) {
+        return true; // found a sub path directory
+      }
+    }
+
+    // Check dirTable entries for any sub paths.
+    String seekDirInDB = metaMgr.getOzonePathKey(omKeyInfo.getObjectID(), "");
+    TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
+            iterator = dirTable.iterator();
+
+    iterator.seek(seekDirInDB);
+
+    if (iterator.hasNext()) {
+      OmDirectoryInfo dirInfo = iterator.value().getValue();
+      return isImmediateChild(dirInfo.getParentObjectID(),
+              omKeyInfo.getObjectID());
+    }
+    return false; // no sub paths found
+  }
+
+  private static boolean checkSubFileExists(OmKeyInfo omKeyInfo,
+      OMMetadataManager metaMgr) throws IOException {
+    // Check all fileTable cache for any sub paths.
+    Table<String, OmKeyInfo> fileTable = metaMgr.getKeyTable();
+    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>>
+            cacheIter = fileTable.cacheIterator();
+
+    while (cacheIter.hasNext()) {
+      Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>> entry =
+              cacheIter.next();
+      OmKeyInfo cacheOmFileInfo = entry.getValue().getCacheValue();
+      if (cacheOmFileInfo == null) {
+        continue;
+      }
+      if (isImmediateChild(cacheOmFileInfo.getParentObjectID(),
+              omKeyInfo.getObjectID())) {
+        return true; // found a sub path file
+      }
+    }
+
+    // Check fileTable entries for any sub paths.
+    String seekFileInDB = metaMgr.getOzonePathKey(
+            omKeyInfo.getObjectID(), "");
+    TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+            iterator = fileTable.iterator();
+
+    iterator.seek(seekFileInDB);
+
+    if (iterator.hasNext()) {
+      OmKeyInfo fileInfo = iterator.value().getValue();
+      return isImmediateChild(fileInfo.getParentObjectID(),
+              omKeyInfo.getObjectID()); // found a sub path file
+    }
+    return false; // no sub paths found
+  }
+
+  public static boolean isImmediateChild(long parentId, long ancestorId) {
+    return parentId == ancestorId;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java
new file mode 100644
index 0000000..93531bc
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java
@@ -0,0 +1,202 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.key;
+
+import com.google.common.base.Optional;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.audit.AuditLogger;
+import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OMMetrics;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.key.OMKeyDeleteResponseV1;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_EMPTY;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+
+/**
+ * Handles DeleteKey request layout version V1.
+ */
+public class OMKeyDeleteRequestV1 extends OMKeyDeleteRequest {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OMKeyDeleteRequestV1.class);
+
+  public OMKeyDeleteRequestV1(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override
+  @SuppressWarnings("methodlength")
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+    DeleteKeyRequest deleteKeyRequest = getOmRequest().getDeleteKeyRequest();
+
+    OzoneManagerProtocolProtos.KeyArgs keyArgs =
+        deleteKeyRequest.getKeyArgs();
+    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
+
+    String volumeName = keyArgs.getVolumeName();
+    String bucketName = keyArgs.getBucketName();
+    String keyName = keyArgs.getKeyName();
+    boolean recursive = keyArgs.getRecursive();
+
+    OMMetrics omMetrics = ozoneManager.getMetrics();
+    omMetrics.incNumKeyDeletes();
+
+    AuditLogger auditLogger = ozoneManager.getAuditLogger();
+    OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
+
+    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
+        getOmRequest());
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+    IOException exception = null;
+    boolean acquiredLock = false;
+    OMClientResponse omClientResponse = null;
+    Result result = null;
+    OmVolumeArgs omVolumeArgs = null;
+    OmBucketInfo omBucketInfo = null;
+    try {
+      keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
+      volumeName = keyArgs.getVolumeName();
+      bucketName = keyArgs.getBucketName();
+
+      // check Acl
+      checkKeyAcls(ozoneManager, volumeName, bucketName, keyName,
+          IAccessAuthorizer.ACLType.DELETE, OzoneObj.ResourceType.KEY);
+
+      acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
+          volumeName, bucketName);
+
+      // Validate bucket and volume exists or not.
+      validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
+
+      OzoneFileStatus keyStatus =
+              OMFileRequest.getOMKeyInfoIfExists(omMetadataManager, volumeName,
+                      bucketName, keyName, 0);
+
+      if (keyStatus == null) {
+        throw new OMException("Key not found. Key:" + keyName, KEY_NOT_FOUND);
+      }
+
+      OmKeyInfo omKeyInfo = keyStatus.getKeyInfo();
+
+      // Set the UpdateID to current transactionLogIndex
+      omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
+
+      String ozonePathKey = omMetadataManager.getOzonePathKey(
+              omKeyInfo.getParentObjectID(), omKeyInfo.getFileName());
+
+      if (keyStatus.isDirectory()) {
+        // Check if there are any sub path exists under the user requested path
+        if (!recursive && OMFileRequest.hasChildren(omKeyInfo,
+                omMetadataManager)) {
+          throw new OMException("Directory is not empty. Key:" + keyName,
+                  DIRECTORY_NOT_EMPTY);
+        }
+
+        // Update dir cache.
+        omMetadataManager.getDirectoryTable().addCacheEntry(
+                new CacheKey<>(ozonePathKey),
+                new CacheValue<>(Optional.absent(), trxnLogIndex));
+      } else {
+        // Update table cache.
+        omMetadataManager.getKeyTable().addCacheEntry(
+                new CacheKey<>(ozonePathKey),
+                new CacheValue<>(Optional.absent(), trxnLogIndex));
+      }
+
+      omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
+
+      long quotaReleased = sumBlockLengths(omKeyInfo);
+      omBucketInfo.incrUsedBytes(-quotaReleased);
+      omBucketInfo.incrUsedNamespace(-1L);
+
+      // No need to add cache entries to delete table. As delete table will
+      // be used by DeleteKeyService only, not used for any client response
+      // validation, so we don't need to add to cache.
+      // TODO: Revisit if we need it later.
+
+      omClientResponse = new OMKeyDeleteResponseV1(omResponse
+          .setDeleteKeyResponse(DeleteKeyResponse.newBuilder()).build(),
+          omKeyInfo, ozoneManager.isRatisEnabled(),
+          omBucketInfo, keyStatus.isDirectory());
+
+      result = Result.SUCCESS;
+    } catch (IOException ex) {
+      result = Result.FAILURE;
+      exception = ex;
+      omClientResponse = new OMKeyDeleteResponseV1(
+          createErrorOMResponse(omResponse, exception));
+    } finally {
+      addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
+            omDoubleBufferHelper);
+      if (acquiredLock) {
+        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
+            bucketName);
+      }
+    }
+
+    // Performing audit logging outside of the lock.
+    auditLog(auditLogger, buildAuditMessage(OMAction.DELETE_KEY, auditMap,
+        exception, userInfo));
+
+
+    switch (result) {
+    case SUCCESS:
+      omMetrics.decNumKeys();
+      LOG.debug("Key deleted. Volume:{}, Bucket:{}, Key:{}", volumeName,
+          bucketName, keyName);
+      break;
+    case FAILURE:
+      omMetrics.incNumKeyDeleteFails();
+      LOG.error("Key delete failed. Volume:{}, Bucket:{}, Key:{}.",
+          volumeName, bucketName, keyName, exception);
+      break;
+    default:
+      LOG.error("Unrecognized Result for OMKeyDeleteRequest: {}",
+          deleteKeyRequest);
+    }
+
+    return omClientResponse;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestV1.java
index 74e53fe..ba022c5 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestV1.java
@@ -90,7 +90,6 @@ public class OMKeyRenameRequestV1 extends OMKeyRenameRequest {
     OMClientResponse omClientResponse = null;
     IOException exception = null;
     OmKeyInfo fromKeyValue;
-    String fromKey = null;
     Result result;
     try {
       if (toKeyName.length() == 0 || fromKeyName.length() == 0) {
@@ -122,7 +121,7 @@ public class OMKeyRenameRequestV1 extends OMKeyRenameRequest {
       // case-1) fromKeyName should exist, otw throws exception
       if (fromKeyFileStatus == null) {
         // TODO: Add support for renaming open key
-        throw new OMException("Key not found " + fromKey, KEY_NOT_FOUND);
+        throw new OMException("Key not found " + fromKeyName, KEY_NOT_FOUND);
       }
 
       // source existed
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
index 58785c0..868d8c9 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
@@ -75,4 +75,12 @@ public class OMKeyDeleteResponse extends AbstractOMKeyDeleteResponse {
         omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(),
             omBucketInfo.getBucketName()), omBucketInfo);
   }
+
+  protected OmKeyInfo getOmKeyInfo() {
+    return omKeyInfo;
+  }
+
+  protected OmBucketInfo getOmBucketInfo() {
+    return omBucketInfo;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponseV1.java
similarity index 60%
copy from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
copy to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponseV1.java
index 58785c0..15c1ba6 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponseV1.java
@@ -18,43 +18,42 @@
 
 package org.apache.hadoop.ozone.om.response.key;
 
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 
-import java.io.IOException;
 import javax.annotation.Nonnull;
+import java.io.IOException;
 
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE;
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE;
 
 /**
  * Response for DeleteKey request.
  */
-@CleanupTableInfo(cleanupTables = {KEY_TABLE, DELETED_TABLE})
-public class OMKeyDeleteResponse extends AbstractOMKeyDeleteResponse {
+@CleanupTableInfo(cleanupTables = {FILE_TABLE, DIRECTORY_TABLE, DELETED_TABLE})
+public class OMKeyDeleteResponseV1 extends OMKeyDeleteResponse {
 
-  private OmKeyInfo omKeyInfo;
-  private OmBucketInfo omBucketInfo;
+  private boolean isDeleteDirectory;
 
-  public OMKeyDeleteResponse(@Nonnull OMResponse omResponse,
+  public OMKeyDeleteResponseV1(@Nonnull OMResponse omResponse,
       @Nonnull OmKeyInfo omKeyInfo, boolean isRatisEnabled,
-      @Nonnull OmBucketInfo omBucketInfo) {
-    super(omResponse, isRatisEnabled);
-    this.omKeyInfo = omKeyInfo;
-    this.omBucketInfo = omBucketInfo;
+      @Nonnull OmBucketInfo omBucketInfo,
+      boolean isDeleteDirectory) {
+    super(omResponse, omKeyInfo, isRatisEnabled, omBucketInfo);
+    this.isDeleteDirectory = isDeleteDirectory;
   }
 
   /**
    * For when the request is not successful.
    * For a successful request, the other constructor should be used.
    */
-  public OMKeyDeleteResponse(@Nonnull OMResponse omResponse) {
+  public OMKeyDeleteResponseV1(@Nonnull OMResponse omResponse) {
     super(omResponse);
   }
 
@@ -64,15 +63,21 @@ public class OMKeyDeleteResponse extends AbstractOMKeyDeleteResponse {
 
     // For OmResponse with failure, this should do nothing. This method is
     // not called in failure scenario in OM code.
-    String ozoneKey = omMetadataManager.getOzoneKey(omKeyInfo.getVolumeName(),
-        omKeyInfo.getBucketName(), omKeyInfo.getKeyName());
-    Table<String, OmKeyInfo> keyTable = omMetadataManager.getKeyTable();
-    addDeletionToBatch(omMetadataManager, batchOperation, keyTable, ozoneKey,
-        omKeyInfo);
+    String ozoneDbKey = omMetadataManager.getOzonePathKey(
+            getOmKeyInfo().getParentObjectID(), getOmKeyInfo().getFileName());
+
+    if (isDeleteDirectory) {
+      omMetadataManager.getDirectoryTable().deleteWithBatch(batchOperation,
+              ozoneDbKey);
+    } else {
+      Table<String, OmKeyInfo> keyTable = omMetadataManager.getKeyTable();
+      addDeletionToBatch(omMetadataManager, batchOperation, keyTable,
+              ozoneDbKey, getOmKeyInfo());
+    }
 
     // update bucket usedBytes.
     omMetadataManager.getBucketTable().putWithBatch(batchOperation,
-        omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(),
-            omBucketInfo.getBucketName()), omBucketInfo);
+            omMetadataManager.getBucketKey(getOmBucketInfo().getVolumeName(),
+                    getOmBucketInfo().getBucketName()), getOmBucketInfo());
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
index 2d4b5cb..61fd676 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
@@ -910,6 +910,9 @@ public final class TestOMRequestUtils {
           throws Exception {
     long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName,
             omMetaMgr);
+    if (org.apache.commons.lang3.StringUtils.isBlank(key)) {
+      return bucketId;
+    }
     String[] pathComponents = StringUtils.split(key, '/');
     long objectId = bucketId + 10;
     long parentId = bucketId;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java
index b8e5603..b5af354 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java
@@ -46,27 +46,23 @@ public class TestOMKeyDeleteRequest extends TestOMKeyRequest {
 
   @Test
   public void testValidateAndUpdateCache() throws Exception {
-    OMRequest modifiedOmRequest =
-        doPreExecute(createDeleteKeyRequest());
-
-    OMKeyDeleteRequest omKeyDeleteRequest =
-        new OMKeyDeleteRequest(modifiedOmRequest);
-
     // Add volume, bucket and key entries to OM DB.
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
 
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
-
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
+    String ozoneKey = addKeyToTable();
 
     OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
 
     // As we added manually to key table.
     Assert.assertNotNull(omKeyInfo);
 
+    OMRequest modifiedOmRequest =
+            doPreExecute(createDeleteKeyRequest());
+
+    OMKeyDeleteRequest omKeyDeleteRequest =
+            getOmKeyDeleteRequest(modifiedOmRequest);
+
     OMClientResponse omClientResponse =
         omKeyDeleteRequest.validateAndUpdateCache(ozoneManager,
         100L, ozoneManagerDoubleBufferHelper);
@@ -86,7 +82,7 @@ public class TestOMKeyDeleteRequest extends TestOMKeyRequest {
         doPreExecute(createDeleteKeyRequest());
 
     OMKeyDeleteRequest omKeyDeleteRequest =
-        new OMKeyDeleteRequest(modifiedOmRequest);
+            getOmKeyDeleteRequest(modifiedOmRequest);
 
     // Add only volume and bucket entry to DB.
     // In actual implementation we don't check for bucket/volume exists
@@ -108,7 +104,7 @@ public class TestOMKeyDeleteRequest extends TestOMKeyRequest {
         doPreExecute(createDeleteKeyRequest());
 
     OMKeyDeleteRequest omKeyDeleteRequest =
-        new OMKeyDeleteRequest(modifiedOmRequest);
+            getOmKeyDeleteRequest(modifiedOmRequest);
 
     OMClientResponse omClientResponse =
         omKeyDeleteRequest.validateAndUpdateCache(ozoneManager,
@@ -124,7 +120,7 @@ public class TestOMKeyDeleteRequest extends TestOMKeyRequest {
         doPreExecute(createDeleteKeyRequest());
 
     OMKeyDeleteRequest omKeyDeleteRequest =
-        new OMKeyDeleteRequest(modifiedOmRequest);
+            getOmKeyDeleteRequest(modifiedOmRequest);
 
     TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager);
 
@@ -145,7 +141,7 @@ public class TestOMKeyDeleteRequest extends TestOMKeyRequest {
   private OMRequest doPreExecute(OMRequest originalOmRequest) throws Exception {
 
     OMKeyDeleteRequest omKeyDeleteRequest =
-        new OMKeyDeleteRequest(originalOmRequest);
+            getOmKeyDeleteRequest(originalOmRequest);
 
     OMRequest modifiedOmRequest = omKeyDeleteRequest.preExecute(ozoneManager);
 
@@ -170,4 +166,18 @@ public class TestOMKeyDeleteRequest extends TestOMKeyRequest {
         .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey)
         .setClientId(UUID.randomUUID().toString()).build();
   }
+
+  protected String addKeyToTable() throws Exception {
+    TestOMRequestUtils.addKeyToTable(false, volumeName,
+            bucketName, keyName, clientID, replicationType, replicationFactor,
+            omMetadataManager);
+
+    return omMetadataManager.getOzoneKey(volumeName, bucketName,
+            keyName);
+  }
+
+  protected OMKeyDeleteRequest getOmKeyDeleteRequest(
+      OMRequest modifiedOmRequest) {
+    return new OMKeyDeleteRequest(modifiedOmRequest);
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestV1.java
new file mode 100644
index 0000000..dbba143
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestV1.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.key;
+
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.Time;
+
+/**
+ * Tests OmKeyDelete request layout version V1.
+ */
+public class TestOMKeyDeleteRequestV1 extends TestOMKeyDeleteRequest {
+
+  @Override protected OMKeyDeleteRequest getOmKeyDeleteRequest(
+      OMRequest modifiedOmRequest) {
+    return new OMKeyDeleteRequestV1(modifiedOmRequest);
+  }
+
+  @Override protected String addKeyToTable() throws Exception {
+    String parentDir = "c/d/e";
+    String fileName = "file1";
+    String key = parentDir + "/" + fileName;
+    keyName = key; // updated key name
+
+    // Create parent dirs for the path
+    long parentId = TestOMRequestUtils.addParentsToDirTable(volumeName,
+            bucketName, parentDir, omMetadataManager);
+
+    OmKeyInfo omKeyInfo =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, key,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE,
+                    parentId + 1,
+                    parentId, 100, Time.now());
+    TestOMRequestUtils.addFileToKeyTable(false, false,
+            fileName, omKeyInfo, -1, 50, omMetadataManager);
+    return omKeyInfo.getPath();
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java
index 3c22832..712a4ec 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java
@@ -39,14 +39,15 @@ import java.util.List;
  */
 public class TestOMKeyDeleteResponse extends TestOMKeyResponse {
 
+  private OmBucketInfo omBucketInfo;
+
   @Test
   public void testAddToDBBatch() throws Exception {
+    omBucketInfo = OmBucketInfo.newBuilder()
+            .setVolumeName(volumeName).setBucketName(bucketName)
+            .setCreationTime(Time.now()).build();
 
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
-    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
-        .setVolumeName(volumeName).setBucketName(bucketName)
-        .setCreationTime(Time.now()).build();
+    OmKeyInfo omKeyInfo = getOmKeyInfo();
 
     OzoneManagerProtocolProtos.OMResponse omResponse =
         OzoneManagerProtocolProtos.OMResponse.newBuilder().setDeleteKeyResponse(
@@ -55,14 +56,10 @@ public class TestOMKeyDeleteResponse extends TestOMKeyResponse {
             .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey)
             .build();
 
-    OMKeyDeleteResponse omKeyDeleteResponse = new OMKeyDeleteResponse(
-        omResponse, omKeyInfo, true, omBucketInfo);
+    OMKeyDeleteResponse omKeyDeleteResponse = getOmKeyDeleteResponse(omKeyInfo,
+            omResponse);
 
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
-
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
+    String ozoneKey = addKeyToTable();
 
     Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey));
     omKeyDeleteResponse.addToDBBatch(omMetadataManager, batchOperation);
@@ -80,12 +77,11 @@ public class TestOMKeyDeleteResponse extends TestOMKeyResponse {
 
   @Test
   public void testAddToDBBatchWithNonEmptyBlocks() throws Exception {
+    omBucketInfo = OmBucketInfo.newBuilder()
+            .setVolumeName(volumeName).setBucketName(bucketName)
+            .setCreationTime(Time.now()).build();
 
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
-    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
-        .setVolumeName(volumeName).setBucketName(bucketName)
-        .setCreationTime(Time.now()).build();
+    OmKeyInfo omKeyInfo = getOmKeyInfo();
 
     // Add block to key.
     List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
@@ -108,10 +104,7 @@ public class TestOMKeyDeleteResponse extends TestOMKeyResponse {
 
     omKeyInfo.appendNewBlocks(omKeyLocationInfoList, false);
 
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
-
-    omMetadataManager.getKeyTable().put(ozoneKey, omKeyInfo);
+    String ozoneKey = addKeyToTable();
 
     OzoneManagerProtocolProtos.OMResponse omResponse =
         OzoneManagerProtocolProtos.OMResponse.newBuilder().setDeleteKeyResponse(
@@ -120,8 +113,8 @@ public class TestOMKeyDeleteResponse extends TestOMKeyResponse {
             .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey)
             .build();
 
-    OMKeyDeleteResponse omKeyDeleteResponse = new OMKeyDeleteResponse(
-        omResponse, omKeyInfo, true, omBucketInfo);
+    OMKeyDeleteResponse omKeyDeleteResponse = getOmKeyDeleteResponse(omKeyInfo,
+            omResponse);
 
     Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey));
     omKeyDeleteResponse.addToDBBatch(omMetadataManager, batchOperation);
@@ -139,12 +132,10 @@ public class TestOMKeyDeleteResponse extends TestOMKeyResponse {
 
   @Test
   public void testAddToDBBatchWithErrorResponse() throws Exception {
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
-
-    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
-        .setVolumeName(volumeName).setBucketName(bucketName)
-        .setCreationTime(Time.now()).build();
+    omBucketInfo = OmBucketInfo.newBuilder()
+            .setVolumeName(volumeName).setBucketName(bucketName)
+            .setCreationTime(Time.now()).build();
+    OmKeyInfo omKeyInfo = getOmKeyInfo();
 
     OzoneManagerProtocolProtos.OMResponse omResponse =
         OzoneManagerProtocolProtos.OMResponse.newBuilder().setDeleteKeyResponse(
@@ -153,14 +144,10 @@ public class TestOMKeyDeleteResponse extends TestOMKeyResponse {
             .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey)
             .build();
 
-    OMKeyDeleteResponse omKeyDeleteResponse = new OMKeyDeleteResponse(
-        omResponse, omKeyInfo, true, omBucketInfo);
+    OMKeyDeleteResponse omKeyDeleteResponse = getOmKeyDeleteResponse(omKeyInfo,
+            omResponse);
 
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
-
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
+    String ozoneKey = addKeyToTable();
 
     Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey));
 
@@ -174,4 +161,22 @@ public class TestOMKeyDeleteResponse extends TestOMKeyResponse {
     Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey));
 
   }
+
+  protected String addKeyToTable() throws Exception {
+    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
+            keyName);
+
+    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName,
+            clientID, replicationType, replicationFactor, omMetadataManager);
+    return ozoneKey;
+  }
+
+  protected OMKeyDeleteResponse getOmKeyDeleteResponse(OmKeyInfo omKeyInfo,
+      OzoneManagerProtocolProtos.OMResponse omResponse) {
+    return new OMKeyDeleteResponse(omResponse, omKeyInfo, true, omBucketInfo);
+  }
+
+  protected OmBucketInfo getOmBucketInfo() {
+    return omBucketInfo;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseV1.java
new file mode 100644
index 0000000..3cfec38
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseV1.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.key;
+
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.util.Time;
+import org.junit.Assert;
+
+/**
+ * Tests OMKeyDeleteResponse layout version V1.
+ */
+public class TestOMKeyDeleteResponseV1 extends TestOMKeyDeleteResponse {
+
+  @Override
+  protected OMKeyDeleteResponse getOmKeyDeleteResponse(OmKeyInfo omKeyInfo,
+      OzoneManagerProtocolProtos.OMResponse omResponse) {
+    return new OMKeyDeleteResponseV1(omResponse, omKeyInfo,
+            true, getOmBucketInfo(), false);
+  }
+
+  @Override
+  protected String addKeyToTable() throws Exception {
+    // Add volume, bucket and key entries to OM DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+
+    // Create parent dirs for the path
+    long parentId = TestOMRequestUtils.addParentsToDirTable(volumeName,
+            bucketName, "", omMetadataManager);
+
+    OmKeyInfo omKeyInfo =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE,
+                    parentId + 1,
+                    parentId, 100, Time.now());
+    TestOMRequestUtils.addFileToKeyTable(false, false,
+            keyName, omKeyInfo, -1, 50, omMetadataManager);
+    return omKeyInfo.getPath();
+  }
+
+  @Override
+  protected OmKeyInfo getOmKeyInfo() {
+    Assert.assertNotNull(getOmBucketInfo());
+    return TestOMRequestUtils.createOmKeyInfo(volumeName,
+            getOmBucketInfo().getBucketName(), keyName, replicationType,
+            replicationFactor,
+            getOmBucketInfo().getObjectID() + 1,
+            getOmBucketInfo().getObjectID(), 100, Time.now());
+  }
+}
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
index 56c7e10..b4153f0 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
@@ -255,6 +256,7 @@ public class BasicOzoneClientAdapterImpl implements OzoneClientAdapter {
     return true;
   }
 
+
   /**
    * Helper method to delete an object specified by key name in bucket.
    *
@@ -262,12 +264,32 @@ public class BasicOzoneClientAdapterImpl implements OzoneClientAdapter {
    * @return true if the key is deleted, false otherwise
    */
   @Override
-  public boolean deleteObject(String keyName) {
+  public boolean deleteObject(String keyName) throws IOException {
+    return deleteObject(keyName, false);
+  }
+
+  /**
+   * Helper method to delete an object specified by key name in bucket.
+   *
+   * @param keyName key name to be deleted
+   * @param recursive recursive deletion of all sub path keys if true,
+   *                  otherwise non-recursive
+   * @return true if the key is deleted, false otherwise
+   */
+  @Override
+  public boolean deleteObject(String keyName, boolean recursive)
+      throws IOException {
     LOG.trace("issuing delete for key {}", keyName);
     try {
       incrementCounter(Statistic.OBJECTS_DELETED, 1);
-      bucket.deleteKey(keyName);
+      bucket.deleteDirectory(keyName, recursive);
       return true;
+    } catch (OMException ome) {
+      LOG.error("delete key failed {}", ome.getMessage());
+      if (OMException.ResultCodes.DIRECTORY_NOT_EMPTY == ome.getResult()) {
+        throw new PathIsNotEmptyDirectoryException(ome.getMessage());
+      }
+      return false;
     } catch (IOException ioe) {
       LOG.error("delete key failed {}", ioe.getMessage());
       return false;
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index 8a466a9..ba6b7a4 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -414,7 +414,17 @@ public class BasicOzoneFileSystem extends FileSystem {
   }
 
   private boolean renameV1(String srcPath, String dstPath) throws IOException {
-    adapter.renameKey(srcPath, dstPath);
+    try {
+      adapter.renameKey(srcPath, dstPath);
+    } catch (OMException ome) {
+      LOG.error("rename key failed: {}. source:{}, destin:{}",
+              ome.getMessage(), srcPath, dstPath);
+      if (OMException.ResultCodes.KEY_ALREADY_EXISTS == ome.getResult()) {
+        return false;
+      } else {
+        throw ome;
+      }
+    }
     return true;
   }
 
@@ -498,6 +508,20 @@ public class BasicOzoneFileSystem extends FileSystem {
     incrementCounter(Statistic.INVOCATION_DELETE, 1);
     statistics.incrementWriteOps(1);
     LOG.debug("Delete path {} - recursive {}", f, recursive);
+
+    String layOutVersion = adapter.getBucketLayoutVersion();
+    if (layOutVersion != null &&
+            OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1.equals(layOutVersion)) {
+
+      if (f.isRoot()) {
+        LOG.warn("Cannot delete root directory.");
+        return false;
+      }
+
+      String key = pathToKey(f);
+      return adapter.deleteObject(key, recursive);
+    }
+
     FileStatus status;
     try {
       status = getFileStatus(f);
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
index 4ea08f2..84cba47 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
@@ -439,10 +440,13 @@ public class BasicRootedOzoneClientAdapterImpl
    * Helper method to delete an object specified by key name in bucket.
    *
    * @param path path to a key to be deleted
+   * @param recursive recursive deletion of all sub path keys if true,
+   *                  otherwise non-recursive
    * @return true if the key is deleted, false otherwise
    */
   @Override
-  public boolean deleteObject(String path) {
+  public boolean deleteObject(String path, boolean recursive)
+      throws IOException {
     LOG.trace("issuing delete for path to key: {}", path);
     incrementCounter(Statistic.OBJECTS_DELETED, 1);
     OFSPath ofsPath = new OFSPath(path);
@@ -452,14 +456,25 @@ public class BasicRootedOzoneClientAdapterImpl
     }
     try {
       OzoneBucket bucket = getBucket(ofsPath, false);
-      bucket.deleteKey(keyName);
+      bucket.deleteDirectory(keyName, recursive);
       return true;
+    } catch (OMException ome) {
+      LOG.error("delete key failed {}", ome.getMessage());
+      if (OMException.ResultCodes.DIRECTORY_NOT_EMPTY == ome.getResult()) {
+        throw new PathIsNotEmptyDirectoryException(ome.getMessage());
+      }
+      return false;
     } catch (IOException ioe) {
       LOG.error("delete key failed " + ioe.getMessage());
       return false;
     }
   }
 
+  @Override
+  public boolean deleteObject(String path) throws IOException {
+    return deleteObject(path, false);
+  }
+
   /**
    * Helper function to check if the list of key paths are in the same volume
    * and same bucket.
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
index 5b65a0e..4a4d91b 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
@@ -51,7 +51,9 @@ public interface OzoneClientAdapter {
 
   boolean createDirectory(String keyName) throws IOException;
 
-  boolean deleteObject(String keyName);
+  boolean deleteObject(String keyName) throws IOException;
+
+  boolean deleteObject(String keyName, boolean recursive) throws IOException;
 
   boolean deleteObjects(List<String> keyName);
 

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 22/29: HDDS-4835. [FSO]S3Multipart: Implement UploadAbortRequest (#1997)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 72dbf179d1e4c5fddaea68d887347a31fd92bd7e
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Tue Mar 9 17:11:31 2021 +0530

    HDDS-4835. [FSO]S3Multipart: Implement UploadAbortRequest (#1997)
---
 .../rpc/TestOzoneClientMultipartUploadV1.java      | 179 +++++++++++++++++++++
 .../om/ratis/utils/OzoneManagerRatisUtils.java     |   4 +
 .../multipart/S3MultipartUploadAbortRequest.java   |  43 +++--
 .../multipart/S3MultipartUploadAbortRequestV1.java |  90 +++++++++++
 .../S3MultipartUploadCommitPartRequestV1.java      |  23 +--
 .../S3MultipartUploadCompleteRequest.java          |  17 +-
 .../S3MultipartUploadCompleteRequestV1.java        |  16 ++
 .../S3MultipartUploadAbortResponseV1.java          |  55 +++++++
 .../TestS3InitiateMultipartUploadRequest.java      |  24 +--
 .../TestS3InitiateMultipartUploadRequestV1.java    |  57 +------
 .../s3/multipart/TestS3MultipartRequest.java       |   7 +-
 .../TestS3MultipartUploadAbortRequest.java         |  29 +++-
 .../TestS3MultipartUploadAbortRequestV1.java       |  69 ++++++++
 .../s3/multipart/TestS3MultipartResponse.java      |  25 ++-
 .../TestS3MultipartUploadAbortResponse.java        |  22 ++-
 .../TestS3MultipartUploadAbortResponseV1.java      |  75 +++++++++
 16 files changed, 622 insertions(+), 113 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java
index 1ab2cc3..76feec8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java
@@ -31,7 +31,10 @@ import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
 
@@ -39,8 +42,12 @@ import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.hadoop.hdds.StringUtils.string2Bytes;
 import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
 
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -49,7 +56,10 @@ import org.junit.Test;
 import org.junit.rules.Timeout;
 
 import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
 import java.util.Arrays;
+import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.Map;
 import java.util.TreeMap;
@@ -58,6 +68,9 @@ import java.util.UUID;
 import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
 import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 /**
  * This test verifies all the S3 multipart client apis - layout version V1.
@@ -450,6 +463,172 @@ public class TestOzoneClientMultipartUploadV1 {
     }
   }
 
+  @Test
+  public void testAbortUploadFail() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    OzoneTestUtils.expectOmException(NO_SUCH_MULTIPART_UPLOAD_ERROR,
+        () -> bucket.abortMultipartUpload(keyName, "random"));
+  }
+
+  @Test
+  public void testAbortUploadFailWithInProgressPartUpload() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String parentDir = "a/b/c/d/";
+    String keyName = parentDir + UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName,
+        STAND_ALONE, ONE);
+
+    Assert.assertNotNull(omMultipartInfo.getUploadID());
+
+    // Do not close output stream.
+    byte[] data = "data".getBytes(UTF_8);
+    OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
+        data.length, 1, omMultipartInfo.getUploadID());
+    ozoneOutputStream.write(data, 0, data.length);
+
+    // Abort before completing part upload.
+    bucket.abortMultipartUpload(keyName, omMultipartInfo.getUploadID());
+
+    try {
+      ozoneOutputStream.close();
+      fail("testAbortUploadFailWithInProgressPartUpload failed");
+    } catch (IOException ex) {
+      assertTrue(ex instanceof OMException);
+      assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR,
+          ((OMException) ex).getResult());
+    }
+  }
+
+  @Test
+  public void testAbortUploadSuccessWithOutAnyParts() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String parentDir = "a/b/c/d/";
+    String keyName = parentDir + UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+        ONE);
+    bucket.abortMultipartUpload(keyName, uploadID);
+  }
+
+  @Test
+  public void testAbortUploadSuccessWithParts() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String parentDir = "a/b/c/d/";
+    String keyName = parentDir + UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+        ONE);
+    String partName = uploadPart(bucket, keyName, uploadID, 1,
+        "data".getBytes(UTF_8));
+
+    OMMetadataManager metadataMgr =
+        cluster.getOzoneManager().getMetadataManager();
+    String multipartKey = verifyUploadedPart(volumeName, bucketName, keyName,
+        uploadID, partName, metadataMgr);
+
+    bucket.abortMultipartUpload(keyName, uploadID);
+
+    OmKeyInfo omKeyInfo = metadataMgr.getOpenKeyTable().get(multipartKey);
+    OmMultipartKeyInfo omMultipartKeyInfo =
+        metadataMgr.getMultipartInfoTable().get(multipartKey);
+    Assert.assertNull(omKeyInfo);
+    Assert.assertNull(omMultipartKeyInfo);
+
+    // Since deleteTable operation is performed via
+    // batchOp - Table.putWithBatch(), which is an async operation and
+    // not making any assertion for the same.
+  }
+
+  private String verifyUploadedPart(String volumeName, String bucketName,
+      String keyName, String uploadID, String partName,
+      OMMetadataManager metadataMgr) throws IOException {
+    String multipartKey = getMultipartKey(uploadID, volumeName, bucketName,
+        keyName, metadataMgr);
+    OmKeyInfo omKeyInfo = metadataMgr.getOpenKeyTable().get(multipartKey);
+    OmMultipartKeyInfo omMultipartKeyInfo =
+        metadataMgr.getMultipartInfoTable().get(multipartKey);
+
+    Assert.assertNotNull(omKeyInfo);
+    Assert.assertNotNull(omMultipartKeyInfo);
+    Assert.assertEquals(OzoneFSUtils.getFileName(keyName),
+        omKeyInfo.getKeyName());
+    Assert.assertEquals(uploadID, omMultipartKeyInfo.getUploadID());
+
+    long parentID = getParentID(volumeName, bucketName, keyName,
+        metadataMgr);
+
+    TreeMap<Integer, OzoneManagerProtocolProtos.PartKeyInfo> partKeyInfoMap =
+        omMultipartKeyInfo.getPartKeyInfoMap();
+    for (Map.Entry<Integer, OzoneManagerProtocolProtos.PartKeyInfo> entry :
+        partKeyInfoMap.entrySet()) {
+      OzoneManagerProtocolProtos.PartKeyInfo partKeyInfo = entry.getValue();
+      OmKeyInfo currentKeyPartInfo =
+          OmKeyInfo.getFromProtobuf(partKeyInfo.getPartKeyInfo());
+
+      Assert.assertEquals(OzoneFSUtils.getFileName(keyName),
+          currentKeyPartInfo.getKeyName());
+
+      // prepare dbPartName <parentID>/partFileName
+      String partFileName = OzoneFSUtils.getFileName(partName);
+      String dbPartName = metadataMgr.getOzonePathKey(parentID, partFileName);
+
+      Assert.assertEquals(dbPartName, partKeyInfo.getPartName());
+    }
+    return multipartKey;
+  }
+
+  private String getMultipartKey(String multipartUploadID, String volumeName,
+      String bucketName, String keyName, OMMetadataManager omMetadataManager)
+      throws IOException {
+
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    long parentID = getParentID(volumeName, bucketName, keyName,
+        omMetadataManager);
+
+    String multipartKey = omMetadataManager.getMultipartKey(parentID,
+        fileName, multipartUploadID);
+
+    return multipartKey;
+  }
+
+  private long getParentID(String volumeName, String bucketName,
+      String keyName, OMMetadataManager omMetadataManager) throws IOException {
+    Iterator<Path> pathComponents = Paths.get(keyName).iterator();
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+        omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketId = omBucketInfo.getObjectID();
+    return OMFileRequest.getParentID(bucketId, pathComponents,
+        keyName, omMetadataManager);
+  }
+
   private String initiateMultipartUpload(OzoneBucket bucket, String keyName,
       ReplicationType replicationType, ReplicationFactor replicationFactor)
           throws Exception {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index db9f32e..52305253 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.ozone.om.request.key.acl.prefix.OMPrefixSetAclRequest;
 import org.apache.hadoop.ozone.om.request.s3.multipart.S3InitiateMultipartUploadRequest;
 import org.apache.hadoop.ozone.om.request.s3.multipart.S3InitiateMultipartUploadRequestV1;
 import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadAbortRequest;
+import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadAbortRequestV1;
 import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCommitPartRequest;
 import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCommitPartRequestV1;
 import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCompleteRequest;
@@ -191,6 +192,9 @@ public final class OzoneManagerRatisUtils {
       }
       return new S3MultipartUploadCommitPartRequest(omRequest);
     case AbortMultiPartUpload:
+      if (isBucketFSOptimized()) {
+        return new S3MultipartUploadAbortRequestV1(omRequest);
+      }
       return new S3MultipartUploadAbortRequest(omRequest);
     case CompleteMultiPartUpload:
       if (isBucketFSOptimized()) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
index 650133b..fb67a20 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
@@ -123,8 +123,8 @@ public class S3MultipartUploadAbortRequest extends OMKeyRequest {
 
       validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
 
-      multipartKey = omMetadataManager.getMultipartKey(
-          volumeName, bucketName, keyName, keyArgs.getMultipartUploadID());
+      multipartKey = getMultipartKey(keyArgs.getMultipartUploadID(),
+          volumeName, bucketName, keyName, omMetadataManager);
 
       OmKeyInfo omKeyInfo =
           omMetadataManager.getOpenKeyTable().get(multipartKey);
@@ -166,19 +166,14 @@ public class S3MultipartUploadAbortRequest extends OMKeyRequest {
           new CacheKey<>(multipartKey),
           new CacheValue<>(Optional.absent(), trxnLogIndex));
 
-      omClientResponse = new S3MultipartUploadAbortResponse(
-          omResponse.setAbortMultiPartUploadResponse(
-              MultipartUploadAbortResponse.newBuilder()).build(),
-          multipartKey, multipartKeyInfo, ozoneManager.isRatisEnabled(),
-          omBucketInfo.copyObject());
+      omClientResponse = getOmClientResponse(ozoneManager, multipartKeyInfo,
+          multipartKey, omResponse, omBucketInfo);
 
       result = Result.SUCCESS;
     } catch (IOException ex) {
       result = Result.FAILURE;
       exception = ex;
-      omClientResponse =
-          new S3MultipartUploadAbortResponse(createErrorOMResponse(omResponse,
-              exception));
+      omClientResponse = getOmClientResponse(exception, omResponse);
     } finally {
       addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
           omDoubleBufferHelper);
@@ -212,4 +207,32 @@ public class S3MultipartUploadAbortRequest extends OMKeyRequest {
 
     return omClientResponse;
   }
+
+  protected OMClientResponse getOmClientResponse(IOException exception,
+      OMResponse.Builder omResponse) {
+
+    return new S3MultipartUploadAbortResponse(createErrorOMResponse(omResponse,
+            exception));
+  }
+
+  protected OMClientResponse getOmClientResponse(OzoneManager ozoneManager,
+      OmMultipartKeyInfo multipartKeyInfo, String multipartKey,
+      OMResponse.Builder omResponse, OmBucketInfo omBucketInfo) {
+
+    OMClientResponse omClientResponse = new S3MultipartUploadAbortResponse(
+        omResponse.setAbortMultiPartUploadResponse(
+            MultipartUploadAbortResponse.newBuilder()).build(),
+        multipartKey, multipartKeyInfo, ozoneManager.isRatisEnabled(),
+        omBucketInfo.copyObject());
+    return omClientResponse;
+  }
+
+  protected String getMultipartKey(String multipartUploadID, String volumeName,
+      String bucketName, String keyName, OMMetadataManager omMetadataManager)
+      throws IOException {
+
+    String multipartKey = omMetadataManager.getMultipartKey(
+        volumeName, bucketName, keyName, multipartUploadID);
+    return multipartKey;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequestV1.java
new file mode 100644
index 0000000..ddeb10c
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequestV1.java
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.s3.multipart;
+
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.s3.multipart.S3MultipartUploadAbortResponseV1;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadAbortResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Iterator;
+
+/**
+ * Handles Abort of multipart upload request.
+ */
+public class S3MultipartUploadAbortRequestV1
+    extends S3MultipartUploadAbortRequest {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(S3MultipartUploadAbortRequestV1.class);
+
+  public S3MultipartUploadAbortRequestV1(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  protected OMClientResponse getOmClientResponse(IOException exception,
+      OMResponse.Builder omResponse) {
+
+    return new S3MultipartUploadAbortResponseV1(createErrorOMResponse(
+        omResponse, exception));
+  }
+
+  protected OMClientResponse getOmClientResponse(OzoneManager ozoneManager,
+      OmMultipartKeyInfo multipartKeyInfo, String multipartKey,
+      OMResponse.Builder omResponse, OmBucketInfo omBucketInfo) {
+
+    OMClientResponse omClientResponse = new S3MultipartUploadAbortResponseV1(
+        omResponse.setAbortMultiPartUploadResponse(
+            MultipartUploadAbortResponse.newBuilder()).build(),
+        multipartKey, multipartKeyInfo, ozoneManager.isRatisEnabled(),
+        omBucketInfo.copyObject());
+    return omClientResponse;
+  }
+
+  protected String getMultipartKey(String multipartUploadID, String volumeName,
+      String bucketName, String keyName, OMMetadataManager omMetadataManager)
+      throws IOException {
+
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    Iterator<Path> pathComponents = Paths.get(keyName).iterator();
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+        omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketId = omBucketInfo.getObjectID();
+    long parentID = OMFileRequest.getParentID(bucketId, pathComponents,
+        keyName, omMetadataManager);
+
+    String multipartKey = omMetadataManager.getMultipartKey(parentID,
+        fileName, multipartUploadID);
+
+    return multipartKey;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequestV1.java
index 7aa21cf..0f23745 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequestV1.java
@@ -92,7 +92,7 @@ public class S3MultipartUploadCommitPartRequestV1
         getOmRequest());
     OMClientResponse omClientResponse = null;
     OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo = null;
-    String openKey = null;
+    String openFileKey = null;
     OmKeyInfo omKeyInfo = null;
     String multipartKey = null;
     OmMultipartKeyInfo multipartKeyInfo = null;
@@ -127,14 +127,15 @@ public class S3MultipartUploadCommitPartRequestV1
 
       long clientID = multipartCommitUploadPartRequest.getClientID();
 
-      openKey = omMetadataManager.getOpenFileName(parentID, fileName, clientID);
+      openFileKey = omMetadataManager.getOpenFileName(parentID, fileName,
+          clientID);
 
       omKeyInfo = OMFileRequest.getOmKeyInfoFromFileTable(true,
-              omMetadataManager, openKey, keyName);
+              omMetadataManager, openFileKey, keyName);
 
       if (omKeyInfo == null) {
         throw new OMException("Failed to commit Multipart Upload key, as " +
-            openKey + " entry is not found in the openKey table",
+            openFileKey + " entry is not found in the openFileTable",
             KEY_NOT_FOUND);
       }
 
@@ -150,7 +151,7 @@ public class S3MultipartUploadCommitPartRequestV1
 
       /**
        * Format of PartName stored into MultipartInfoTable is,
-       * "fileName + ClientID".
+       * "<parentID>/fileName + ClientID".
        *
        * Contract is that all part names present in a multipart info will
        * have same key prefix path.
@@ -159,7 +160,9 @@ public class S3MultipartUploadCommitPartRequestV1
        *        /vol1/buck1/a/b/c/part-1, /vol1/buck1/a/b/c/part-2,
        *        /vol1/buck1/a/b/c/part-n
        */
-      dbPartName = fileName + clientID;
+      String ozoneFileKey = omMetadataManager.getOzonePathKey(parentID,
+          fileName);
+      dbPartName = ozoneFileKey + clientID;
 
       if (multipartKeyInfo == null) {
         // This can occur when user started uploading part by the time commit
@@ -204,7 +207,7 @@ public class S3MultipartUploadCommitPartRequestV1
               trxnLogIndex));
 
       omMetadataManager.getOpenKeyTable().addCacheEntry(
-          new CacheKey<>(openKey),
+          new CacheKey<>(openFileKey),
           new CacheValue<>(Optional.absent(), trxnLogIndex));
 
       long scmBlockSize = ozoneManager.getScmBlockSize();
@@ -228,7 +231,7 @@ public class S3MultipartUploadCommitPartRequestV1
               .setPartName(fullKeyPartName));
 
       omClientResponse = new S3MultipartUploadCommitPartResponseV1(
-          omResponse.build(), multipartKey, openKey,
+          omResponse.build(), multipartKey, openFileKey,
           multipartKeyInfo, oldPartKeyInfo, omKeyInfo,
           ozoneManager.isRatisEnabled(),
           omBucketInfo.copyObject());
@@ -238,8 +241,8 @@ public class S3MultipartUploadCommitPartRequestV1
       result = Result.FAILURE;
       exception = ex;
       omClientResponse = new S3MultipartUploadCommitPartResponseV1(
-          createErrorOMResponse(omResponse, exception), multipartKey, openKey,
-          multipartKeyInfo, oldPartKeyInfo, omKeyInfo,
+          createErrorOMResponse(omResponse, exception), multipartKey,
+          openFileKey, multipartKeyInfo, oldPartKeyInfo, omKeyInfo,
           ozoneManager.isRatisEnabled(), copyBucketInfo);
     } finally {
       addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
index d396e8e..162cf2f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
@@ -34,9 +34,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
-import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
@@ -433,22 +431,11 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
     return dataSize;
   }
 
-  private String preparePartName(String requestedVolume,
+  protected String preparePartName(String requestedVolume,
       String requestedBucket, String keyName, PartKeyInfo partKeyInfo,
       OMMetadataManager omMetadataManager) {
 
-    String partName;
-    if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
-      String parentPath = OzoneFSUtils.getParent(keyName);
-      StringBuffer keyPath = new StringBuffer(parentPath);
-      keyPath.append(partKeyInfo.getPartName());
-
-      partName = omMetadataManager.getOzoneKey(requestedVolume,
-              requestedBucket, keyPath.toString());
-    } else {
-      partName = partKeyInfo.getPartName();
-    }
-    return partName;
+    return partKeyInfo.getPartName();
   }
 
   private static String failureMessage(String volume, String bucket,
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestV1.java
index 4ab9ee7..37f2dad 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestV1.java
@@ -252,6 +252,7 @@ public class S3MultipartUploadCompleteRequestV1
             new CacheValue<>(Optional.absent(), transactionLogIndex));
   }
 
+  @Override
   protected void updatePrefixFSOInfo(OmKeyInfo dbOpenKeyInfo,
                                      OmKeyInfo.Builder builder) {
     // updates parentID and fileName
@@ -259,6 +260,21 @@ public class S3MultipartUploadCompleteRequestV1
     builder.setFileName(dbOpenKeyInfo.getFileName());
   }
 
+  @Override
+  protected String preparePartName(String requestedVolume,
+      String requestedBucket, String keyName, PartKeyInfo partKeyInfo,
+      OMMetadataManager omMetadataManager) {
+
+    String parentPath = OzoneFSUtils.getParent(keyName);
+    StringBuffer keyPath = new StringBuffer(parentPath);
+    String partFileName = OzoneFSUtils.getFileName(partKeyInfo.getPartName());
+    keyPath.append(partFileName);
+
+    return omMetadataManager.getOzoneKey(requestedVolume,
+        requestedBucket, keyPath.toString());
+  }
+
+
   private static String failureMessage(String volume, String bucket,
                                        String keyName) {
     return "Complete Multipart Upload Failed: volume: " +
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponseV1.java
new file mode 100644
index 0000000..3ed05ef
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponseV1.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.s3.multipart;
+
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+
+import javax.annotation.Nonnull;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.MULTIPARTFILEINFO_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
+
+/**
+ * Response for Multipart Abort Request layout version V1.
+ */
+@CleanupTableInfo(cleanupTables = {OPEN_FILE_TABLE, DELETED_TABLE,
+    MULTIPARTFILEINFO_TABLE})
+public class S3MultipartUploadAbortResponseV1
+    extends S3MultipartUploadAbortResponse {
+
+  public S3MultipartUploadAbortResponseV1(@Nonnull OMResponse omResponse,
+      String multipartKey, @Nonnull OmMultipartKeyInfo omMultipartKeyInfo,
+      boolean isRatisEnabled, @Nonnull OmBucketInfo omBucketInfo) {
+
+    super(omResponse, multipartKey, omMultipartKeyInfo, isRatisEnabled,
+        omBucketInfo);
+  }
+
+  /**
+   * For when the request is not successful.
+   * For a successful request, the other constructor should be used.
+   */
+  public S3MultipartUploadAbortResponseV1(@Nonnull OMResponse omResponse) {
+    super(omResponse);
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java
index 088b232..01561e9 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java
@@ -56,7 +56,7 @@ public class TestS3InitiateMultipartUploadRequest
         bucketName, keyName);
 
     S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
-        new S3InitiateMultipartUploadRequest(modifiedRequest);
+        getS3InitiateMultipartUploadReq(modifiedRequest);
 
     OMClientResponse omClientResponse =
         s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager,
@@ -65,8 +65,8 @@ public class TestS3InitiateMultipartUploadRequest
     Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
         omClientResponse.getOMResponse().getStatus());
 
-    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
-        bucketName, keyName, modifiedRequest.getInitiateMultiPartUploadRequest()
+    String multipartKey = getMultipartKey(volumeName, bucketName, keyName,
+        modifiedRequest.getInitiateMultiPartUploadRequest()
             .getKeyArgs().getMultipartUploadID());
 
     Assert.assertNotNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
@@ -102,7 +102,7 @@ public class TestS3InitiateMultipartUploadRequest
         volumeName, bucketName, keyName);
 
     S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
-        new S3InitiateMultipartUploadRequest(modifiedRequest);
+        getS3InitiateMultipartUploadReq(modifiedRequest);
 
     OMClientResponse omClientResponse =
         s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager,
@@ -111,8 +111,8 @@ public class TestS3InitiateMultipartUploadRequest
     Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND,
         omClientResponse.getOMResponse().getStatus());
 
-    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
-        bucketName, keyName, modifiedRequest.getInitiateMultiPartUploadRequest()
+    String multipartKey = getMultipartKey(volumeName, bucketName, keyName,
+        modifiedRequest.getInitiateMultiPartUploadRequest()
             .getKeyArgs().getMultipartUploadID());
 
     Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
@@ -130,7 +130,7 @@ public class TestS3InitiateMultipartUploadRequest
         keyName);
 
     S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
-        new S3InitiateMultipartUploadRequest(modifiedRequest);
+        getS3InitiateMultipartUploadReq(modifiedRequest);
 
     OMClientResponse omClientResponse =
         s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager,
@@ -139,12 +139,18 @@ public class TestS3InitiateMultipartUploadRequest
     Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND,
         omClientResponse.getOMResponse().getStatus());
 
-    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
-        bucketName, keyName, modifiedRequest.getInitiateMultiPartUploadRequest()
+    String multipartKey = getMultipartKey(volumeName, bucketName, keyName,
+        modifiedRequest.getInitiateMultiPartUploadRequest()
             .getKeyArgs().getMultipartUploadID());
 
     Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
     Assert.assertNull(omMetadataManager.getMultipartInfoTable()
         .get(multipartKey));
   }
+
+  protected String getMultipartKey(String volumeName, String bucketName,
+                                   String keyName, String multipartUploadID) {
+    return omMetadataManager.getMultipartKey(volumeName,
+        bucketName, keyName, multipartUploadID);
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestV1.java
index 5fa75ba..4c9dbb5 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestV1.java
@@ -65,8 +65,8 @@ public class TestS3InitiateMultipartUploadRequestV1
     OMRequest modifiedRequest = doPreExecuteInitiateMPUV1(volumeName,
         bucketName, keyName);
 
-    S3InitiateMultipartUploadRequestV1 s3InitiateMultipartUploadRequestV1 =
-        new S3InitiateMultipartUploadRequestV1(modifiedRequest);
+    S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequestV1 =
+        getS3InitiateMultipartUploadReq(modifiedRequest);
 
     OMClientResponse omClientResponse =
             s3InitiateMultipartUploadRequestV1.validateAndUpdateCache(
@@ -112,54 +112,6 @@ public class TestS3InitiateMultipartUploadRequestV1
             .getCreationTime());
   }
 
-  @Test
-  public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager);
-
-    OMRequest modifiedRequest = doPreExecuteInitiateMPU(
-        volumeName, bucketName, keyName);
-
-    S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
-        new S3InitiateMultipartUploadRequest(modifiedRequest);
-
-    OMClientResponse omClientResponse =
-        s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager,
-            100L, ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND,
-        omClientResponse.getOMResponse().getStatus());
-
-    Assert.assertTrue(omMetadataManager.getOpenKeyTable().isEmpty());
-    Assert.assertTrue(omMetadataManager.getMultipartInfoTable().isEmpty());
-  }
-
-  @Test
-  public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception {
-    String volumeName = UUID.randomUUID().toString();
-    String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
-
-    OMRequest modifiedRequest = doPreExecuteInitiateMPU(volumeName, bucketName,
-        keyName);
-
-    S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
-        new S3InitiateMultipartUploadRequest(modifiedRequest);
-
-    OMClientResponse omClientResponse =
-        s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager,
-            100L, ozoneManagerDoubleBufferHelper);
-
-    Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND,
-        omClientResponse.getOMResponse().getStatus());
-
-    Assert.assertTrue(omMetadataManager.getOpenKeyTable().isEmpty());
-    Assert.assertTrue(omMetadataManager.getMultipartInfoTable().isEmpty());
-  }
-
   private long verifyDirectoriesInDB(List<String> dirs, long bucketID)
       throws IOException {
     // bucketID is the parent
@@ -179,4 +131,9 @@ public class TestS3InitiateMultipartUploadRequestV1
     }
     return parentID;
   }
+
+  protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq(
+      OMRequest initiateMPURequest) {
+    return new S3InitiateMultipartUploadRequestV1(initiateMPURequest);
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
index 16cb4ae..d08b4ae 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
@@ -183,7 +183,7 @@ public class TestS3MultipartRequest {
 
 
     S3MultipartUploadAbortRequest s3MultipartUploadAbortRequest =
-        new S3MultipartUploadAbortRequest(omRequest);
+        getS3MultipartUploadAbortReq(omRequest);
 
     OMRequest modifiedRequest =
         s3MultipartUploadAbortRequest.preExecute(ozoneManager);
@@ -262,4 +262,9 @@ public class TestS3MultipartRequest {
     return new S3InitiateMultipartUploadRequest(initiateMPURequest);
   }
 
+  protected S3MultipartUploadAbortRequest getS3MultipartUploadAbortReq(
+      OMRequest omRequest) {
+    return new S3MultipartUploadAbortRequest(omRequest);
+  }
+
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequest.java
index d0b61c7..9bff636 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequest.java
@@ -50,16 +50,18 @@ public class TestS3MultipartUploadAbortRequest extends TestS3MultipartRequest {
   public void testValidateAndUpdateCache() throws Exception {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
 
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
 
+    createParentPath(volumeName, bucketName);
+
     OMRequest initiateMPURequest = doPreExecuteInitiateMPU(volumeName,
         bucketName, keyName);
 
     S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
-        new S3InitiateMultipartUploadRequest(initiateMPURequest);
+        getS3InitiateMultipartUploadReq(initiateMPURequest);
 
     OMClientResponse omClientResponse =
         s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager,
@@ -73,15 +75,15 @@ public class TestS3MultipartUploadAbortRequest extends TestS3MultipartRequest {
             multipartUploadID);
 
     S3MultipartUploadAbortRequest s3MultipartUploadAbortRequest =
-        new S3MultipartUploadAbortRequest(abortMPURequest);
+        getS3MultipartUploadAbortReq(abortMPURequest);
 
     omClientResponse =
         s3MultipartUploadAbortRequest.validateAndUpdateCache(ozoneManager, 2L,
             ozoneManagerDoubleBufferHelper);
 
 
-    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
-        bucketName, keyName, multipartUploadID);
+    String multipartKey = getMultipartKey(volumeName, bucketName, keyName,
+        multipartUploadID);
 
     // Check table and response.
     Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
@@ -108,7 +110,7 @@ public class TestS3MultipartUploadAbortRequest extends TestS3MultipartRequest {
             multipartUploadID);
 
     S3MultipartUploadAbortRequest s3MultipartUploadAbortRequest =
-        new S3MultipartUploadAbortRequest(abortMPURequest);
+        getS3MultipartUploadAbortReq(abortMPURequest);
 
     OMClientResponse omClientResponse =
         s3MultipartUploadAbortRequest.validateAndUpdateCache(ozoneManager, 2L,
@@ -177,4 +179,19 @@ public class TestS3MultipartUploadAbortRequest extends TestS3MultipartRequest {
         omClientResponse.getOMResponse().getStatus());
 
   }
+
+  protected String getKeyName() {
+    return UUID.randomUUID().toString();
+  }
+
+  protected void createParentPath(String volumeName, String bucketName)
+      throws Exception {
+    // no parent hierarchy
+  }
+
+  protected String getMultipartKey(String volumeName, String bucketName,
+      String keyName, String multipartUploadID) {
+    return omMetadataManager.getMultipartKey(volumeName,
+        bucketName, keyName, multipartUploadID);
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequestV1.java
new file mode 100644
index 0000000..fd8a158
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequestV1.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.s3.multipart;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+
+import java.util.UUID;
+
+/**
+ * Test Multipart upload abort request.
+ */
+public class TestS3MultipartUploadAbortRequestV1
+    extends TestS3MultipartUploadAbortRequest {
+
+  private String dirName = "a/b/c/";
+
+  private long parentID;
+
+  @Override
+  protected S3MultipartUploadAbortRequest getS3MultipartUploadAbortReq(
+      OMRequest omRequest) {
+    return new S3MultipartUploadAbortRequestV1(omRequest);
+  }
+
+  @Override
+  protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq(
+      OMRequest initiateMPURequest) {
+    return new S3InitiateMultipartUploadRequestV1(initiateMPURequest);
+  }
+
+  @Override
+  protected String getKeyName() {
+    return dirName + UUID.randomUUID().toString();
+  }
+
+  @Override
+  protected void createParentPath(String volumeName, String bucketName)
+      throws Exception {
+    // Create parent dirs for the path
+    parentID = TestOMRequestUtils.addParentsToDirTable(volumeName, bucketName,
+        dirName, omMetadataManager);
+  }
+
+  @Override
+  protected String getMultipartKey(String volumeName, String bucketName,
+      String keyName, String multipartUploadID) {
+    String fileName = StringUtils.substringAfter(keyName, dirName);
+    return omMetadataManager.getMultipartKey(parentID, fileName,
+        multipartUploadID);
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
index 6f4d6fa..c10ff77 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
@@ -116,8 +116,8 @@ public class TestS3MultipartResponse {
                 .setKeyName(keyName)
                 .setMultipartUploadID(multipartUploadID)).build();
 
-    return new S3InitiateMultipartUploadResponse(omResponse, multipartKeyInfo,
-        omKeyInfo);
+    return getS3InitiateMultipartUploadResp(multipartKeyInfo, omKeyInfo,
+        omResponse);
   }
 
   public S3MultipartUploadAbortResponse createS3AbortMPUResponse(
@@ -130,11 +130,10 @@ public class TestS3MultipartResponse {
         .setAbortMultiPartUploadResponse(
             MultipartUploadAbortResponse.newBuilder().build()).build();
 
-    return new S3MultipartUploadAbortResponse(omResponse, multipartKey,
-        omMultipartKeyInfo, true, omBucketInfo);
+    return getS3MultipartUploadAbortResp(multipartKey,
+        omMultipartKeyInfo, omBucketInfo, omResponse);
   }
 
-
   public void addPart(int partNumber, PartKeyInfo partKeyInfo,
       OmMultipartKeyInfo omMultipartKeyInfo) {
     omMultipartKeyInfo.addPartKeyInfo(partNumber, partKeyInfo);
@@ -162,7 +161,7 @@ public class TestS3MultipartResponse {
       int partNumber) {
     return PartKeyInfo.newBuilder()
         .setPartNumber(partNumber)
-        .setPartName(omMetadataManager.getMultipartKey(parentID, fileName,
+        .setPartName(omMetadataManager.getOzonePathKey(parentID, fileName +
                 UUID.randomUUID().toString()))
         .setPartKeyInfo(KeyInfo.newBuilder()
             .setVolumeName(volumeName)
@@ -295,4 +294,18 @@ public class TestS3MultipartResponse {
     return omMetadataManager.getMultipartKey(parentID, fileName,
             multipartUploadID);
   }
+
+  /**
+   * Factory hook for the initiate-MPU response under test; subclasses
+   * (e.g. the V1 layout test) override it to supply their own variant.
+   */
+  protected S3InitiateMultipartUploadResponse getS3InitiateMultipartUploadResp(
+      OmMultipartKeyInfo multipartKeyInfo, OmKeyInfo omKeyInfo,
+      OMResponse omResponse) {
+    return new S3InitiateMultipartUploadResponse(omResponse, multipartKeyInfo,
+        omKeyInfo);
+  }
+
+  /**
+   * Factory hook for the abort response under test; subclasses (e.g. the
+   * V1 layout test) override it to supply their own variant. The 'true'
+   * flag is the constructor's isRatisEnabled-style boolean fixed for tests.
+   * NOTE(review): confirm the boolean's exact meaning against the ctor.
+   */
+  protected S3MultipartUploadAbortResponse getS3MultipartUploadAbortResp(
+      String multipartKey, OmMultipartKeyInfo omMultipartKeyInfo,
+      OmBucketInfo omBucketInfo, OMResponse omResponse) {
+    return new S3MultipartUploadAbortResponse(omResponse, multipartKey,
+        omMultipartKeyInfo, true, omBucketInfo);
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java
index a11c4db..ae8650e 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java
@@ -43,10 +43,10 @@ public class TestS3MultipartUploadAbortResponse
 
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
     String multipartUploadID = UUID.randomUUID().toString();
-    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
-        bucketName, keyName, multipartUploadID);
+    String multipartKey = getMultipartKey(volumeName, bucketName, keyName,
+        multipartUploadID);
 
     OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
         .setVolumeName(volumeName).setBucketName(bucketName)
@@ -82,10 +82,10 @@ public class TestS3MultipartUploadAbortResponse
 
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
     String multipartUploadID = UUID.randomUUID().toString();
-    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
-        bucketName, keyName, multipartUploadID);
+    String multipartKey = getMultipartKey(volumeName, bucketName, keyName,
+        multipartUploadID);
 
     OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
         .setVolumeName(volumeName).setBucketName(bucketName)
@@ -154,4 +154,14 @@ public class TestS3MultipartUploadAbortResponse
         ro.getOmKeyInfoList().get(0));
   }
 
+  /** Key-name hook: a flat random key by default; overridable per layout. */
+  protected String getKeyName() {
+    return UUID.randomUUID().toString();
+  }
+
+  /**
+   * Multipart-key hook: default (volume, bucket, key, uploadID) addressing;
+   * layout-specific subclasses override to use their own key scheme.
+   */
+  protected String getMultipartKey(String volumeName, String bucketName,
+      String keyName, String multipartUploadID) {
+    return omMetadataManager.getMultipartKey(volumeName,
+        bucketName, keyName, multipartUploadID);
+  }
+
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponseV1.java
new file mode 100644
index 0000000..115b277
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponseV1.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.s3.multipart;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+
+import java.util.ArrayList;
+import java.util.UUID;
+
+/**
+ * Test multipart upload abort response.
+ */
+public class TestS3MultipartUploadAbortResponseV1
+    extends TestS3MultipartUploadAbortResponse {
+
+  // Fixed directory prefix used to build key names with parent dirs.
+  private final String dirName = "abort/b/c/";
+
+  // Arbitrary object ID standing in for the leaf parent directory.
+  private final long parentID = 1027;
+
+  /** Key name carries the directory prefix so the V1 layout is exercised. */
+  @Override
+  protected String getKeyName() {
+    return dirName + UUID.randomUUID().toString();
+  }
+
+  /**
+   * V1 multipart keys are addressed by (parentID, fileName, uploadID)
+   * rather than by (volume, bucket, key, uploadID) as in the base class.
+   */
+  @Override
+  protected String getMultipartKey(String volumeName, String bucketName,
+      String keyName, String multipartUploadID) {
+    String fileName = StringUtils.substringAfter(keyName, dirName);
+    return omMetadataManager.getMultipartKey(parentID, fileName,
+        multipartUploadID);
+  }
+
+  /** Returns the V1 (new FS-layout) variant of the initiate-MPU response. */
+  @Override
+  protected S3InitiateMultipartUploadResponse getS3InitiateMultipartUploadResp(
+      OmMultipartKeyInfo multipartKeyInfo, OmKeyInfo omKeyInfo,
+      OzoneManagerProtocolProtos.OMResponse omResponse) {
+    return new S3InitiateMultipartUploadResponseV1(omResponse, multipartKeyInfo,
+        omKeyInfo, new ArrayList<>());
+  }
+
+  /** Returns the V1 (new FS-layout) variant of the abort response. */
+  @Override
+  protected S3MultipartUploadAbortResponse getS3MultipartUploadAbortResp(
+      String multipartKey, OmMultipartKeyInfo omMultipartKeyInfo,
+      OmBucketInfo omBucketInfo,
+      OzoneManagerProtocolProtos.OMResponse omResponse) {
+    return new S3MultipartUploadAbortResponseV1(omResponse, multipartKey,
+        omMultipartKeyInfo, true, omBucketInfo);
+  }
+
+  /** Builds a V1 part-key info keyed by (parentID, fileName). */
+  @Override
+  public OzoneManagerProtocolProtos.PartKeyInfo createPartKeyInfo(
+      String volumeName, String bucketName, String keyName, int partNumber) {
+
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    return createPartKeyInfoV1(volumeName, bucketName, parentID, fileName,
+        partNumber);
+  }
+}

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org