You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ozone.apache.org by ra...@apache.org on 2020/11/02 06:36:06 UTC

[ozone] branch HDDS-2939 updated (d74b856 -> 181bb34)

This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a change to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git.


 discard d74b856  HDDS-4332: ListFileStatus - do lookup in directory and file tables (#1503)
    omit 51bb909  HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable (#1473)
    omit 6f2fa22  HDDS-2949: mkdir : store directory entries in a separate table (#1404)
     add 2650723  HDDS-4343. ReplicationManager.handleOverReplicatedContainer() does not handle unhealthyReplicas properly. (#1495)
     add 814428e  HDDS-3995. Fix s3g met NPE exception while write file by multiPartUpload (#1499)
     add 342bf6d  HDDS-4327. Potential resource leakage using BatchOperation. (#1493)
     add aff7c52  HDDS-4269. Ozone DataNode thinks a volume is failed if an unexpected file is in the HDDS root directory. (#1490)
     add 8fab5f2  HDDS-2922. Balance ratis leader distribution in datanodes (#1371)
     add 25e2046  HDDS-4297. Allow multiple transactions per container to be sent for deletion by SCM.
     add 9baaf64  HDDS-4158. Provide a class type for Java based configuration (#1407)
     add 049793d  HDDS-4301. SCM CA certificate does not encode KeyUsage extension properly (#1468)
     add 10df489  HDDS-4328. Provide fallback cache restore key (#1505)
     add 4e1d2ef  HDDS-4306. Ozone checkstyle rule can't be imported to IntelliJ. (#1472)
     add 6ba64bf  HDDS-4359. Expose VolumeIOStats in DN JMX (#1506)
     add c1069a6  HDDS-4362. Change hadoop32 test to use 3.2 image (#1521)
     add 6112603  HDDS-4088. Adding Owner info for Authorizer plugin to honor owner access rights (#1395)
     add 808aabd  HDDS-3882. Update modification time when updating volume/bucket/key ACLs (#1301)
     add 18711f3  HDDS-4388. Make writeStateMachineTimeout retry count proportional to node failure timeout (#1519)
     add 5e5635f  HDDS-4386: Each EndpointStateMachine uses its own thread pool to talk with SCM/Recon (#1518)
     add c67aa2f  HDDS-3731. [doc]add storage space quota document. (#1516)
     add 3861e77  HDDS-4117. Normalize Keypath for listKeys. (#1451)
     add a9c3d8c  HDDS-4405. Proxy failover is logging with out trying all OMS. (#1531)
     add df43d85  HDDS-4366. SCM deletion service should delete configured number of blocks every interval. (#1513)
     add b42449e  HDDS-4400. Make raft log directory deletion configurable during pipeline remove. (#1527)
     add 1ac1e24  HDDS-3959. Avoid HddsProtos.PipelineID#toString (#1525)
     add d8f4dc1  HDDS-4345. Replace the deprecated Lock method (#1497)
     add 8d3d6e4  HDDS-4391. UnixPath.toUri() is expensive. (#1536)
     new 88a4108  HDDS-2949: mkdir : store directory entries in a separate table (#1404)
     new da1cb75  HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable (#1473)
     new 181bb34  HDDS-4332: ListFileStatus - do lookup in directory and file tables (#1503)

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (d74b856)
            \
             N -- N -- N   refs/heads/HDDS-2939 (181bb34)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 3 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .github/workflows/post-commit.yml                  |   3 +
 .../hadoop/hdds/scm/storage/CommitWatcher.java     |   2 +-
 .../org/apache/hadoop/hdds/ratis/RatisHelper.java  |  19 ++
 .../java/org/apache/hadoop/hdds/scm/ScmConfig.java |  20 ++
 .../org/apache/hadoop/hdds/scm/ScmConfigKeys.java  |   3 +
 .../hadoop/hdds/scm/exceptions/SCMException.java   |   3 +-
 .../apache/hadoop/hdds/scm/pipeline/Pipeline.java  |  42 ++-
 .../common/src/main/resources/ozone-default.xml    |  18 ++
 .../hadoop/hdds/conf/SimpleConfiguration.java      |  13 +
 .../hadoop/hdds/conf/TestOzoneConfiguration.java   |   7 +-
 .../org/apache/hadoop/hdds/conf/ConfigTag.java     |   3 +-
 .../org/apache/hadoop/hdds/conf/ConfigType.java    |   3 +-
 .../hdds/conf/ConfigurationReflectionUtil.java     |  12 +
 .../common/statemachine/EndpointStateMachine.java  |  16 ++
 .../ClosePipelineCommandHandler.java               |  15 +-
 .../CreatePipelineCommandHandler.java              |  34 ++-
 .../commandhandler/DeleteBlocksCommandHandler.java |  65 ++---
 .../states/datanode/RunningDatanodeState.java      |   8 +-
 .../common/transport/server/XceiverServerSpi.java  |  10 +-
 .../transport/server/ratis/XceiverServerRatis.java |  64 ++++-
 .../container/common/utils/HddsVolumeUtil.java     |   5 +
 .../ozone/container/common/volume/HddsVolume.java  |   8 +-
 .../container/common/volume/VolumeIOStats.java     |  72 +++--
 .../container/keyvalue/impl/BlockManagerImpl.java  |  70 ++---
 .../background/BlockDeletingService.java           |  45 ++--
 .../protocol/commands/CreatePipelineCommand.java   |  62 ++++-
 .../TestCreatePipelineCommandHandler.java          |   7 +-
 hadoop-hdds/dev-support/checkstyle/checkstyle.xml  |   4 +-
 hadoop-hdds/docs/content/feature/Quota.md          |  74 +++++
 hadoop-hdds/docs/content/feature/Quota.zh.md       |  67 +++++
 .../hdds/conf/DatanodeRatisServerConfig.java       |  17 ++
 .../certificates/utils/CertificateSignRequest.java |   2 +-
 .../certificates/utils/SelfSignedCertificate.java  |   4 +-
 .../interface-client/src/main/proto/hdds.proto     |   1 +
 .../proto/ScmServerDatanodeHeartbeatProtocol.proto |   1 +
 .../src/main/proto/ScmServerProtocol.proto         |   1 +
 .../block/DatanodeDeletedBlockTransactions.java    | 132 +++------
 .../hadoop/hdds/scm/block/DeletedBlockLog.java     |   4 +-
 .../hadoop/hdds/scm/block/DeletedBlockLogImpl.java |  73 +++--
 .../hdds/scm/block/SCMBlockDeletingService.java    | 130 ++++-----
 .../hdds/scm/container/ReplicationManager.java     |   3 +-
 .../hdds/scm/container/SCMContainerManager.java    |  28 +-
 .../hdds/scm/pipeline/PipelineStateManager.java    |   3 +-
 .../hdds/scm/pipeline/RatisPipelineProvider.java   |  29 +-
 .../algorithms/DefaultLeaderChoosePolicy.java}     |  26 +-
 .../choose/algorithms/LeaderChoosePolicy.java      |  55 ++++
 .../algorithms/LeaderChoosePolicyFactory.java      |  75 ++++++
 .../algorithms/MinLeaderCountChoosePolicy.java     |  91 +++++++
 .../leader/choose/algorithms}/package-info.java    |   6 +-
 .../hadoop/hdds/scm/block/TestDeletedBlockLog.java | 100 +++----
 .../choose/algorithms/TestLeaderChoosePolicy.java  |  74 +++++
 hadoop-ozone/common/pom.xml                        |   5 +
 .../main/java/org/apache/hadoop/ozone/OmUtils.java |  39 +++
 .../ozone/om/ha/OMFailoverProxyProvider.java       |   2 +-
 ...OzoneManagerProtocolClientSideTranslatorPB.java |   4 +-
 .../hadoop/ozone/security/acl/OzoneObjInfo.java    |  10 +
 .../hadoop/ozone/security/acl/RequestContext.java  |  48 +++-
 .../hadoop/ozone/om/lock/TestOzoneManagerLock.java |  52 ++--
 .../dist/src/main/compose/ozone-mr/hadoop32/.env   |   6 +-
 .../dist/src/main/compose/ozonesecure-mr/.env      |   3 +-
 .../compose/ozonesecure-mr/docker-compose.yaml     |   8 +-
 .../dist/src/main/compose/ozonesecure-mr/test.sh   |   1 +
 .../fs/ozone/TestOzoneFSWithObjectStoreCreate.java |  71 ++++-
 .../hdds/scm/pipeline/TestLeaderChoosePolicy.java  | 216 +++++++++++++++
 .../TestRatisPipelineCreateAndDestroy.java         |   2 +-
 .../apache/hadoop/ozone/TestMiniOzoneCluster.java  |   4 +-
 .../commandhandler/TestBlockDeletion.java          |   7 +-
 .../container/metrics/TestContainerMetrics.java    |  14 +
 .../org/apache/hadoop/ozone/om/TestOmAcls.java     |   9 +-
 .../ozone/om/TestOzoneManagerListVolumes.java      |  36 +--
 .../ozone/om/TestOzoneManagerRocksDBLogging.java   |   2 +-
 .../src/main/proto/OmClientProtocol.proto          |   3 +
 .../apache/hadoop/ozone/om/BucketManagerImpl.java  |  32 +--
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  67 +++--
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |  87 ++++--
 .../apache/hadoop/ozone/om/PrefixManagerImpl.java  |  24 +-
 .../hadoop/ozone/om/S3SecretManagerImpl.java       |   8 +-
 .../apache/hadoop/ozone/om/VolumeManagerImpl.java  |  36 +--
 .../hadoop/ozone/om/request/OMClientRequest.java   |  46 ++--
 .../om/request/bucket/acl/OMBucketAclRequest.java  |  15 ++
 .../request/bucket/acl/OMBucketAddAclRequest.java  |  15 ++
 .../bucket/acl/OMBucketRemoveAclRequest.java       |  15 ++
 .../request/bucket/acl/OMBucketSetAclRequest.java  |  15 ++
 .../ozone/om/request/key/OMKeyCommitRequest.java   |   4 +-
 .../hadoop/ozone/om/request/key/OMKeyRequest.java  |  34 +++
 .../ozone/om/request/key/OMKeysDeleteRequest.java  |   4 +-
 .../ozone/om/request/key/OMKeysRenameRequest.java  |   9 +-
 .../ozone/om/request/key/acl/OMKeyAclRequest.java  |  15 ++
 .../om/request/key/acl/OMKeyAddAclRequest.java     |  15 ++
 .../om/request/key/acl/OMKeyRemoveAclRequest.java  |  15 ++
 .../om/request/key/acl/OMKeySetAclRequest.java     |  15 ++
 .../request/key/acl/prefix/OMPrefixAclRequest.java |   7 +
 .../hadoop/ozone/om/request/util/ObjectParser.java |   6 +-
 .../om/request/volume/OMVolumeSetOwnerRequest.java |   2 +-
 .../om/request/volume/acl/OMVolumeAclRequest.java  |  14 +
 .../request/volume/acl/OMVolumeAddAclRequest.java  |  15 ++
 .../volume/acl/OMVolumeRemoveAclRequest.java       |  15 ++
 .../request/volume/acl/OMVolumeSetAclRequest.java  |  15 ++
 .../ozone/security/acl/OzoneNativeAuthorizer.java  |  44 ++-
 .../ozone/om/request/TestOMRequestUtils.java       |  60 +++++
 .../bucket/acl/TestOMBucketAddAclRequest.java      | 119 ++++++++
 .../bucket/acl/TestOMBucketRemoveAclRequest.java   | 137 ++++++++++
 .../bucket/acl/TestOMBucketSetAclRequest.java      | 125 +++++++++
 .../ozone/om/request/bucket/acl/package-info.java  |   4 +-
 .../ozone/om/request/key/TestOMKeyAclRequest.java  | 155 ++++++++++-
 .../key/TestOMKeyPurgeRequestAndResponse.java      |  15 +-
 .../volume/acl/TestOMVolumeAddAclRequest.java      |   7 +
 .../volume/acl/TestOMVolumeRemoveAclRequest.java   |   8 +
 .../volume/acl/TestOMVolumeSetAclRequest.java      |   7 +
 .../bucket/TestOMBucketCreateResponse.java         |   8 +
 .../bucket/TestOMBucketDeleteResponse.java         |   8 +
 .../bucket/TestOMBucketSetPropertyResponse.java    |   9 +
 .../file/TestOMDirectoryCreateResponse.java        |   8 +
 .../ozone/om/response/key/TestOMKeyResponse.java   |   3 +
 .../s3/multipart/TestS3MultipartResponse.java      |   9 +
 .../security/TestOMDelegationTokenResponse.java    |   8 +
 .../volume/TestOMVolumeCreateResponse.java         |   8 +
 .../volume/TestOMVolumeDeleteResponse.java         |   8 +
 .../volume/TestOMVolumeSetOwnerResponse.java       |   9 +
 .../volume/TestOMVolumeSetQuotaResponse.java       |   9 +
 .../hadoop/ozone/security/acl/TestVolumeOwner.java | 298 +++++++++++++++++++++
 .../recon/tasks/TestContainerKeyMapperTask.java    |   2 +-
 .../hadoop/ozone/s3/endpoint/ObjectEndpoint.java   |   5 +-
 pom.xml                                            |   4 +-
 124 files changed, 3113 insertions(+), 680 deletions(-)
 create mode 100644 hadoop-hdds/docs/content/feature/Quota.md
 create mode 100644 hadoop-hdds/docs/content/feature/Quota.zh.md
 copy hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/{choose/algorithms/RandomPipelineChoosePolicy.java => leader/choose/algorithms/DefaultLeaderChoosePolicy.java} (54%)
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/LeaderChoosePolicy.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/LeaderChoosePolicyFactory.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/MinLeaderCountChoosePolicy.java
 copy hadoop-hdds/{client/src/test/java/org/apache/hadoop/hdds/scm/storage => server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms}/package-info.java (88%)
 create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/TestLeaderChoosePolicy.java
 create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java
 create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/acl/TestOMBucketAddAclRequest.java
 create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/acl/TestOMBucketRemoveAclRequest.java
 create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/acl/TestOMBucketSetAclRequest.java
 copy hadoop-ozone/ozone-manager/src/{main => test}/java/org/apache/hadoop/ozone/om/request/bucket/acl/package-info.java (92%)
 create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 02/03: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable (#1473)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit da1cb752ef370103e75207f4190b10c4f8e15e02
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Tue Oct 13 22:48:35 2020 +0530

    HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable (#1473)
---
 .../apache/hadoop/ozone/om/helpers/OmKeyInfo.java  | 108 ++++++++-
 .../apache/hadoop/fs/ozone/TestOzoneFileOps.java   | 231 ++++++++++++++++++
 .../src/main/proto/OmClientProtocol.proto          |   1 +
 .../apache/hadoop/ozone/om/OMMetadataManager.java  |  15 +-
 .../hadoop/ozone/om/codec/OmKeyInfoCodec.java      |   8 +
 .../hadoop/ozone/om/OmMetadataManagerImpl.java     |  37 ++-
 .../om/ratis/utils/OzoneManagerRatisUtils.java     |  19 +-
 .../request/file/OMDirectoryCreateRequestV1.java   |  21 +-
 .../ozone/om/request/file/OMFileCreateRequest.java |  53 +++--
 .../om/request/file/OMFileCreateRequestV1.java     | 259 +++++++++++++++++++++
 .../ozone/om/request/file/OMFileRequest.java       | 132 ++++++++++-
 .../ozone/om/request/key/OMKeyCommitRequest.java   |  34 ++-
 ...ommitRequest.java => OMKeyCommitRequestV1.java} | 242 ++++++++++---------
 .../hadoop/ozone/om/request/key/OMKeyRequest.java  | 242 ++++++++++---------
 .../om/response/file/OMFileCreateResponseV1.java   |  91 ++++++++
 .../ozone/om/response/key/OMKeyCommitResponse.java |  19 ++
 ...mitResponse.java => OMKeyCommitResponseV1.java} |  58 ++---
 .../ozone/om/response/key/OMKeyCreateResponse.java |  18 +-
 .../ozone/om/request/TestOMRequestUtils.java       | 110 +++++++++
 .../file/TestOMDirectoryCreateRequestV1.java       |   1 +
 .../om/request/file/TestOMFileCreateRequest.java   |  91 +++++---
 .../om/request/file/TestOMFileCreateRequestV1.java | 192 +++++++++++++++
 .../om/request/key/TestOMKeyCommitRequest.java     |  74 ++++--
 .../om/request/key/TestOMKeyCommitRequestV1.java   | 106 +++++++++
 .../ozone/om/request/key/TestOMKeyRequest.java     |   9 +-
 .../response/file/TestOMFileCreateResponseV1.java  |  74 ++++++
 .../om/response/key/TestOMKeyCommitResponse.java   |  68 ++++--
 .../om/response/key/TestOMKeyCommitResponseV1.java | 101 ++++++++
 .../om/response/key/TestOMKeyCreateResponse.java   |  38 +--
 .../ozone/om/response/key/TestOMKeyResponse.java   |  25 +-
 30 files changed, 2088 insertions(+), 389 deletions(-)

diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
index d0e8bee..494e1b9 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
@@ -25,9 +25,11 @@ import java.util.List;
 import java.util.Map;
 import java.util.Objects;
 
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocationList;
 import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
@@ -52,6 +54,8 @@ public final class OmKeyInfo extends WithObjectID {
   private HddsProtos.ReplicationType type;
   private HddsProtos.ReplicationFactor factor;
   private FileEncryptionInfo encInfo;
+  private String fileName; // leaf node name
+  private long parentObjectID; // pointer to parent directory
 
   /**
    * ACL Information.
@@ -94,6 +98,22 @@ public final class OmKeyInfo extends WithObjectID {
     this.updateID = updateID;
   }
 
+  @SuppressWarnings("parameternumber")
+  OmKeyInfo(String volumeName, String bucketName, String keyName,
+            String fileName, List<OmKeyLocationInfoGroup> versions,
+            long dataSize, long creationTime, long modificationTime,
+            HddsProtos.ReplicationType type,
+            HddsProtos.ReplicationFactor factor,
+            Map<String, String> metadata,
+            FileEncryptionInfo encInfo, List<OzoneAcl> acls,
+            long parentObjectID, long objectID, long updateID) {
+    this(volumeName, bucketName, keyName, versions, dataSize,
+            creationTime, modificationTime, type, factor, metadata, encInfo,
+            acls, objectID, updateID);
+    this.fileName = fileName;
+    this.parentObjectID = parentObjectID;
+  }
+
   public String getVolumeName() {
     return volumeName;
   }
@@ -126,6 +146,19 @@ public final class OmKeyInfo extends WithObjectID {
     this.dataSize = size;
   }
 
+  public void setFileName(String fileName) {
+    this.fileName = fileName;
+  }
+
+  public String getFileName() {
+    return fileName;
+  }
+
+  public long getParentObjectID() {
+    return parentObjectID;
+  }
+
+
   public synchronized OmKeyLocationInfoGroup getLatestVersionLocations() {
     return keyLocationVersions.size() == 0? null :
         keyLocationVersions.get(keyLocationVersions.size() - 1);
@@ -267,6 +300,9 @@ public final class OmKeyInfo extends WithObjectID {
     private List<OzoneAcl> acls;
     private long objectID;
     private long updateID;
+    // not persisted to DB. FileName will be the last element in path keyName.
+    private String fileName;
+    private long parentObjectID;
 
     public Builder() {
       this.metadata = new HashMap<>();
@@ -369,11 +405,22 @@ public final class OmKeyInfo extends WithObjectID {
       return this;
     }
 
+    public Builder setFileName(String keyFileName) {
+      this.fileName = keyFileName;
+      return this;
+    }
+
+    public Builder setParentObjectID(long parentID) {
+      this.parentObjectID = parentID;
+      return this;
+    }
+
     public OmKeyInfo build() {
       return new OmKeyInfo(
-          volumeName, bucketName, keyName, omKeyLocationInfoGroups,
-          dataSize, creationTime, modificationTime, type, factor, metadata,
-          encInfo, acls, objectID, updateID);
+              volumeName, bucketName, keyName, fileName,
+              omKeyLocationInfoGroups, dataSize, creationTime,
+              modificationTime, type, factor, metadata, encInfo, acls,
+              parentObjectID, objectID, updateID);
     }
   }
 
@@ -386,11 +433,32 @@ public final class OmKeyInfo extends WithObjectID {
   }
 
   /**
+   * For network transmit.
+   *
+   * @param fullKeyName the user given full key name
+   * @return key info with the user given full key name
+   */
+  public KeyInfo getProtobuf(String fullKeyName) {
+    return getProtobuf(false, fullKeyName);
+  }
+
+  /**
    *
    * @param ignorePipeline true for persist to DB, false for network transmit.
    * @return
    */
   public KeyInfo getProtobuf(boolean ignorePipeline) {
+    return getProtobuf(ignorePipeline, null);
+  }
+
+  /**
+   * Gets KeyInfo with the user given key name.
+   *
+   * @param ignorePipeline   ignore pipeline flag
+   * @param fullKeyName user given key name
+   * @return key info object
+   */
+  private KeyInfo getProtobuf(boolean ignorePipeline, String fullKeyName) {
     long latestVersion = keyLocationVersions.size() == 0 ? -1 :
         keyLocationVersions.get(keyLocationVersions.size() - 1).getVersion();
 
@@ -402,7 +470,6 @@ public final class OmKeyInfo extends WithObjectID {
     KeyInfo.Builder kb = KeyInfo.newBuilder()
         .setVolumeName(volumeName)
         .setBucketName(bucketName)
-        .setKeyName(keyName)
         .setDataSize(dataSize)
         .setFactor(factor)
         .setType(type)
@@ -413,7 +480,13 @@ public final class OmKeyInfo extends WithObjectID {
         .addAllMetadata(KeyValueUtil.toProtobuf(metadata))
         .addAllAcls(OzoneAclUtil.toProtobuf(acls))
         .setObjectID(objectID)
-        .setUpdateID(updateID);
+        .setUpdateID(updateID)
+        .setParentID(parentObjectID);
+    if (StringUtils.isNotBlank(fullKeyName)) {
+      kb.setKeyName(fullKeyName);
+    } else {
+      kb.setKeyName(keyName);
+    }
     if (encInfo != null) {
       kb.setFileEncryptionInfo(OMPBHelper.convert(encInfo));
     }
@@ -451,6 +524,11 @@ public final class OmKeyInfo extends WithObjectID {
     if (keyInfo.hasUpdateID()) {
       builder.setUpdateID(keyInfo.getUpdateID());
     }
+    if (keyInfo.hasParentID()) {
+      builder.setParentObjectID(keyInfo.getParentID());
+    }
+    // not persisted to DB. FileName will be filtered out from keyName
+    builder.setFileName(OzoneFSUtils.getFileName(keyInfo.getKeyName()));
     return builder.build();
   }
 
@@ -464,6 +542,8 @@ public final class OmKeyInfo extends WithObjectID {
         ", creationTime='" + creationTime + '\'' +
         ", type='" + type + '\'' +
         ", factor='" + factor + '\'' +
+        ", objectID='" + objectID + '\'' +
+        ", parentID='" + parentObjectID + '\'' +
         '}';
   }
 
@@ -489,12 +569,13 @@ public final class OmKeyInfo extends WithObjectID {
         Objects.equals(metadata, omKeyInfo.metadata) &&
         Objects.equals(acls, omKeyInfo.acls) &&
         objectID == omKeyInfo.objectID &&
-        updateID == omKeyInfo.updateID;
+        updateID == omKeyInfo.updateID &&
+        parentObjectID == omKeyInfo.parentObjectID;
   }
 
   @Override
   public int hashCode() {
-    return Objects.hash(volumeName, bucketName, keyName);
+    return Objects.hash(volumeName, bucketName, keyName, parentObjectID);
   }
 
   /**
@@ -511,8 +592,10 @@ public final class OmKeyInfo extends WithObjectID {
         .setReplicationType(type)
         .setReplicationFactor(factor)
         .setFileEncryptionInfo(encInfo)
-        .setObjectID(objectID).setUpdateID(updateID);
-
+        .setObjectID(objectID)
+        .setUpdateID(updateID)
+        .setParentObjectID(parentObjectID)
+        .setFileName(fileName);
 
     keyLocationVersions.forEach(keyLocationVersion ->
         builder.addOmKeyLocationInfoGroup(
@@ -540,4 +623,11 @@ public final class OmKeyInfo extends WithObjectID {
   public void clearFileEncryptionInfo() {
     this.encInfo = null;
   }
+
+  public String getPath() {
+    if (StringUtils.isBlank(getFileName())) {
+      return getKeyName();
+    }
+    return getParentObjectID() + OzoneConsts.OM_KEY_PREFIX + getFileName();
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java
new file mode 100644
index 0000000..d097268
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java
@@ -0,0 +1,231 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.TestDataUtil;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.util.StringUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.concurrent.TimeoutException;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
+
+/**
+ * Test verifies the entries and operations in file table, open file table etc.
+ */
+public class TestOzoneFileOps {
+
+  @Rule
+  public Timeout timeout = new Timeout(300000);
+
+  private static final Logger LOG =
+          LoggerFactory.getLogger(TestOzoneFileOps.class);
+
+  private MiniOzoneCluster cluster;
+  private FileSystem fs;
+  private String volumeName;
+  private String bucketName;
+
+  @Before
+  public void setupOzoneFileSystem()
+          throws IOException, TimeoutException, InterruptedException {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setInt(FS_TRASH_INTERVAL_KEY, 1);
+    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, false);
+    cluster = MiniOzoneCluster.newBuilder(conf)
+            .setNumDatanodes(3)
+            .build();
+    cluster.waitForClusterToBeReady();
+    // create a volume and a bucket to be used by OzoneFileSystem
+    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster);
+    volumeName = bucket.getVolumeName();
+    bucketName = bucket.getName();
+
+    String rootPath = String.format("%s://%s.%s/",
+            OzoneConsts.OZONE_URI_SCHEME, bucket.getName(),
+            bucket.getVolumeName());
+
+    // Set the fs.defaultFS and start the filesystem
+    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
+    // Set the number of keys to be processed during batch operate.
+    conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5);
+    fs = FileSystem.get(conf);
+  }
+
+  @After
+  public void tearDown() {
+    IOUtils.closeQuietly(fs);
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test(timeout = 300_000)
+  public void testCreateFile() throws Exception {
+    // Op 1. create dir -> /d1/d2/d3/d4/
+    Path parent = new Path("/d1/d2/");
+    Path file = new Path(parent, "file1");
+    FSDataOutputStream outputStream = fs.create(file);
+    String openFileKey = "";
+
+    OMMetadataManager omMgr = cluster.getOzoneManager().getMetadataManager();
+    OmBucketInfo omBucketInfo = omMgr.getBucketTable().get(
+            omMgr.getBucketKey(volumeName, bucketName));
+    Assert.assertNotNull("Failed to find bucketInfo", omBucketInfo);
+
+    ArrayList<String> dirKeys = new ArrayList<>();
+    long d1ObjectID = verifyDirKey(omBucketInfo.getObjectID(), "d1", "/d1",
+            dirKeys, omMgr);
+    long d2ObjectID = verifyDirKey(d1ObjectID, "d2", "/d1/d2", dirKeys,
+            omMgr);
+    openFileKey = d2ObjectID + OzoneConsts.OM_KEY_PREFIX + file.getName();
+
+    // verify entries in directory table
+    TableIterator<String, ? extends
+            Table.KeyValue<String, OmDirectoryInfo>> iterator =
+            omMgr.getDirectoryTable().iterator();
+    iterator.seekToFirst();
+    int count = dirKeys.size();
+    Assert.assertEquals("Unexpected directory table entries!", 2, count);
+    while (iterator.hasNext()) {
+      count--;
+      Table.KeyValue<String, OmDirectoryInfo> value = iterator.next();
+      verifyKeyFormat(value.getKey(), dirKeys);
+    }
+    Assert.assertEquals("Unexpected directory table entries!", 0, count);
+
+    // verify entries in open key table
+    TableIterator<String, ? extends
+            Table.KeyValue<String, OmKeyInfo>> keysItr =
+            omMgr.getOpenKeyTable().iterator();
+    keysItr.seekToFirst();
+
+    while (keysItr.hasNext()) {
+      count++;
+      Table.KeyValue<String, OmKeyInfo> value = keysItr.next();
+      verifyOpenKeyFormat(value.getKey(), openFileKey);
+      verifyOMFileInfoFormat(value.getValue(), file.getName(), d2ObjectID);
+    }
+    Assert.assertEquals("Unexpected file table entries!", 1, count);
+
+    // trigger CommitKeyRequest
+    outputStream.close();
+
+    Assert.assertTrue("Failed to commit the open file:" + openFileKey,
+            omMgr.getOpenKeyTable().isEmpty());
+
+    OmKeyInfo omKeyInfo = omMgr.getKeyTable().get(openFileKey);
+    Assert.assertNotNull("Invalid Key!", omKeyInfo);
+    verifyOMFileInfoFormat(omKeyInfo, file.getName(), d2ObjectID);
+  }
+
+  private void verifyOMFileInfoFormat(OmKeyInfo omKeyInfo, String fileName,
+                                      long parentID) {
+    Assert.assertEquals("Wrong keyName", fileName,
+            omKeyInfo.getKeyName());
+    Assert.assertEquals("Wrong parentID", parentID,
+            omKeyInfo.getParentObjectID());
+    String dbKey = parentID + OzoneConsts.OM_KEY_PREFIX + fileName;
+    Assert.assertEquals("Wrong path format", dbKey,
+            omKeyInfo.getPath());
+  }
+
+  /**
+   * Verify key name format and the DB key existence in the expected dirKeys
+   * list.
+   *
+   * @param key     table keyName
+   * @param dirKeys expected keyName
+   */
+  private void verifyKeyFormat(String key, ArrayList<String> dirKeys) {
+    String[] keyParts = StringUtils.split(key,
+            OzoneConsts.OM_KEY_PREFIX.charAt(0));
+    Assert.assertEquals("Invalid KeyName", 2, keyParts.length);
+    boolean removed = dirKeys.remove(key);
+    Assert.assertTrue("Key:" + key + " doesn't exists in directory table!",
+            removed);
+  }
+
+  /**
+   * Verify key name format and the DB key existence in the expected
+   * openFileKeys list.
+   *
+   * @param key          table keyName
+   * @param openFileKey expected keyName
+   */
+  private void verifyOpenKeyFormat(String key, String openFileKey) {
+    String[] keyParts = StringUtils.split(key,
+            OzoneConsts.OM_KEY_PREFIX.charAt(0));
+    Assert.assertEquals("Invalid KeyName:" + key, 3, keyParts.length);
+    String[] expectedOpenFileParts = StringUtils.split(openFileKey,
+            OzoneConsts.OM_KEY_PREFIX.charAt(0));
+    Assert.assertEquals("ParentId/Key:" + expectedOpenFileParts[0]
+                    + " doesn't exists in openFileTable!",
+            expectedOpenFileParts[0] + OzoneConsts.OM_KEY_PREFIX
+                    + expectedOpenFileParts[1],
+            keyParts[0] + OzoneConsts.OM_KEY_PREFIX + keyParts[1]);
+  }
+
+  long verifyDirKey(long parentId, String dirKey, String absolutePath,
+                    ArrayList<String> dirKeys, OMMetadataManager omMgr)
+          throws Exception {
+    String dbKey = parentId + OzoneConsts.OM_KEY_PREFIX + dirKey;
+    dirKeys.add(dbKey);
+    OmDirectoryInfo dirInfo = omMgr.getDirectoryTable().get(dbKey);
+    Assert.assertNotNull("Failed to find " + absolutePath +
+            " using dbKey: " + dbKey, dirInfo);
+    Assert.assertEquals("Parent Id mismatches", parentId,
+            dirInfo.getParentObjectID());
+    Assert.assertEquals("Mismatches directory name", dirKey,
+            dirInfo.getName());
+    Assert.assertTrue("Mismatches directory creation time param",
+            dirInfo.getCreationTime() > 0);
+    Assert.assertEquals("Mismatches directory modification time param",
+            dirInfo.getCreationTime(), dirInfo.getModificationTime());
+    return dirInfo.getObjectID();
+  }
+
+}
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index abbee63..32b3578 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -768,6 +768,7 @@ message KeyInfo {
     repeated OzoneAclInfo acls = 13;
     optional uint64 objectID = 14;
     optional uint64 updateID = 15;
+    optional uint64 parentID = 16;
 }
 
 message DirectoryInfo {
diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
index 4c9025b..b4fad19 100644
--- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
+++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
@@ -391,12 +391,23 @@ public interface OMMetadataManager {
   Set<String> listTableNames();
 
   /**
-   * Given a volume, bucket and a key, return the corresponding DB prefixKey
-   * key.
+   * Given parent object id and path component name, return the corresponding
+   * DB 'prefixKey' key.
    *
    * @param parentObjectId - parent object Id
    * @param pathComponentName   - path component name
    * @return DB directory key as String.
    */
   String getOzonePathKey(long parentObjectId, String pathComponentName);
+
+  /**
+   * Returns DB key name of an open file in OM metadata store. The key is
+   * formed as parentObjectId, fileName and client id, separated by '/'.
+   *
+   * @param parentObjectId - parent object Id
+   * @param fileName       - file name
+   * @param id             - client id for this open request
+   * @return DB open file key as String.
+   */
+  String getOpenFileName(long parentObjectId, String fileName, long id);
 }
diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java
index a7e1eab..8a28451 100644
--- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java
+++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java
@@ -30,6 +30,14 @@ import org.slf4j.LoggerFactory;
 
 /**
  * Codec to encode OmKeyInfo as byte array.
+ *
+ * <p>
+ * If the layout version "ozone.om.layout.version" is V1 and
+ * "ozone.om.enable.filesystem.paths" is TRUE. Then, DB stores only the leaf
+ * node name into the 'keyName' field.
+ * <p>
+ * For example, the user given key path is '/a/b/c/d/e/file1', then in DB
+ * 'keyName' field stores only the leaf node name, which is 'file1'.
  */
 public class OmKeyInfoCodec implements Codec<OmKeyInfo> {
   private static final Logger LOG =
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index 3e9a476..2788f8d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -72,6 +72,7 @@ import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
 import org.apache.hadoop.ozone.om.lock.OzoneManagerLock;
 import org.apache.hadoop.ozone.om.ratis.OMTransactionInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.protocol.proto
     .OzoneManagerProtocolProtos.UserVolumeInfo;
 import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
@@ -129,6 +130,10 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
    * |----------------------------------------------------------------------|
    * |  directoryTable    | parentId/directoryName -> DirectoryInfo         |
    * |----------------------------------------------------------------------|
+   * |  fileTable         | parentId/fileName -> KeyInfo                    |
+   * |----------------------------------------------------------------------|
+   * |  openFileTable     | parentId/fileName/id -> KeyInfo                 |
+   * |----------------------------------------------------------------------|
    * |  transactionInfoTable | #TRANSACTIONINFO -> OMTransactionInfo        |
    * |----------------------------------------------------------------------|
    */
@@ -144,6 +149,8 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
   public static final String DELEGATION_TOKEN_TABLE = "dTokenTable";
   public static final String PREFIX_TABLE = "prefixTable";
   public static final String DIRECTORY_TABLE = "directoryTable";
+  public static final String FILE_TABLE = "fileTable";
+  public static final String OPEN_FILE_TABLE = "openFileTable";
   public static final String TRANSACTION_INFO_TABLE =
       "transactionInfoTable";
 
@@ -163,6 +170,8 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
   private Table dTokenTable;
   private Table prefixTable;
   private Table dirTable;
+  private Table fileTable;
+  private Table openFileTable;
   private Table transactionInfoTable;
   private boolean isRatisEnabled;
   private boolean ignorePipelineinKey;
@@ -191,7 +200,8 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
    * For subclass overriding.
    */
   protected OmMetadataManagerImpl() {
-    this.lock = new OzoneManagerLock(new OzoneConfiguration());
+    OzoneConfiguration conf = new OzoneConfiguration();
+    this.lock = new OzoneManagerLock(conf);
     this.openKeyExpireThresholdMS =
         OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT;
   }
@@ -217,6 +227,9 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
 
   @Override
   public Table<String, OmKeyInfo> getKeyTable() {
+    if (OzoneManagerRatisUtils.isOmLayoutVersionV1()) {
+      return fileTable;
+    }
     return keyTable;
   }
 
@@ -227,6 +240,9 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
 
   @Override
   public Table<String, OmKeyInfo> getOpenKeyTable() {
+    if (OzoneManagerRatisUtils.isOmLayoutVersionV1()) {
+      return openFileTable;
+    }
     return openKeyTable;
   }
 
@@ -334,6 +350,8 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
         .addTable(S3_SECRET_TABLE)
         .addTable(PREFIX_TABLE)
         .addTable(DIRECTORY_TABLE)
+        .addTable(FILE_TABLE)
+        .addTable(OPEN_FILE_TABLE)
         .addTable(TRANSACTION_INFO_TABLE)
         .addCodec(OzoneTokenIdentifier.class, new TokenIdentifierCodec())
         .addCodec(OmKeyInfo.class, new OmKeyInfoCodec(true))
@@ -404,6 +422,14 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
             OmDirectoryInfo.class);
     checkTableStatus(dirTable, DIRECTORY_TABLE);
 
+    fileTable = this.store.getTable(FILE_TABLE, String.class,
+            OmKeyInfo.class);
+    checkTableStatus(fileTable, FILE_TABLE);
+
+    openFileTable = this.store.getTable(OPEN_FILE_TABLE, String.class,
+            OmKeyInfo.class);
+    checkTableStatus(openFileTable, OPEN_FILE_TABLE);
+
     transactionInfoTable = this.store.getTable(TRANSACTION_INFO_TABLE,
         String.class, OMTransactionInfo.class);
     checkTableStatus(transactionInfoTable, TRANSACTION_INFO_TABLE);
@@ -1159,4 +1185,13 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
     return builder.toString();
   }
 
+  @Override
+  public String getOpenFileName(long parentID, String fileName,
+                                long id) {
+    StringBuilder openKey = new StringBuilder();
+    openKey.append(parentID);
+    openKey.append(OM_KEY_PREFIX).append(fileName);
+    openKey.append(OM_KEY_PREFIX).append(id);
+    return openKey.toString();
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index 1ea225b..d4c0f17 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -35,9 +35,11 @@ import org.apache.hadoop.ozone.om.request.bucket.acl.OMBucketSetAclRequest;
 import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest;
 import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestV1;
 import org.apache.hadoop.ozone.om.request.file.OMFileCreateRequest;
+import org.apache.hadoop.ozone.om.request.file.OMFileCreateRequestV1;
 import org.apache.hadoop.ozone.om.request.key.OMKeysDeleteRequest;
 import org.apache.hadoop.ozone.om.request.key.OMAllocateBlockRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequest;
+import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequestV1;
 import org.apache.hadoop.ozone.om.request.key.OMKeyCreateRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyDeleteRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyPurgeRequest;
@@ -86,7 +88,7 @@ public final class OzoneManagerRatisUtils {
 
   // TODO: Temporary workaround for OM upgrade path and will be replaced once
   //  upgrade HDDS-3698 story reaches consensus.
-  private static boolean omLayoutVersionV1 = true;
+  private static boolean omLayoutVersionV1 = false;
 
   private OzoneManagerRatisUtils() {
   }
@@ -138,6 +140,9 @@ public final class OzoneManagerRatisUtils {
     case CreateKey:
       return new OMKeyCreateRequest(omRequest);
     case CommitKey:
+      if (omLayoutVersionV1) {
+        return new OMKeyCommitRequestV1(omRequest);
+      }
       return new OMKeyCommitRequest(omRequest);
     case DeleteKey:
       return new OMKeyDeleteRequest(omRequest);
@@ -153,6 +158,9 @@ public final class OzoneManagerRatisUtils {
       }
       return new OMDirectoryCreateRequest(omRequest);
     case CreateFile:
+      if (omLayoutVersionV1) {
+        return new OMFileCreateRequestV1(omRequest);
+      }
       return new OMFileCreateRequest(omRequest);
     case PurgeKeys:
       return new OMKeyPurgeRequest(omRequest);
@@ -326,4 +334,13 @@ public final class OzoneManagerRatisUtils {
 
     return true;
   }
+
+  /**
+   * Returns true if the OM layout version is V1.
+   * @return true if layout version is V1, otherwise false
+   */
+  public static boolean isOmLayoutVersionV1() {
+    return omLayoutVersionV1;
+  }
+
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java
index b8b5697..4367018 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java
@@ -148,8 +148,10 @@ public class OMDirectoryCreateRequestV1 extends OMDirectoryCreateRequest {
           omDirectoryResult == NONE) {
 
         // prepare all missing parents
-        missingParentInfos = OMDirectoryCreateRequestV1.getAllParentDirInfo(
-                ozoneManager, keyArgs, omPathInfo, trxnLogIndex);
+        missingParentInfos =
+                OMDirectoryCreateRequestV1.getAllMissingParentDirInfo(
+                        ozoneManager, keyArgs, omPathInfo, trxnLogIndex);
+
         // prepare leafNode dir
         OmDirectoryInfo dirInfo = createDirectoryInfoWithACL(
                 omPathInfo.getLeafNodeName(),
@@ -230,14 +232,15 @@ public class OMDirectoryCreateRequestV1 extends OMDirectoryCreateRequest {
 
   /**
    * Construct OmDirectoryInfo for every parent directory in missing list.
-   * @param ozoneManager
-   * @param keyArgs
-   * @param pathInfo list of parent directories to be created and its ACLs
-   * @param trxnLogIndex
-   * @return
-   * @throws IOException
+   *
+   * @param ozoneManager Ozone Manager
+   * @param keyArgs      key arguments
+   * @param pathInfo     list of parent directories to be created and its ACLs
+   * @param trxnLogIndex transaction log index id
+   * @return list of missing parent directories
+   * @throws IOException DB failure
    */
-  public static List<OmDirectoryInfo> getAllParentDirInfo(
+  public static List<OmDirectoryInfo> getAllMissingParentDirInfo(
           OzoneManager ozoneManager, KeyArgs keyArgs,
           OMFileRequest.OMPathInfoV1 pathInfo, long trxnLogIndex)
           throws IOException {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
index 9a7f31a..7d104e4 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
@@ -234,23 +234,10 @@ public class OMFileCreateRequest extends OMKeyRequest {
       List<OzoneAcl> inheritAcls = pathInfo.getAcls();
 
       // Check if a file or directory exists with same key name.
-      if (omDirectoryResult == FILE_EXISTS) {
-        if (!isOverWrite) {
-          throw new OMException("File " + keyName + " already exists",
-              OMException.ResultCodes.FILE_ALREADY_EXISTS);
-        }
-      } else if (omDirectoryResult == DIRECTORY_EXISTS) {
-        throw new OMException("Can not write to directory: " + keyName,
-            OMException.ResultCodes.NOT_A_FILE);
-      } else if (omDirectoryResult == FILE_EXISTS_IN_GIVENPATH) {
-        throw new OMException(
-            "Can not create file: " + keyName + " as there " +
-                "is already file in the given path",
-            OMException.ResultCodes.NOT_A_FILE);
-      }
+      checkDirectoryResult(keyName, isOverWrite, omDirectoryResult);
 
       if (!isRecursive) {
-        checkAllParentsExist(ozoneManager, keyArgs, pathInfo);
+        checkAllParentsExist(keyArgs, pathInfo);
       }
 
       // do open key
@@ -351,8 +338,40 @@ public class OMFileCreateRequest extends OMKeyRequest {
     return omClientResponse;
   }
 
-  private void checkAllParentsExist(OzoneManager ozoneManager,
-      KeyArgs keyArgs,
+  /**
+   * Verify om directory result.
+   *
+   * @param keyName           key name
+   * @param isOverWrite       flag represents whether file can be overwritten
+   * @param omDirectoryResult directory result
+   * @throws OMException if a file or directory already exists in the given path
+   */
+  protected void checkDirectoryResult(String keyName, boolean isOverWrite,
+      OMFileRequest.OMDirectoryResult omDirectoryResult) throws OMException {
+    if (omDirectoryResult == FILE_EXISTS) {
+      if (!isOverWrite) {
+        throw new OMException("File " + keyName + " already exists",
+            OMException.ResultCodes.FILE_ALREADY_EXISTS);
+      }
+    } else if (omDirectoryResult == DIRECTORY_EXISTS) {
+      throw new OMException("Can not write to directory: " + keyName,
+          OMException.ResultCodes.NOT_A_FILE);
+    } else if (omDirectoryResult == FILE_EXISTS_IN_GIVENPATH) {
+      throw new OMException(
+          "Can not create file: " + keyName + " as there " +
+              "is already file in the given path",
+          OMException.ResultCodes.NOT_A_FILE);
+    }
+  }
+
+  /**
+   * Verify the existence of parent directory.
+   *
+   * @param keyArgs  key arguments
+   * @param pathInfo om path info
+   * @throws IOException directory not found
+   */
+  protected void checkAllParentsExist(KeyArgs keyArgs,
       OMFileRequest.OMPathInfo pathInfo) throws IOException {
     String keyName = keyArgs.getKeyName();
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
new file mode 100644
index 0000000..e46416b
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
@@ -0,0 +1,259 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.file;
+
+import com.google.common.base.Optional;
+import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OMMetrics;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponse;
+import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponseV1;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+
+/**
+ * Handles create file request for layout version V1.
+ */
+public class OMFileCreateRequestV1 extends OMFileCreateRequest {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OMFileCreateRequestV1.class);
+  public OMFileCreateRequestV1(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override
+  @SuppressWarnings("methodlength")
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+
+    CreateFileRequest createFileRequest = getOmRequest().getCreateFileRequest();
+    KeyArgs keyArgs = createFileRequest.getKeyArgs();
+    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
+
+    String volumeName = keyArgs.getVolumeName();
+    String bucketName = keyArgs.getBucketName();
+    String keyName = keyArgs.getKeyName();
+
+    // if isRecursive is true, file would be created even if parent
+    // directories do not exist.
+    boolean isRecursive = createFileRequest.getIsRecursive();
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("File create for : " + volumeName + "/" + bucketName + "/"
+          + keyName + ":" + isRecursive);
+    }
+
+    // if isOverWrite is true, file would be over written.
+    boolean isOverWrite = createFileRequest.getIsOverwrite();
+
+    OMMetrics omMetrics = ozoneManager.getMetrics();
+    omMetrics.incNumCreateFile();
+
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+
+    boolean acquiredLock = false;
+
+    OmVolumeArgs omVolumeArgs = null;
+    OmBucketInfo omBucketInfo = null;
+    final List<OmKeyLocationInfo> locations = new ArrayList<>();
+    List<OmDirectoryInfo> missingParentInfos;
+    int numKeysCreated = 0;
+
+    OMClientResponse omClientResponse = null;
+    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
+        getOmRequest());
+    IOException exception = null;
+    Result result = null;
+    try {
+      keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
+      volumeName = keyArgs.getVolumeName();
+      bucketName = keyArgs.getBucketName();
+
+      if (keyName.length() == 0) {
+        // Check if this is the root of the filesystem.
+        throw new OMException("Can not write to directory: " + keyName,
+                OMException.ResultCodes.NOT_A_FILE);
+      }
+
+      // check Acl
+      checkKeyAcls(ozoneManager, volumeName, bucketName, keyName,
+          IAccessAuthorizer.ACLType.CREATE, OzoneObj.ResourceType.KEY);
+
+      // acquire lock
+      acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
+          volumeName, bucketName);
+
+      validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
+
+      OmKeyInfo dbFileInfo = null;
+
+      OMFileRequest.OMPathInfoV1 pathInfoV1 =
+              OMFileRequest.verifyDirectoryKeysInPath(omMetadataManager,
+                      volumeName, bucketName, keyName, Paths.get(keyName));
+
+      if (pathInfoV1.getDirectoryResult()
+              == OMFileRequest.OMDirectoryResult.FILE_EXISTS) {
+        String dbFileKey = omMetadataManager.getOzonePathKey(
+                pathInfoV1.getLastKnownParentId(),
+                pathInfoV1.getLeafNodeName());
+        dbFileInfo = OMFileRequest.getOmKeyInfoFromFileTable(false,
+                omMetadataManager, dbFileKey, keyName);
+      }
+
+      // check if the file or directory already existed in OM
+      checkDirectoryResult(keyName, isOverWrite,
+              pathInfoV1.getDirectoryResult());
+
+      if (!isRecursive) {
+        checkAllParentsExist(keyArgs, pathInfoV1);
+      }
+
+      // add all missing parents to dir table
+      missingParentInfos =
+              OMDirectoryCreateRequestV1.getAllMissingParentDirInfo(
+                      ozoneManager, keyArgs, pathInfoV1, trxnLogIndex);
+
+      // total number of keys created.
+      numKeysCreated = missingParentInfos.size();
+
+      // do open key
+      OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().get(
+          omMetadataManager.getBucketKey(volumeName, bucketName));
+
+      OmKeyInfo omFileInfo = prepareFileInfo(omMetadataManager, keyArgs,
+              dbFileInfo, keyArgs.getDataSize(), locations,
+              getFileEncryptionInfo(keyArgs), ozoneManager.getPrefixManager(),
+              bucketInfo, pathInfoV1, trxnLogIndex,
+              ozoneManager.isRatisEnabled());
+
+      long openVersion = omFileInfo.getLatestVersionLocations().getVersion();
+      long clientID = createFileRequest.getClientID();
+      String dbOpenFileName = omMetadataManager.getOpenFileName(
+              pathInfoV1.getLastKnownParentId(), pathInfoV1.getLeafNodeName(),
+              clientID);
+
+      // Append new blocks
+      List<OmKeyLocationInfo> newLocationList = keyArgs.getKeyLocationsList()
+          .stream().map(OmKeyLocationInfo::getFromProtobuf)
+          .collect(Collectors.toList());
+      omFileInfo.appendNewBlocks(newLocationList, false);
+
+      omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName);
+      omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
+      // check volume quota
+      long preAllocatedSpace = newLocationList.size()
+          * ozoneManager.getScmBlockSize()
+          * omFileInfo.getFactor().getNumber();
+      checkVolumeQuotaInBytes(omVolumeArgs, preAllocatedSpace);
+
+      // Add to cache entry can be done outside of lock for this openKey.
+      // Even if bucket gets deleted, when commitKey we shall identify if
+      // bucket gets deleted.
+      OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager,
+              dbOpenFileName, omFileInfo, pathInfoV1.getLeafNodeName(),
+              trxnLogIndex);
+
+      // Add cache entries for the prefix directories.
+      // Skip adding for the file key itself, until Key Commit.
+      OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager,
+              Optional.absent(), Optional.of(missingParentInfos),
+              trxnLogIndex);
+
+      // update usedBytes atomically.
+      omVolumeArgs.getUsedBytes().add(preAllocatedSpace);
+      omBucketInfo.getUsedBytes().add(preAllocatedSpace);
+
+      // Prepare response. Sets user given full key name in the 'keyName'
+      // attribute in response object.
+      omResponse.setCreateFileResponse(CreateFileResponse.newBuilder()
+          .setKeyInfo(omFileInfo.getProtobuf(keyName))
+          .setID(clientID)
+          .setOpenVersion(openVersion).build())
+          .setCmdType(Type.CreateFile);
+      omClientResponse = new OMFileCreateResponseV1(omResponse.build(),
+              omFileInfo, missingParentInfos, clientID, omVolumeArgs,
+              omBucketInfo);
+
+      result = Result.SUCCESS;
+    } catch (IOException ex) {
+      result = Result.FAILURE;
+      exception = ex;
+      omMetrics.incNumCreateFileFails();
+      omResponse.setCmdType(Type.CreateFile);
+      omClientResponse = new OMFileCreateResponse(createErrorOMResponse(
+            omResponse, exception));
+    } finally {
+      addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
+          omDoubleBufferHelper);
+      if (acquiredLock) {
+        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
+            bucketName);
+      }
+    }
+
+    // Audit Log outside the lock
+    auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(
+        OMAction.CREATE_FILE, auditMap, exception,
+        getOmRequest().getUserInfo()));
+
+    switch (result) {
+    case SUCCESS:
+      omMetrics.incNumKeys(numKeysCreated);
+      LOG.debug("File created. Volume:{}, Bucket:{}, Key:{}", volumeName,
+          bucketName, keyName);
+      break;
+    case FAILURE:
+      LOG.error("File create failed. Volume:{}, Bucket:{}, Key{}.",
+          volumeName, bucketName, keyName, exception);
+      break;
+    default:
+      LOG.error("Unrecognized Result for OMFileCreateRequest: {}",
+          createFileRequest);
+    }
+
+    return omClientResponse;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index aa9592c..833c564 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -27,6 +27,7 @@ import java.util.List;
 import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 import org.apache.commons.lang3.tuple.ImmutablePair;
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.OzoneAcl;
@@ -36,6 +37,7 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.jetbrains.annotations.Nullable;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -427,7 +429,7 @@ public final class OMFileRequest {
   /**
    * Adding directory info to the Table cache.
    *
-   * @param omMetadataManager  OM Metdata Manager
+   * @param omMetadataManager  OM Metadata Manager
    * @param dirInfo            directory info
    * @param missingParentInfos list of the parents to be added to DB
    * @param trxnLogIndex       transaction log index
@@ -453,4 +455,132 @@ public final class OMFileRequest {
     }
   }
 
+  /**
+   * Adding Key info to the openFile Table cache.
+   *
+   * @param omMetadataManager OM Metadata Manager
+   * @param dbOpenFileName    open file name key
+   * @param omFileInfo        key info
+   * @param fileName          file name
+   * @param trxnLogIndex      transaction log index
+   * Note: the cached OmKeyInfo stores only the leaf node name in keyName.
+   */
+  public static void addOpenFileTableCacheEntry(
+          OMMetadataManager omMetadataManager, String dbOpenFileName,
+          @Nullable OmKeyInfo omFileInfo, String fileName, long trxnLogIndex) {
+
+    Optional<OmKeyInfo> keyInfoOptional = Optional.absent();
+    if (omFileInfo != null) {
+      // New key format for the openFileTable.
+      // For example, the user given key path is '/a/b/c/d/e/file1', then in DB
+      // keyName field stores only the leaf node name, which is 'file1'.
+      omFileInfo.setKeyName(fileName);
+      keyInfoOptional = Optional.of(omFileInfo);
+    }
+
+    omMetadataManager.getOpenKeyTable().addCacheEntry(
+            new CacheKey<>(dbOpenFileName),
+            new CacheValue<>(keyInfoOptional, trxnLogIndex));
+  }
+
+  /**
+   * Adding Key info to the file table cache.
+   *
+   * @param omMetadataManager OM Metadata Manager
+   * @param dbFileKey         file name key
+   * @param omFileInfo        key info
+   * @param fileName          file name
+   * @param trxnLogIndex      transaction log index
+   * Note: the cached OmKeyInfo stores only the leaf node name in keyName.
+   */
+  public static void addFileTableCacheEntry(
+          OMMetadataManager omMetadataManager, String dbFileKey,
+          OmKeyInfo omFileInfo, String fileName, long trxnLogIndex) {
+
+    // New key format for the fileTable.
+    // For example, the user given key path is '/a/b/c/d/e/file1', then in DB
+    // keyName field stores only the leaf node name, which is 'file1'.
+    omFileInfo.setKeyName(fileName);
+
+    omMetadataManager.getKeyTable().addCacheEntry(
+            new CacheKey<>(dbFileKey),
+            new CacheValue<>(Optional.of(omFileInfo), trxnLogIndex));
+  }
+
+  /**
+   * Adding omKeyInfo to open file table.
+   *
+   * @param omMetadataMgr    OM Metadata Manager
+   * @param batchOp          batch of db operations
+   * @param omFileInfo       omKeyInfo
+   * @param openKeySessionID clientID
+   * @throws IOException DB failure
+   */
+  public static void addToOpenFileTable(OMMetadataManager omMetadataMgr,
+                                        BatchOperation batchOp,
+                                        OmKeyInfo omFileInfo,
+                                        long openKeySessionID)
+          throws IOException {
+
+    String dbOpenFileKey = omMetadataMgr.getOpenFileName(
+            omFileInfo.getParentObjectID(), omFileInfo.getFileName(),
+            openKeySessionID);
+
+    omMetadataMgr.getOpenKeyTable().putWithBatch(batchOp, dbOpenFileKey,
+            omFileInfo);
+  }
+
+  /**
+   * Adding omKeyInfo to file table.
+   *
+   * @param omMetadataMgr OM Metadata Manager
+   * @param batchOp       batch of db operations
+   * @param omFileInfo    omKeyInfo
+   * @throws IOException DB failure
+   */
+  public static void addToFileTable(OMMetadataManager omMetadataMgr,
+                                    BatchOperation batchOp,
+                                    OmKeyInfo omFileInfo)
+          throws IOException {
+
+    String dbFileKey = omMetadataMgr.getOzonePathKey(
+            omFileInfo.getParentObjectID(), omFileInfo.getFileName());
+
+    omMetadataMgr.getKeyTable().putWithBatch(batchOp,
+            dbFileKey, omFileInfo);
+  }
+
+  /**
+   * Gets om key info from open key table if openFileTable flag is true,
+   * otherwise get it from key table.
+   *
+   * @param openFileTable if true, read the KeyInfo from openFileTable,
+   *                      otherwise from fileTable
+   * @param omMetadataMgr OM Metadata Manager
+   * @param dbOpenFileKey open file key name in DB
+   * @param keyName       key name
+   * @return om key info
+   * @throws IOException DB failure
+   */
+  public static OmKeyInfo getOmKeyInfoFromFileTable(boolean openFileTable,
+      OMMetadataManager omMetadataMgr, String dbOpenFileKey, String keyName)
+          throws IOException {
+
+    OmKeyInfo dbOmKeyInfo;
+    if (openFileTable) {
+      dbOmKeyInfo = omMetadataMgr.getOpenKeyTable().get(dbOpenFileKey);
+    } else {
+      dbOmKeyInfo = omMetadataMgr.getKeyTable().get(dbOpenFileKey);
+    }
+
+    // DB OMKeyInfo will store only fileName into keyName field. This
+    // function is to set user given keyName into the OmKeyInfo object.
+    // For example, the user given key path is '/a/b/c/d/e/file1', then in DB
+    // keyName field stores only the leaf node name, which is 'file1'.
+    if (dbOmKeyInfo != null) {
+      dbOmKeyInfo.setKeyName(keyName);
+    }
+    return dbOmKeyInfo;
+  }
+
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
index b1d47de..32ecc2c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
@@ -228,6 +228,30 @@ public class OMKeyCommitRequest extends OMKeyRequest {
     auditLog(auditLogger, buildAuditMessage(OMAction.COMMIT_KEY, auditMap,
           exception, getOmRequest().getUserInfo()));
 
+    processResult(commitKeyRequest, volumeName, bucketName, keyName, omMetrics,
+            exception, omKeyInfo, result);
+
+    return omClientResponse;
+  }
+
+  /**
+   * Process result of om request execution.
+   *
+   * @param commitKeyRequest commit key request
+   * @param volumeName       volume name
+   * @param bucketName       bucket name
+   * @param keyName          key name
+   * @param omMetrics        om metrics
+   * @param exception        exception trace
+   * @param omKeyInfo        omKeyInfo
+   * @param result           stores the result of the execution
+   */
+  @SuppressWarnings("parameternumber")
+  protected void processResult(CommitKeyRequest commitKeyRequest,
+                               String volumeName, String bucketName,
+                               String keyName, OMMetrics omMetrics,
+                               IOException exception, OmKeyInfo omKeyInfo,
+                               Result result) {
     switch (result) {
     case SUCCESS:
       // As when we commit the key, then it is visible in ozone, so we should
@@ -239,18 +263,16 @@ public class OMKeyCommitRequest extends OMKeyRequest {
         omMetrics.incNumKeys();
       }
       LOG.debug("Key committed. Volume:{}, Bucket:{}, Key:{}", volumeName,
-          bucketName, keyName);
+              bucketName, keyName);
       break;
     case FAILURE:
-      LOG.error("Key commit failed. Volume:{}, Bucket:{}, Key:{}.",
-          volumeName, bucketName, keyName, exception);
+      LOG.error("Key commit failed. Volume:{}, Bucket:{}, Key:{}. Exception:{}",
+              volumeName, bucketName, keyName, exception);
       omMetrics.incNumKeyCommitFails();
       break;
     default:
       LOG.error("Unrecognized Result for OMKeyCommitRequest: {}",
-          commitKeyRequest);
+              commitKeyRequest);
     }
-
-    return omClientResponse;
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java
similarity index 54%
copy from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
copy to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java
index b1d47de..e985a9a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java
@@ -18,87 +18,57 @@
 
 package org.apache.hadoop.ozone.om.request.key;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import org.apache.hadoop.ozone.audit.AuditLogger;
 import org.apache.hadoop.ozone.audit.OMAction;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OMMetrics;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.key.OMKeyCommitResponse;
+import org.apache.hadoop.ozone.om.response.key.OMKeyCommitResponseV1;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CommitKeyRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocation;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
 
 /**
- * Handles CommitKey request.
+ * Handles CommitKey request layout version V1.
  */
-public class OMKeyCommitRequest extends OMKeyRequest {
+public class OMKeyCommitRequestV1 extends OMKeyCommitRequest {
 
   private static final Logger LOG =
-      LoggerFactory.getLogger(OMKeyCommitRequest.class);
+          LoggerFactory.getLogger(OMKeyCommitRequestV1.class);
 
-  public OMKeyCommitRequest(OMRequest omRequest) {
+  public OMKeyCommitRequestV1(OMRequest omRequest) {
     super(omRequest);
   }
 
   @Override
-  public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
-    CommitKeyRequest commitKeyRequest = getOmRequest().getCommitKeyRequest();
-    Preconditions.checkNotNull(commitKeyRequest);
-
-    KeyArgs keyArgs = commitKeyRequest.getKeyArgs();
-
-    // Verify key name
-    final boolean checkKeyNameEnabled = ozoneManager.getConfiguration()
-         .getBoolean(OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_KEY,
-                 OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_DEFAULT);
-    if(checkKeyNameEnabled){
-      OmUtils.validateKeyName(StringUtils.removeEnd(keyArgs.getKeyName(),
-              OzoneConsts.FS_FILE_COPYING_TEMP_SUFFIX));
-    }
-
-    KeyArgs.Builder newKeyArgs =
-        keyArgs.toBuilder().setModificationTime(Time.now())
-            .setKeyName(validateAndNormalizeKey(
-                ozoneManager.getEnableFileSystemPaths(), keyArgs.getKeyName()));
-
-    return getOmRequest().toBuilder()
-        .setCommitKeyRequest(commitKeyRequest.toBuilder()
-            .setKeyArgs(newKeyArgs)).setUserInfo(getUserInfo()).build();
-  }
-
-  @Override
   @SuppressWarnings("methodlength")
   public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
       long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
@@ -119,7 +89,7 @@ public class OMKeyCommitRequest extends OMKeyRequest {
     Map<String, String> auditMap = buildKeyArgsAuditMap(commitKeyArgs);
 
     OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
-        getOmRequest());
+            getOmRequest());
 
     IOException exception = null;
     OmKeyInfo omKeyInfo = null;
@@ -138,14 +108,13 @@ public class OMKeyCommitRequest extends OMKeyRequest {
 
       // check Acl
       checkKeyAclsInOpenKeyTable(ozoneManager, volumeName, bucketName,
-          keyName, IAccessAuthorizer.ACLType.WRITE,
-          commitKeyRequest.getClientID());
+              keyName, IAccessAuthorizer.ACLType.WRITE,
+              commitKeyRequest.getClientID());
 
-      String dbOzoneKey =
-          omMetadataManager.getOzoneKey(volumeName, bucketName,
-              keyName);
-      String dbOpenKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-          keyName, commitKeyRequest.getClientID());
+
+      String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+      Iterator<Path> pathComponents = Paths.get(keyName).iterator();
+      String dbOpenFileKey = null;
 
       List<OmKeyLocationInfo> locationInfoList = new ArrayList<>();
       for (KeyLocation keyLocation : commitKeyArgs.getKeyLocationsList()) {
@@ -153,25 +122,25 @@ public class OMKeyCommitRequest extends OMKeyRequest {
       }
 
       bucketLockAcquired =
-          omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
-              volumeName, bucketName);
+              omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
+                      volumeName, bucketName);
 
       validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
 
-      // Check for directory exists with same name, if it exists throw error. 
-      if (ozoneManager.getEnableFileSystemPaths()) {
-        if (checkDirectoryAlreadyExists(volumeName, bucketName, keyName,
-            omMetadataManager)) {
-          throw new OMException("Can not create file: " + keyName +
-              " as there is already directory in the given path", NOT_A_FILE);
-        }
-      }
-
-
-      omKeyInfo = omMetadataManager.getOpenKeyTable().get(dbOpenKey);
+      String fileName = OzoneFSUtils.getFileName(keyName);
+      omBucketInfo = omMetadataManager.getBucketTable().get(bucketKey);
+      long bucketId = omBucketInfo.getObjectID();
+      long parentID = getParentID(bucketId, pathComponents, keyName,
+              omMetadataManager, ozoneManager);
+      String dbFileKey = omMetadataManager.getOzonePathKey(parentID, fileName);
+      dbOpenFileKey = omMetadataManager.getOpenFileName(parentID, fileName,
+              commitKeyRequest.getClientID());
+
+      omKeyInfo = OMFileRequest.getOmKeyInfoFromFileTable(true,
+              omMetadataManager, dbOpenFileKey, keyName);
       if (omKeyInfo == null) {
-        throw new OMException("Failed to commit key, as " + dbOpenKey +
-            "entry is not found in the OpenKey table", KEY_NOT_FOUND);
+        throw new OMException("Failed to commit key, as " + dbOpenFileKey +
+                "entry is not found in the OpenKey table", KEY_NOT_FOUND);
       }
       omKeyInfo.setDataSize(commitKeyArgs.getDataSize());
 
@@ -184,73 +153,120 @@ public class OMKeyCommitRequest extends OMKeyRequest {
       omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
 
       // Add to cache of open key table and key table.
-      omMetadataManager.getOpenKeyTable().addCacheEntry(
-          new CacheKey<>(dbOpenKey),
-          new CacheValue<>(Optional.absent(), trxnLogIndex));
+      OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager, dbFileKey,
+              null, fileName, trxnLogIndex);
 
-      omMetadataManager.getKeyTable().addCacheEntry(
-          new CacheKey<>(dbOzoneKey),
-          new CacheValue<>(Optional.of(omKeyInfo), trxnLogIndex));
+      OMFileRequest.addFileTableCacheEntry(omMetadataManager, dbFileKey,
+              omKeyInfo, fileName, trxnLogIndex);
 
       long scmBlockSize = ozoneManager.getScmBlockSize();
       int factor = omKeyInfo.getFactor().getNumber();
       omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName);
-      omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
       // update usedBytes atomically.
       // Block was pre-requested and UsedBytes updated when createKey and
       // AllocatedBlock. The space occupied by the Key shall be based on
       // the actual Key size, and the total Block size applied before should
       // be subtracted.
       long correctedSpace = omKeyInfo.getDataSize() * factor -
-          locationInfoList.size() * scmBlockSize * factor;
+              locationInfoList.size() * scmBlockSize * factor;
       omVolumeArgs.getUsedBytes().add(correctedSpace);
       omBucketInfo.getUsedBytes().add(correctedSpace);
 
-      omClientResponse = new OMKeyCommitResponse(omResponse.build(),
-          omKeyInfo, dbOzoneKey, dbOpenKey, omVolumeArgs, omBucketInfo);
+      omClientResponse = new OMKeyCommitResponseV1(omResponse.build(),
+              omKeyInfo, dbFileKey, dbOpenFileKey, omVolumeArgs, omBucketInfo);
 
       result = Result.SUCCESS;
     } catch (IOException ex) {
       result = Result.FAILURE;
       exception = ex;
-      omClientResponse = new OMKeyCommitResponse(createErrorOMResponse(
-          omResponse, exception));
+      omClientResponse = new OMKeyCommitResponseV1(createErrorOMResponse(
+              omResponse, exception));
     } finally {
       addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
-          omDoubleBufferHelper);
+              omDoubleBufferHelper);
 
       if(bucketLockAcquired) {
         omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
-            bucketName);
+                bucketName);
       }
     }
 
     auditLog(auditLogger, buildAuditMessage(OMAction.COMMIT_KEY, auditMap,
-          exception, getOmRequest().getUserInfo()));
-
-    switch (result) {
-    case SUCCESS:
-      // As when we commit the key, then it is visible in ozone, so we should
-      // increment here.
-      // As key also can have multiple versions, we need to increment keys
-      // only if version is 0. Currently we have not complete support of
-      // versioning of keys. So, this can be revisited later.
-      if (omKeyInfo.getKeyLocationVersions().size() == 1) {
-        omMetrics.incNumKeys();
+            exception, getOmRequest().getUserInfo()));
+
+    processResult(commitKeyRequest, volumeName, bucketName, keyName, omMetrics,
+            exception, omKeyInfo, result);
+
+    return omClientResponse;
+  }
+
+
+  /**
+   * Check for directory exists with same name, if it exists throw error.
+   *
+   * @param keyName                  key name
+   * @param ozoneManager             Ozone Manager
+   * @param reachedLastPathComponent true if the path component is a fileName
+   * @throws IOException if directory exists with same name
+   */
+  private void checkDirectoryAlreadyExists(String keyName,
+                                           OzoneManager ozoneManager,
+                                           boolean reachedLastPathComponent)
+          throws IOException {
+    // A directory already occupies the last path component (the file name),
+    if (reachedLastPathComponent && ozoneManager.getEnableFileSystemPaths()) {
+      throw new OMException("Can not create file: " + keyName +
+              " as there is already directory in the given path", NOT_A_FILE);
+    }
+  }
+
+  /**
+   * Get parent id for the user given path.
+   *
+   * @param bucketId          bucket id
+   * @param pathComponents    file path elements
+   * @param keyName           user given key name
+   * @param omMetadataManager metadata manager
+   * @return lastKnownParentID
+   * @throws IOException DB failure or parent not exists in DirectoryTable
+   */
+  private long getParentID(long bucketId, Iterator<Path> pathComponents,
+                           String keyName, OMMetadataManager omMetadataManager,
+                           OzoneManager ozoneManager)
+          throws IOException {
+
+    long lastKnownParentId = bucketId;
+
+    // If no sub-dirs then bucketID is the root/parent.
+    if(!pathComponents.hasNext()){
+      return bucketId;
+    }
+
+    OmDirectoryInfo omDirectoryInfo;
+    while (pathComponents.hasNext()) {
+      String nodeName = pathComponents.next().toString();
+      boolean reachedLastPathComponent = !pathComponents.hasNext();
+      String dbNodeName =
+              omMetadataManager.getOzonePathKey(lastKnownParentId, nodeName);
+
+      omDirectoryInfo = omMetadataManager.
+              getDirectoryTable().get(dbNodeName);
+      if (omDirectoryInfo != null) {
+        checkDirectoryAlreadyExists(keyName, ozoneManager,
+                reachedLastPathComponent);
+        lastKnownParentId = omDirectoryInfo.getObjectID();
+      } else {
+        // One of the sub-dirs doesn't exist in DB. The immediate parent must
+        // exist to commit the key, otherwise the operation will fail.
+        if (!reachedLastPathComponent) {
+          throw new OMException("Failed to commit key, as parent directory of "
+                  + keyName + " entry is not found in DirectoryTable",
+                  KEY_NOT_FOUND);
+        }
+        break;
       }
-      LOG.debug("Key committed. Volume:{}, Bucket:{}, Key:{}", volumeName,
-          bucketName, keyName);
-      break;
-    case FAILURE:
-      LOG.error("Key commit failed. Volume:{}, Bucket:{}, Key:{}.",
-          volumeName, bucketName, keyName, exception);
-      omMetrics.incNumKeyCommitFails();
-      break;
-    default:
-      LOG.error("Unrecognized Result for OMKeyCommitRequest: {}",
-          commitKeyRequest);
     }
 
-    return omClientResponse;
+    return lastKnownParentId;
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
index ee48f9b..2f10d2b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
@@ -251,40 +251,6 @@ public abstract class OMKeyRequest extends OMClientRequest {
     return edek;
   }
 
-  /**
-   * Create OmKeyInfo object.
-   * @return OmKeyInfo
-   */
-  @SuppressWarnings("parameterNumber")
-  protected OmKeyInfo createKeyInfo(@Nonnull KeyArgs keyArgs,
-      @Nonnull List<OmKeyLocationInfo> locations,
-      @Nonnull HddsProtos.ReplicationFactor factor,
-      @Nonnull HddsProtos.ReplicationType type, long size,
-      @Nullable FileEncryptionInfo encInfo,
-      @Nonnull PrefixManager prefixManager,
-      @Nullable OmBucketInfo omBucketInfo,
-        long transactionLogIndex) {
-    long objectID = OMFileRequest.getObjIDFromTxId(transactionLogIndex);
-
-    return new OmKeyInfo.Builder()
-        .setVolumeName(keyArgs.getVolumeName())
-        .setBucketName(keyArgs.getBucketName())
-        .setKeyName(keyArgs.getKeyName())
-        .setOmKeyLocationInfos(Collections.singletonList(
-            new OmKeyLocationInfoGroup(0, locations)))
-        .setCreationTime(keyArgs.getModificationTime())
-        .setModificationTime(keyArgs.getModificationTime())
-        .setDataSize(size)
-        .setReplicationType(type)
-        .setReplicationFactor(factor)
-        .setFileEncryptionInfo(encInfo)
-        .setAcls(getAclsForKey(keyArgs, omBucketInfo, prefixManager))
-        .addAllMetadata(KeyValueUtil.getFromProtobuf(keyArgs.getMetadataList()))
-        .setObjectID(objectID)
-        .setUpdateID(transactionLogIndex)
-        .build();
-  }
-
   private List< OzoneAcl > getAclsForKey(KeyArgs keyArgs,
       OmBucketInfo bucketInfo, PrefixManager prefixManager) {
     List<OzoneAcl> acls = new ArrayList<>();
@@ -338,76 +304,10 @@ public abstract class OMKeyRequest extends OMClientRequest {
       @Nullable OmBucketInfo omBucketInfo,
       long transactionLogIndex, boolean isRatisEnabled)
       throws IOException {
-    if (keyArgs.getIsMultipartKey()) {
-      return prepareMultipartKeyInfo(omMetadataManager, keyArgs,
-          size, locations, encInfo, prefixManager, omBucketInfo,
-          transactionLogIndex);
-      //TODO args.getMetadata
-    }
-    if (dbKeyInfo != null) {
-      // TODO: Need to be fixed, as when key already exists, we are
-      //  appending new blocks to existing key.
-      // The key already exist, the new blocks will be added as new version
-      // when locations.size = 0, the new version will have identical blocks
-      // as its previous version
-      dbKeyInfo.addNewVersion(locations, false);
-      dbKeyInfo.setDataSize(size + dbKeyInfo.getDataSize());
-      // The modification time is set in preExecute. Use the same
-      // modification time.
-      dbKeyInfo.setModificationTime(keyArgs.getModificationTime());
-      dbKeyInfo.setUpdateID(transactionLogIndex, isRatisEnabled);
-      return dbKeyInfo;
-    }
 
-    // the key does not exist, create a new object.
-    // Blocks will be appended as version 0.
-    return createKeyInfo(keyArgs, locations, keyArgs.getFactor(),
-        keyArgs.getType(), keyArgs.getDataSize(), encInfo, prefixManager,
-        omBucketInfo, transactionLogIndex);
-  }
-
-  /**
-   * Prepare OmKeyInfo for multi-part upload part key which will be persisted
-   * to openKeyTable.
-   * @return OmKeyInfo
-   * @throws IOException
-   */
-  @SuppressWarnings("parameternumber")
-  private OmKeyInfo prepareMultipartKeyInfo(
-      @Nonnull OMMetadataManager omMetadataManager,
-      @Nonnull KeyArgs args, long size,
-      @Nonnull List<OmKeyLocationInfo> locations,
-      FileEncryptionInfo encInfo,  @Nonnull PrefixManager prefixManager,
-      @Nullable OmBucketInfo omBucketInfo, @Nonnull long transactionLogIndex)
-      throws IOException {
-    HddsProtos.ReplicationFactor factor;
-    HddsProtos.ReplicationType type;
-
-    Preconditions.checkArgument(args.getMultipartNumber() > 0,
-        "PartNumber Should be greater than zero");
-    // When key is multipart upload part key, we should take replication
-    // type and replication factor from original key which has done
-    // initiate multipart upload. If we have not found any such, we throw
-    // error no such multipart upload.
-    String uploadID = args.getMultipartUploadID();
-    Preconditions.checkNotNull(uploadID);
-    String multipartKey = omMetadataManager
-        .getMultipartKey(args.getVolumeName(), args.getBucketName(),
-            args.getKeyName(), uploadID);
-    OmKeyInfo partKeyInfo = omMetadataManager.getOpenKeyTable().get(
-        multipartKey);
-    if (partKeyInfo == null) {
-      throw new OMException("No such Multipart upload is with specified " +
-          "uploadId " + uploadID,
-          OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
-    } else {
-      factor = partKeyInfo.getFactor();
-      type = partKeyInfo.getType();
-    }
-    // For this upload part we don't need to check in KeyTable. As this
-    // is not an actual key, it is a part of the key.
-    return createKeyInfo(args, locations, factor, type, size, encInfo,
-        prefixManager, omBucketInfo, transactionLogIndex);
+    return prepareFileInfo(omMetadataManager, keyArgs, dbKeyInfo, size,
+            locations, encInfo, prefixManager, omBucketInfo, null,
+            transactionLogIndex, isRatisEnabled);
   }
 
   /**
@@ -428,7 +328,6 @@ public abstract class OMKeyRequest extends OMClientRequest {
     }
   }
 
-
   /**
    * Check Acls for the ozone key.
    * @param ozoneManager
@@ -684,4 +583,139 @@ public abstract class OMKeyRequest extends OMClientRequest {
         new CacheKey<>(omMetadataManager.getBucketKey(volume, bucket)))
         .getCacheValue();
   }
+
+  /**
+   * Prepare OmKeyInfo which will be persisted to openKeyTable.
+   * @return OmKeyInfo
+   * @throws IOException
+   */
+  @SuppressWarnings("parameternumber")
+  protected OmKeyInfo prepareFileInfo(
+          @Nonnull OMMetadataManager omMetadataManager,
+          @Nonnull KeyArgs keyArgs, OmKeyInfo dbKeyInfo, long size,
+          @Nonnull List<OmKeyLocationInfo> locations,
+          @Nullable FileEncryptionInfo encInfo,
+          @Nonnull PrefixManager prefixManager,
+          @Nullable OmBucketInfo omBucketInfo,
+          OMFileRequest.OMPathInfoV1 omPathInfo,
+          long transactionLogIndex, boolean isRatisEnabled)
+          throws IOException {
+    if (keyArgs.getIsMultipartKey()) {
+      return prepareMultipartFileInfo(omMetadataManager, keyArgs,
+              size, locations, encInfo, prefixManager, omBucketInfo,
+              omPathInfo, transactionLogIndex);
+      //TODO args.getMetadata
+    }
+    if (dbKeyInfo != null) {
+      // TODO: Need to be fixed, as when key already exists, we are
+      //  appending new blocks to existing key.
+      // The key already exist, the new blocks will be added as new version
+      // when locations.size = 0, the new version will have identical blocks
+      // as its previous version
+      dbKeyInfo.addNewVersion(locations, false);
+      dbKeyInfo.setDataSize(size + dbKeyInfo.getDataSize());
+      // The modification time is set in preExecute. Use the same
+      // modification time.
+      dbKeyInfo.setModificationTime(keyArgs.getModificationTime());
+      dbKeyInfo.setUpdateID(transactionLogIndex, isRatisEnabled);
+      return dbKeyInfo;
+    }
+
+    // the key does not exist, create a new object.
+    // Blocks will be appended as version 0.
+    return createFileInfo(keyArgs, locations, keyArgs.getFactor(),
+            keyArgs.getType(), keyArgs.getDataSize(), encInfo, prefixManager,
+            omBucketInfo, omPathInfo, transactionLogIndex);
+  }
+
+  /**
+   * Create OmKeyInfo object.
+   * @return OmKeyInfo
+   */
+  @SuppressWarnings("parameterNumber")
+  protected OmKeyInfo createFileInfo(@Nonnull KeyArgs keyArgs,
+      @Nonnull List<OmKeyLocationInfo> locations,
+      @Nonnull HddsProtos.ReplicationFactor factor,
+      @Nonnull HddsProtos.ReplicationType type, long size,
+      @Nullable FileEncryptionInfo encInfo,
+      @Nonnull PrefixManager prefixManager,
+      @Nullable OmBucketInfo omBucketInfo,
+      OMFileRequest.OMPathInfoV1 omPathInfo,
+      long transactionLogIndex) {
+
+    OmKeyInfo.Builder builder = new OmKeyInfo.Builder();
+    builder.setVolumeName(keyArgs.getVolumeName())
+            .setBucketName(keyArgs.getBucketName())
+            .setKeyName(keyArgs.getKeyName())
+            .setOmKeyLocationInfos(Collections.singletonList(
+                    new OmKeyLocationInfoGroup(0, locations)))
+            .setCreationTime(keyArgs.getModificationTime())
+            .setModificationTime(keyArgs.getModificationTime())
+            .setDataSize(size)
+            .setReplicationType(type)
+            .setReplicationFactor(factor)
+            .setFileEncryptionInfo(encInfo)
+            .setAcls(getAclsForKey(keyArgs, omBucketInfo, prefixManager))
+            .addAllMetadata(KeyValueUtil.getFromProtobuf(
+                    keyArgs.getMetadataList()))
+            .setUpdateID(transactionLogIndex);
+    long objectID;
+    if (omPathInfo == null) {
+      // KeyTable metadata format
+      objectID = OMFileRequest.getObjIDFromTxId(transactionLogIndex);
+    } else {
+      // FileTable metadata format
+      objectID = omPathInfo.getLeafNodeObjectId();
+      builder.setParentObjectID(omPathInfo.getLastKnownParentId());
+      builder.setFileName(omPathInfo.getLeafNodeName());
+    }
+    builder.setObjectID(objectID);
+    return builder.build();
+  }
+
+  /**
+   * Prepare OmKeyInfo for multi-part upload part key which will be persisted
+   * to openKeyTable.
+   * @return OmKeyInfo
+   * @throws IOException
+   */
+  @SuppressWarnings("parameternumber")
+  private OmKeyInfo prepareMultipartFileInfo(
+          @Nonnull OMMetadataManager omMetadataManager,
+          @Nonnull KeyArgs args, long size,
+          @Nonnull List<OmKeyLocationInfo> locations,
+          FileEncryptionInfo encInfo,  @Nonnull PrefixManager prefixManager,
+          @Nullable OmBucketInfo omBucketInfo,
+          OMFileRequest.OMPathInfoV1 omPathInfo,
+          @Nonnull long transactionLogIndex)
+          throws IOException {
+    HddsProtos.ReplicationFactor factor;
+    HddsProtos.ReplicationType type;
+
+    Preconditions.checkArgument(args.getMultipartNumber() > 0,
+            "PartNumber Should be greater than zero");
+    // When key is multipart upload part key, we should take replication
+    // type and replication factor from original key which has done
+    // initiate multipart upload. If we have not found any such, we throw
+    // error no such multipart upload.
+    String uploadID = args.getMultipartUploadID();
+    Preconditions.checkNotNull(uploadID);
+    String multipartKey = omMetadataManager
+            .getMultipartKey(args.getVolumeName(), args.getBucketName(),
+                    args.getKeyName(), uploadID);
+    OmKeyInfo partKeyInfo = omMetadataManager.getOpenKeyTable().get(
+            multipartKey);
+    if (partKeyInfo == null) {
+      throw new OMException("No such Multipart upload is with specified " +
+              "uploadId " + uploadID,
+              OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
+    } else {
+      factor = partKeyInfo.getFactor();
+      type = partKeyInfo.getType();
+    }
+    // For this upload part we don't need to check in KeyTable. As this
+    // is not an actual key, it is a part of the key.
+    return createFileInfo(args, locations, factor, type, size, encInfo,
+            prefixManager, omBucketInfo, omPathInfo, transactionLogIndex);
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseV1.java
new file mode 100644
index 0000000..a168d8f
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseV1.java
@@ -0,0 +1,91 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.file;
+
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+
+import javax.annotation.Nonnull;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
+
+/**
+ * Response for create file request layout version V1.
+ */
+@CleanupTableInfo(cleanupTables = OPEN_FILE_TABLE)
+public class OMFileCreateResponseV1 extends OMFileCreateResponse {
+
+  private List<OmDirectoryInfo> parentDirInfos;
+
+  public OMFileCreateResponseV1(@Nonnull OMResponse omResponse,
+                                @Nonnull OmKeyInfo omKeyInfo,
+                                @Nonnull List<OmDirectoryInfo> parentDirInfos,
+                                long openKeySessionID,
+                                @Nonnull OmVolumeArgs omVolumeArgs,
+                                @Nonnull OmBucketInfo omBucketInfo) {
+    super(omResponse, omKeyInfo, new ArrayList<>(), openKeySessionID,
+        omVolumeArgs, omBucketInfo);
+    this.parentDirInfos = parentDirInfos;
+  }
+
+  @Override
+  public void addToDBBatch(OMMetadataManager omMetadataMgr,
+                              BatchOperation batchOp) throws IOException {
+
+    /**
+     * Create parent directory entries during Key Create - do not wait
+     * for Key Commit request.
+     * XXX handle stale directory entries.
+     */
+    if (parentDirInfos != null) {
+      for (OmDirectoryInfo parentDirInfo : parentDirInfos) {
+        String parentKey = parentDirInfo.getPath();
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("putWithBatch adding parent : key {} info : {}", parentKey,
+                  parentDirInfo);
+        }
+        omMetadataMgr.getDirectoryTable().putWithBatch(batchOp, parentKey,
+                parentDirInfo);
+      }
+    }
+
+    OMFileRequest.addToOpenFileTable(omMetadataMgr, batchOp, getOmKeyInfo(),
+            getOpenKeySessionID());
+
+    // update volume usedBytes.
+    omMetadataMgr.getVolumeTable().putWithBatch(batchOp,
+            omMetadataMgr.getVolumeKey(getOmVolumeArgs().getVolume()),
+            getOmVolumeArgs());
+    // update bucket usedBytes.
+    omMetadataMgr.getBucketTable().putWithBatch(batchOp,
+            omMetadataMgr.getBucketKey(getOmVolumeArgs().getVolume(),
+                    getOmBucketInfo().getBucketName()), getOmBucketInfo());
+  }
+
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java
index aede2ec..6661900 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java
@@ -86,4 +86,23 @@ public class OMKeyCommitResponse extends OMClientResponse {
             omBucketInfo.getBucketName()), omBucketInfo);
   }
 
+  protected String getOpenKeyName() {
+    return openKeyName;
+  }
+
+  protected OmKeyInfo getOmKeyInfo() {
+    return omKeyInfo;
+  }
+
+  protected OmVolumeArgs getOmVolumeArgs() {
+    return omVolumeArgs;
+  }
+
+  protected OmBucketInfo getOmBucketInfo() {
+    return omBucketInfo;
+  }
+
+  protected String getOzoneKeyName() {
+    return ozoneKeyName;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponseV1.java
similarity index 59%
copy from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java
copy to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponseV1.java
index aede2ec..bff5554 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponseV1.java
@@ -18,72 +18,64 @@
 
 package org.apache.hadoop.ozone.om.response.key;
 
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
 
-import java.io.IOException;
 import javax.annotation.Nonnull;
 
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE;
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE;
+import java.io.IOException;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE;
 
 /**
- * Response for CommitKey request.
+ * Response for CommitKey request layout version V1.
  */
-@CleanupTableInfo(cleanupTables = {OPEN_KEY_TABLE, KEY_TABLE})
-public class OMKeyCommitResponse extends OMClientResponse {
-
-  private OmKeyInfo omKeyInfo;
-  private String ozoneKeyName;
-  private String openKeyName;
-  private OmVolumeArgs omVolumeArgs;
-  private OmBucketInfo omBucketInfo;
+@CleanupTableInfo(cleanupTables = {OPEN_FILE_TABLE, FILE_TABLE})
+public class OMKeyCommitResponseV1 extends OMKeyCommitResponse {
 
-  public OMKeyCommitResponse(@Nonnull OMResponse omResponse,
-      @Nonnull OmKeyInfo omKeyInfo, String ozoneKeyName, String openKeyName,
-      @Nonnull OmVolumeArgs omVolumeArgs, @Nonnull OmBucketInfo omBucketInfo) {
-    super(omResponse);
-    this.omKeyInfo = omKeyInfo;
-    this.ozoneKeyName = ozoneKeyName;
-    this.openKeyName = openKeyName;
-    this.omVolumeArgs = omVolumeArgs;
-    this.omBucketInfo = omBucketInfo;
+  public OMKeyCommitResponseV1(@Nonnull OMResponse omResponse,
+                               @Nonnull OmKeyInfo omKeyInfo,
+                               String ozoneKeyName, String openKeyName,
+                               @Nonnull OmVolumeArgs omVolumeArgs,
+                               @Nonnull OmBucketInfo omBucketInfo) {
+    super(omResponse, omKeyInfo, ozoneKeyName, openKeyName, omVolumeArgs,
+            omBucketInfo);
   }
 
   /**
    * For when the request is not successful.
    * For a successful request, the other constructor should be used.
    */
-  public OMKeyCommitResponse(@Nonnull OMResponse omResponse) {
+  public OMKeyCommitResponseV1(@Nonnull OMResponse omResponse) {
     super(omResponse);
     checkStatusNotOK();
   }
 
   @Override
   public void addToDBBatch(OMMetadataManager omMetadataManager,
-      BatchOperation batchOperation) throws IOException {
+                           BatchOperation batchOperation) throws IOException {
 
     // Delete from OpenKey table
     omMetadataManager.getOpenKeyTable().deleteWithBatch(batchOperation,
-        openKeyName);
+            getOpenKeyName());
 
-    omMetadataManager.getKeyTable().putWithBatch(batchOperation, ozoneKeyName,
-        omKeyInfo);
+    OMFileRequest.addToFileTable(omMetadataManager, batchOperation,
+            getOmKeyInfo());
 
     // update volume usedBytes.
     omMetadataManager.getVolumeTable().putWithBatch(batchOperation,
-        omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()),
-        omVolumeArgs);
+            omMetadataManager.getVolumeKey(getOmVolumeArgs().getVolume()),
+            getOmVolumeArgs());
     // update bucket usedBytes.
     omMetadataManager.getBucketTable().putWithBatch(batchOperation,
-        omMetadataManager.getBucketKey(omVolumeArgs.getVolume(),
-            omBucketInfo.getBucketName()), omBucketInfo);
+            omMetadataManager.getBucketKey(getOmVolumeArgs().getVolume(),
+                    getOmBucketInfo().getBucketName()), getOmBucketInfo());
   }
-
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java
index 2ae5359..49ff8d9 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java
@@ -72,7 +72,7 @@ public class OMKeyCreateResponse extends OMClientResponse {
   }
 
   @Override
-  protected void addToDBBatch(OMMetadataManager omMetadataManager,
+  public void addToDBBatch(OMMetadataManager omMetadataManager,
       BatchOperation batchOperation) throws IOException {
 
     /**
@@ -108,5 +108,21 @@ public class OMKeyCreateResponse extends OMClientResponse {
         omMetadataManager.getBucketKey(omVolumeArgs.getVolume(),
             omBucketInfo.getBucketName()), omBucketInfo);
   }
+
+  protected long getOpenKeySessionID() {
+    return openKeySessionID;
+  }
+
+  protected OmKeyInfo getOmKeyInfo() {
+    return omKeyInfo;
+  }
+
+  protected OmVolumeArgs getOmVolumeArgs() {
+    return omVolumeArgs;
+  }
+
+  protected OmBucketInfo getOmBucketInfo() {
+    return omBucketInfo;
+  }
 }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
index acfbb6e..d019729 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
@@ -67,6 +67,7 @@ import org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType;
 import org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType;
 
 import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
@@ -810,4 +811,113 @@ public final class TestOMRequestUtils {
         new CacheKey<>(dbVolumeKey),
         new CacheValue<>(Optional.of(omVolumeArgs), 1L));
   }
+
+  /**
+   * Create OmKeyInfo.
+   */
+  @SuppressWarnings("parameterNumber")
+  public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName,
+      String keyName, HddsProtos.ReplicationType replicationType,
+      HddsProtos.ReplicationFactor replicationFactor, long objectID,
+      long parentID, long trxnLogIndex, long creationTime) {
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    return new OmKeyInfo.Builder()
+            .setVolumeName(volumeName)
+            .setBucketName(bucketName)
+            .setKeyName(keyName)
+            .setOmKeyLocationInfos(Collections.singletonList(
+                    new OmKeyLocationInfoGroup(0, new ArrayList<>())))
+            .setCreationTime(creationTime)
+            .setModificationTime(Time.now())
+            .setDataSize(1000L)
+            .setReplicationType(replicationType)
+            .setReplicationFactor(replicationFactor)
+            .setObjectID(objectID)
+            .setUpdateID(trxnLogIndex)
+            .setParentObjectID(parentID)
+            .setFileName(fileName)
+            .build();
+  }
+
+
+  /**
+   * Add key entry to KeyTable. If the openKeyTable flag is true, adds the
+   * entry to openKeyTable, else adds it to keyTable.
+   *
+   * @throws Exception DB failure
+   */
+  public static void addFileToKeyTable(boolean openKeyTable,
+                                       boolean addToCache, String fileName,
+                                       OmKeyInfo omKeyInfo,
+                                       long clientID, long trxnLogIndex,
+                                       OMMetadataManager omMetadataManager)
+          throws Exception {
+    if (openKeyTable) {
+      String ozoneKey = omMetadataManager.getOpenFileName(
+              omKeyInfo.getParentObjectID(), fileName, clientID);
+      if (addToCache) {
+        omMetadataManager.getOpenKeyTable().addCacheEntry(
+                new CacheKey<>(ozoneKey),
+                new CacheValue<>(Optional.of(omKeyInfo), trxnLogIndex));
+      }
+      omMetadataManager.getOpenKeyTable().put(ozoneKey, omKeyInfo);
+    } else {
+      String ozoneKey = omMetadataManager.getOzonePathKey(
+              omKeyInfo.getParentObjectID(), fileName);
+      if (addToCache) {
+        omMetadataManager.getKeyTable().addCacheEntry(new CacheKey<>(ozoneKey),
+                new CacheValue<>(Optional.of(omKeyInfo), trxnLogIndex));
+      }
+      omMetadataManager.getKeyTable().put(ozoneKey, omKeyInfo);
+    }
+  }
+
+  /**
+   * Gets bucketId from OM metadata manager.
+   *
+   * @param volumeName        volume name
+   * @param bucketName        bucket name
+   * @param omMetadataManager metadata manager
+   * @return bucket Id
+   * @throws Exception DB failure
+   */
+  public static long getBucketId(String volumeName, String bucketName,
+                                 OMMetadataManager omMetadataManager)
+          throws Exception {
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    return omBucketInfo.getObjectID();
+  }
+
+  /**
+   * Add path components to the directory table and returns last directory's
+   * object id.
+   *
+   * @param volumeName volume name
+   * @param bucketName bucket name
+   * @param key        key name
+   * @param omMetaMgr  metadata manager
+   * @return last directory object id
+   * @throws Exception
+   */
+  public static long addParentsToDirTable(String volumeName, String bucketName,
+                                    String key, OMMetadataManager omMetaMgr)
+          throws Exception {
+    long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName,
+            omMetaMgr);
+    String[] pathComponents = StringUtils.split(key, '/');
+    long objectId = bucketId + 10;
+    long parentId = bucketId;
+    long txnID = 50;
+    for (String pathElement : pathComponents) {
+      OmDirectoryInfo omDirInfo =
+              TestOMRequestUtils.createOmDirectoryInfo(pathElement, ++objectId,
+                      parentId);
+      TestOMRequestUtils.addDirKeyToDirTable(true, omDirInfo,
+              txnID, omMetaMgr);
+      parentId = omDirInfo.getObjectID();
+    }
+    return parentId;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java
index 77cf74b..f0f0320 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java
@@ -88,6 +88,7 @@ public class TestOMDirectoryCreateRequestV1 {
     OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
     ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
             folder.newFolder().getAbsolutePath());
+    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
     omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
     when(ozoneManager.getMetrics()).thenReturn(omMetrics);
     when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
index c7aa6be..5010d0a 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.om.request.file;
 import java.util.List;
 import java.util.UUID;
 
+import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -55,8 +56,7 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
         HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS,
         false, false);
 
-    OMFileCreateRequest omFileCreateRequest =
-        new OMFileCreateRequest(omRequest);
+    OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
 
     OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
     Assert.assertNotEquals(omRequest, modifiedOmRequest);
@@ -96,8 +96,7 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
         HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS,
         false, false);
 
-    OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest(
-        omRequest);
+    OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
 
     OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
     Assert.assertNotEquals(omRequest, modifiedOmRequest);
@@ -121,21 +120,17 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
 
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
-    OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest(
-        omRequest);
+    OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
 
     OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
 
     long id = modifiedOmRequest.getCreateFileRequest().getClientID();
 
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, id);
-
     // Before calling
-    OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
+    OmKeyInfo omKeyInfo = verifyPathInOpenKeyTable(keyName, id, false);
     Assert.assertNull(omKeyInfo);
 
-    omFileCreateRequest = new OMFileCreateRequest(modifiedOmRequest);
+    omFileCreateRequest = getOMFileCreateRequest(modifiedOmRequest);
 
     OMClientResponse omFileCreateResponse =
         omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
@@ -146,8 +141,7 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
 
     // Check open table whether key is added or not.
 
-    omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
-    Assert.assertNotNull(omKeyInfo);
+    omKeyInfo = verifyPathInOpenKeyTable(keyName, id, true);
 
     List< OmKeyLocationInfo > omKeyLocationInfoList =
         omKeyInfo.getLatestVersionLocations().getLocationList();
@@ -179,12 +173,11 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
         HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS,
             false, true);
 
-    OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest(
-        omRequest);
+    OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
 
     OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
 
-    omFileCreateRequest = new OMFileCreateRequest(modifiedOmRequest);
+    omFileCreateRequest = getOMFileCreateRequest(modifiedOmRequest);
 
     OMClientResponse omFileCreateResponse =
         omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
@@ -200,13 +193,11 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
         false, true);
 
     TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager);
-    OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest(
-        omRequest);
+    OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
 
     OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
 
-    omFileCreateRequest = new OMFileCreateRequest(modifiedOmRequest);
-
+    omFileCreateRequest = getOMFileCreateRequest(modifiedOmRequest);
 
     OMClientResponse omFileCreateResponse =
         omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
@@ -311,8 +302,7 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
     testNonRecursivePath(key, false, false, true);
   }
 
-
-  private void testNonRecursivePath(String key,
+  protected void testNonRecursivePath(String key,
       boolean overWrite, boolean recursive, boolean fail) throws Exception {
     OMRequest omRequest = createFileRequest(volumeName, bucketName, key,
         HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS,
@@ -320,12 +310,11 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
 
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
-    OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest(
-        omRequest);
+    OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
 
     OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
 
-    omFileCreateRequest = new OMFileCreateRequest(modifiedOmRequest);
+    omFileCreateRequest = getOMFileCreateRequest(modifiedOmRequest);
 
     OMClientResponse omFileCreateResponse =
         omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
@@ -341,10 +330,9 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
       Assert.assertTrue(omFileCreateResponse.getOMResponse().getSuccess());
       long id = modifiedOmRequest.getCreateFileRequest().getClientID();
 
-      String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-          key, id);
-      OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
-      Assert.assertNotNull(omKeyInfo);
+      verifyKeyNameInCreateFileResponse(key, omFileCreateResponse);
+
+      OmKeyInfo omKeyInfo = verifyPathInOpenKeyTable(key, id, true);
 
       List< OmKeyLocationInfo > omKeyLocationInfoList =
           omKeyInfo.getLatestVersionLocations().getLocationList();
@@ -368,6 +356,14 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
     }
   }
 
+  private void verifyKeyNameInCreateFileResponse(String key,
+      OMClientResponse omFileCreateResponse) {
+    OzoneManagerProtocolProtos.CreateFileResponse createFileResponse =
+            omFileCreateResponse.getOMResponse().getCreateFileResponse();
+    String actualFileName = createFileResponse.getKeyInfo().getKeyName();
+    Assert.assertEquals("Incorrect keyName", key, actualFileName);
+  }
+
   /**
    * Create OMRequest which encapsulates OMFileCreateRequest.
    * @param volumeName
@@ -377,7 +373,8 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
    * @param replicationType
    * @return OMRequest
    */
-  private OMRequest createFileRequest(
+  @NotNull
+  protected OMRequest createFileRequest(
       String volumeName, String bucketName, String keyName,
       HddsProtos.ReplicationFactor replicationFactor,
       HddsProtos.ReplicationType replicationType, boolean overWrite,
@@ -399,4 +396,38 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest {
         .setCreateFileRequest(createFileRequest).build();
 
   }
+
+  /**
+   * Verify path in open key table. Also, it returns OMKeyInfo for the given
+   * key path.
+   *
+   * @param key      key name
+   * @param id       client id
+   * @param doAssert if true then do assertion, otherwise just skip it.
+   * @return om key info for the given key path.
+   * @throws Exception DB failure
+   */
+  protected OmKeyInfo verifyPathInOpenKeyTable(String key, long id,
+                                               boolean doAssert)
+          throws Exception {
+    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
+            key, id);
+    OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
+    if (doAssert) {
+      Assert.assertNotNull("Failed to find key in OpenKeyTable", omKeyInfo);
+    }
+    return omKeyInfo;
+  }
+
+  /**
+   * Gets OMFileCreateRequest reference.
+   *
+   * @param omRequest om request
+   * @return OMFileCreateRequest reference
+   */
+  @NotNull
+  protected OMFileCreateRequest getOMFileCreateRequest(OMRequest omRequest){
+    return new OMFileCreateRequest(omRequest);
+  }
+
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestV1.java
new file mode 100644
index 0000000..7ded386
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestV1.java
@@ -0,0 +1,192 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.file;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.UUID;
+
+/**
+ * Tests OMFileCreateRequest layout version V1.
+ */
+public class TestOMFileCreateRequestV1 extends TestOMFileCreateRequest {
+
+  @Test
+  public void testValidateAndUpdateCacheWithNonRecursive() throws Exception {
+    testNonRecursivePath(UUID.randomUUID().toString(), false, false, false);
+    testNonRecursivePath("a/b", false, false, true);
+    Assert.assertEquals("Invalid metrics value", 0, omMetrics.getNumKeys());
+
+    // Create parent dirs for the path
+    TestOMRequestUtils.addParentsToDirTable(volumeName, bucketName,
+            "a/b/c", omMetadataManager);
+    String fileNameD = "d";
+    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
+            "a/b/c/" + fileNameD, 0L, HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE, omMetadataManager);
+
+    // cannot create file if directory of same name exists
+    testNonRecursivePath("a/b/c", false, false, true);
+
+    // Delete child key but retain path "a/b/" in the key table
+    OmDirectoryInfo dirPathC = getDirInfo("a/b/c");
+    Assert.assertNotNull("Failed to find dir path: a/b/c", dirPathC);
+    String dbFileD = omMetadataManager.getOzonePathKey(
+            dirPathC.getObjectID(), fileNameD);
+    omMetadataManager.getKeyTable().delete(dbFileD);
+    omMetadataManager.getKeyTable().delete(dirPathC.getPath());
+
+    // can create non-recursive because parents already exist.
+    testNonRecursivePath("a/b/e", false, false, false);
+  }
+
+  @Test
+  public void testValidateAndUpdateCacheWithRecursiveAndOverWrite()
+          throws Exception {
+    String key = "c/d/e/f";
+    // Should be able to create file even if parent directories do not exist
+    testNonRecursivePath(key, false, true, false);
+    Assert.assertEquals("Invalid metrics value", 3, omMetrics.getNumKeys());
+
+    // Add the key to key table
+    OmDirectoryInfo omDirInfo = getDirInfo("c/d/e");
+    OmKeyInfo omKeyInfo =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, key,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE,
+                    omDirInfo.getObjectID() + 10,
+                    omDirInfo.getObjectID(), 100, Time.now());
+    TestOMRequestUtils.addFileToKeyTable(false, false,
+            "f", omKeyInfo, -1,
+            omDirInfo.getObjectID() + 10, omMetadataManager);
+
+    // Even if key exists, should be able to create file as overwrite is set
+    // to true
+    testNonRecursivePath(key, true, true, false);
+    testNonRecursivePath(key, false, true, true);
+  }
+
+  @Test
+  public void testValidateAndUpdateCacheWithNonRecursiveAndOverWrite()
+          throws Exception {
+    String parentDir = "c/d/e";
+    String fileName = "f";
+    String key = parentDir + "/" + fileName;
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+    // Create parent dirs for the path
+    long parentId = TestOMRequestUtils.addParentsToDirTable(volumeName,
+            bucketName, parentDir, omMetadataManager);
+
+    // Need to add the path which starts with "c/d/e" to OpenKeyTable, as
+    // this is non-recursive and the parent should already exist.
+    testNonRecursivePath(key, false, false, false);
+
+    OmKeyInfo omKeyInfo =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, key,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE,
+                    parentId + 1,
+                    parentId, 100, Time.now());
+    TestOMRequestUtils.addFileToKeyTable(false, false,
+            fileName, omKeyInfo, -1, 50, omMetadataManager);
+
+    // Even if key exists in KeyTable, should be able to create file as
+    // overwrite is set to true
+    testNonRecursivePath(key, true, false, false);
+    testNonRecursivePath(key, false, false, true);
+  }
+
+  protected OmKeyInfo verifyPathInOpenKeyTable(String key, long id,
+                                             boolean doAssert)
+          throws Exception {
+    long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName,
+            omMetadataManager);
+    String[] pathComponents = StringUtils.split(key, '/');
+    long parentId = bucketId;
+    for (int indx = 0; indx < pathComponents.length; indx++) {
+      String pathElement = pathComponents[indx];
+      // Reached last component, which is file name
+      if (indx == pathComponents.length - 1) {
+        String dbOpenFileName = omMetadataManager.getOpenFileName(
+                parentId, pathElement, id);
+        OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable()
+                .get(dbOpenFileName);
+        if (doAssert) {
+          Assert.assertNotNull("Invalid key!", omKeyInfo);
+        }
+        return omKeyInfo;
+      } else {
+        // directory
+        String dbKey = omMetadataManager.getOzonePathKey(parentId,
+                pathElement);
+        OmDirectoryInfo dirInfo =
+                omMetadataManager.getDirectoryTable().get(dbKey);
+        parentId = dirInfo.getObjectID();
+      }
+    }
+    if (doAssert) {
+      Assert.fail("Invalid key!");
+    }
+    return null;
+  }
+
+  private OmDirectoryInfo getDirInfo(String key)
+          throws Exception {
+    long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName,
+            omMetadataManager);
+    String[] pathComponents = StringUtils.split(key, '/');
+    long parentId = bucketId;
+    OmDirectoryInfo dirInfo = null;
+    for (int indx = 0; indx < pathComponents.length; indx++) {
+      String pathElement = pathComponents[indx];
+      // Every path component here, including the last one, is resolved
+      // as a directory entry (unlike verifyPathInOpenKeyTable).
+      String dbKey = omMetadataManager.getOzonePathKey(parentId,
+              pathElement);
+      dirInfo =
+              omMetadataManager.getDirectoryTable().get(dbKey);
+      parentId = dirInfo.getObjectID();
+    }
+    return dirInfo;
+  }
+
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    return config;
+  }
+
+  protected OMFileCreateRequest getOMFileCreateRequest(OMRequest omRequest) {
+    return new OMFileCreateRequestV1(omRequest);
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
index b327b76..09d499e 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
@@ -19,12 +19,15 @@
 
 package org.apache.hadoop.ozone.om.request.key;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.UUID;
 import java.util.stream.Collectors;
 
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -48,6 +51,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
  */
 public class TestOMKeyCommitRequest extends TestOMKeyRequest {
 
+  private String parentDir;
+
   @Test
   public void testPreExecute() throws Exception {
     doPreExecute(createCommitKeyRequest());
@@ -56,20 +61,15 @@ public class TestOMKeyCommitRequest extends TestOMKeyRequest {
   @Test
   public void testValidateAndUpdateCache() throws Exception {
 
-    OMRequest modifiedOmRequest =
-        doPreExecute(createCommitKeyRequest());
+    OMRequest modifiedOmRequest = doPreExecute(createCommitKeyRequest());
 
     OMKeyCommitRequest omKeyCommitRequest =
-        new OMKeyCommitRequest(modifiedOmRequest);
+            getOmKeyCommitRequest(modifiedOmRequest);
 
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
 
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
-
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
+    String ozoneKey = addKeyToOpenKeyTable();
 
     // Key should not be there in key table, as validateAndUpdateCache is
     // still not called.
@@ -92,6 +92,8 @@ public class TestOMKeyCommitRequest extends TestOMKeyRequest {
     omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
 
     Assert.assertNotNull(omKeyInfo);
+    // Verify the committed keyInfo's key/file name matches the DB format
+    verifyKeyName(omKeyInfo);
 
     // Check modification time
 
@@ -107,7 +109,14 @@ public class TestOMKeyCommitRequest extends TestOMKeyRequest {
 
     Assert.assertEquals(locationInfoListFromCommitKeyRequest,
         omKeyInfo.getLatestVersionLocations().getLocationList());
+  }
 
+  @Test
+  public void testValidateAndUpdateCacheWithSubDirs() throws Exception {
+    parentDir = "dir1/dir2/dir3/";
+    keyName = parentDir + UUID.randomUUID().toString();
+
+    testValidateAndUpdateCache();
   }
 
   @Test
@@ -117,10 +126,9 @@ public class TestOMKeyCommitRequest extends TestOMKeyRequest {
         doPreExecute(createCommitKeyRequest());
 
     OMKeyCommitRequest omKeyCommitRequest =
-        new OMKeyCommitRequest(modifiedOmRequest);
+            getOmKeyCommitRequest(modifiedOmRequest);
 
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
+    String ozoneKey = getOzonePathKey();
 
     // Key should not be there in key table, as validateAndUpdateCache is
     // still not called.
@@ -147,13 +155,11 @@ public class TestOMKeyCommitRequest extends TestOMKeyRequest {
         doPreExecute(createCommitKeyRequest());
 
     OMKeyCommitRequest omKeyCommitRequest =
-        new OMKeyCommitRequest(modifiedOmRequest);
-
+            getOmKeyCommitRequest(modifiedOmRequest);
 
     TestOMRequestUtils.addVolumeToDB(volumeName, OzoneConsts.OZONE,
         omMetadataManager);
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
+    String ozoneKey = getOzonePathKey();
 
     // Key should not be there in key table, as validateAndUpdateCache is
     // still not called.
@@ -180,14 +186,12 @@ public class TestOMKeyCommitRequest extends TestOMKeyRequest {
         doPreExecute(createCommitKeyRequest());
 
     OMKeyCommitRequest omKeyCommitRequest =
-        new OMKeyCommitRequest(modifiedOmRequest);
-
+            getOmKeyCommitRequest(modifiedOmRequest);
 
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
 
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
+    String ozoneKey = getOzonePathKey();
 
     // Key should not be there in key table, as validateAndUpdateCache is
     // still not called.
@@ -216,7 +220,7 @@ public class TestOMKeyCommitRequest extends TestOMKeyRequest {
   private OMRequest doPreExecute(OMRequest originalOMRequest) throws Exception {
 
     OMKeyCommitRequest omKeyCommitRequest =
-        new OMKeyCommitRequest(originalOMRequest);
+            getOmKeyCommitRequest(originalOMRequest);
 
     OMRequest modifiedOmRequest = omKeyCommitRequest.preExecute(ozoneManager);
 
@@ -294,4 +298,34 @@ public class TestOMKeyCommitRequest extends TestOMKeyRequest {
     return keyLocations;
   }
 
+  protected String getParentDir() {
+    return parentDir;
+  }
+
+  @NotNull
+  protected String getOzonePathKey() throws IOException {
+    return omMetadataManager.getOzoneKey(volumeName, bucketName,
+            keyName);
+  }
+
+  @NotNull
+  protected String addKeyToOpenKeyTable() throws Exception {
+    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
+            clientID, replicationType, replicationFactor, omMetadataManager);
+
+    return getOzonePathKey();
+  }
+
+  @NotNull
+  protected OMKeyCommitRequest getOmKeyCommitRequest(OMRequest omRequest) {
+    return new OMKeyCommitRequest(omRequest);
+  }
+
+  protected void verifyKeyName(OmKeyInfo omKeyInfo) {
+    Assert.assertEquals("Incorrect KeyName", keyName,
+            omKeyInfo.getKeyName());
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    Assert.assertEquals("Incorrect FileName", fileName,
+            omKeyInfo.getFileName());
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestV1.java
new file mode 100644
index 0000000..f5168e1
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestV1.java
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.apache.hadoop.ozone.om.request.key;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Assert;
+
+import java.io.IOException;
+
+/**
+ * Tests OMKeyCommitRequestV1 with layout version V1.
+ */
+public class TestOMKeyCommitRequestV1 extends TestOMKeyCommitRequest {
+
+  private long parentID = Long.MIN_VALUE;
+
+  private long getBucketID() throws java.io.IOException {
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    if(omBucketInfo!= null){
+      return omBucketInfo.getObjectID();
+    }
+    // bucket doesn't exist in DB
+    return Long.MIN_VALUE;
+  }
+
+  @Override
+  protected String getOzonePathKey() throws IOException {
+    long bucketID = getBucketID();
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    return omMetadataManager.getOzonePathKey(bucketID, fileName);
+  }
+
+  @Override
+  protected String addKeyToOpenKeyTable() throws Exception {
+    // need to initialize parentID
+    if (getParentDir() == null) {
+      parentID = getBucketID();
+    } else {
+      parentID = TestOMRequestUtils.addParentsToDirTable(volumeName,
+              bucketName, getParentDir(), omMetadataManager);
+    }
+    long objectId = 100;
+
+    OmKeyInfo omKeyInfoV1 =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE, objectId, parentID, 100,
+                    Time.now());
+
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    TestOMRequestUtils.addFileToKeyTable(true, false,
+            fileName, omKeyInfoV1, clientID, txnLogId, omMetadataManager);
+
+    return omMetadataManager.getOzonePathKey(parentID, fileName);
+  }
+
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    return config;
+  }
+
+  @NotNull
+  protected OMKeyCommitRequest getOmKeyCommitRequest(OMRequest omRequest) {
+    return new OMKeyCommitRequestV1(omRequest);
+  }
+
+  protected void verifyKeyName(OmKeyInfo omKeyInfo) {
+    // V1 format - stores fileName in the keyName DB field.
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    Assert.assertEquals("Incorrect FileName", fileName,
+            omKeyInfo.getFileName());
+    Assert.assertEquals("Incorrect KeyName", fileName,
+            omKeyInfo.getKeyName());
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
index 116ba5c..5f15c50 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.ozone.om.KeyManagerImpl;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.jetbrains.annotations.NotNull;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
@@ -89,6 +90,7 @@ public class TestOMKeyRequest {
   protected long clientID;
   protected long scmBlockSize = 1000L;
   protected long dataSize;
+  protected long txnLogId = 100000L;
 
   // Just setting ozoneManagerDoubleBuffer which does nothing.
   protected OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper =
@@ -101,7 +103,7 @@ public class TestOMKeyRequest {
   public void setup() throws Exception {
     ozoneManager = Mockito.mock(OzoneManager.class);
     omMetrics = OMMetrics.create();
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
+    OzoneConfiguration ozoneConfiguration = getOzoneConfiguration();
     ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
         folder.newFolder().getAbsolutePath());
     omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
@@ -169,6 +171,11 @@ public class TestOMKeyRequest {
         .thenReturn(new ResolvedBucket(volumeAndBucket, volumeAndBucket));
   }
 
+  @NotNull
+  protected OzoneConfiguration getOzoneConfiguration() {
+    return new OzoneConfiguration();
+  }
+
   @After
   public void stop() {
     omMetrics.unRegister();
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java
new file mode 100644
index 0000000..d2ab465
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.file;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse;
+import org.apache.hadoop.ozone.om.response.key.TestOMKeyCreateResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Assert;
+
+/**
+ * Tests OMFileCreateResponseV1 layout version V1.
+ */
+public class TestOMFileCreateResponseV1 extends TestOMKeyCreateResponse {
+
+  @NotNull
+  @Override
+  protected OmKeyInfo getOmKeyInfo() {
+    Assert.assertNotNull(omBucketInfo);
+    return TestOMRequestUtils.createOmKeyInfo(volumeName,
+            omBucketInfo.getBucketName(), keyName, replicationType,
+            replicationFactor,
+            omBucketInfo.getObjectID() + 1,
+            omBucketInfo.getObjectID(), 100, Time.now());
+  }
+
+  @NotNull
+  @Override
+  protected String getOpenKeyName() {
+    Assert.assertNotNull(omBucketInfo);
+    return omMetadataManager.getOpenFileName(
+            omBucketInfo.getObjectID(), keyName, clientID);
+  }
+
+  @NotNull
+  @Override
+  protected OMKeyCreateResponse getOmKeyCreateResponse(OmKeyInfo keyInfo,
+      OmVolumeArgs volumeArgs, OmBucketInfo bucketInfo, OMResponse response) {
+
+    return new OMFileCreateResponseV1(response, keyInfo, null, clientID,
+            volumeArgs, bucketInfo);
+  }
+
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    return config;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
index ab425f2..a8b3147 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.om.response.key;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -31,20 +32,23 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 /**
  * Tests OMKeyCommitResponse.
  */
+@SuppressWarnings("visibilitymodifier")
 public class TestOMKeyCommitResponse extends TestOMKeyResponse {
 
+  protected OmBucketInfo omBucketInfo;
+
   @Test
   public void testAddToDBBatch() throws Exception {
 
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
     OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
         .setOwnerName(keyName).setAdminName(keyName)
         .setVolume(volumeName).setCreationTime(Time.now()).build();
-    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+    omBucketInfo = OmBucketInfo.newBuilder()
         .setVolumeName(volumeName).setBucketName(bucketName)
         .setCreationTime(Time.now()).build();
 
+    OmKeyInfo omKeyInfo = getOmKeyInfo();
+
     OzoneManagerProtocolProtos.OMResponse omResponse =
         OzoneManagerProtocolProtos.OMResponse.newBuilder().setCommitKeyResponse(
             OzoneManagerProtocolProtos.CommitKeyResponse.getDefaultInstance())
@@ -54,17 +58,14 @@ public class TestOMKeyCommitResponse extends TestOMKeyResponse {
 
     // As during commit Key, entry will be already there in openKeyTable.
     // Adding it here.
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
+    addKeyToOpenKeyTable();
 
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, clientID);
+    String openKey = getOpenKeyName();
     Assert.assertTrue(omMetadataManager.getOpenKeyTable().isExist(openKey));
 
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
-    OMKeyCommitResponse omKeyCommitResponse = new OMKeyCommitResponse(
-        omResponse, omKeyInfo, ozoneKey, openKey, omVolumeArgs, omBucketInfo);
+    String ozoneKey = getOzoneKey();
+    OMKeyCommitResponse omKeyCommitResponse = getOmKeyCommitResponse(
+            omVolumeArgs, omKeyInfo, omResponse, openKey, ozoneKey);
 
     omKeyCommitResponse.addToDBBatch(omMetadataManager, batchOperation);
 
@@ -73,8 +74,7 @@ public class TestOMKeyCommitResponse extends TestOMKeyResponse {
 
     // When key commit key is deleted from openKey table and added to keyTable.
     Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
-    Assert.assertTrue(omMetadataManager.getKeyTable().isExist(
-        omMetadataManager.getOzoneKey(volumeName, bucketName, keyName)));
+    Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey));
   }
 
   @Test
@@ -85,7 +85,7 @@ public class TestOMKeyCommitResponse extends TestOMKeyResponse {
     OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
         .setOwnerName(keyName).setAdminName(keyName)
         .setVolume(volumeName).setCreationTime(Time.now()).build();
-    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+    omBucketInfo = OmBucketInfo.newBuilder()
         .setVolumeName(volumeName).setBucketName(bucketName)
         .setCreationTime(Time.now()).build();
 
@@ -96,18 +96,15 @@ public class TestOMKeyCommitResponse extends TestOMKeyResponse {
             .setCmdType(OzoneManagerProtocolProtos.Type.CommitKey)
             .build();
 
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, clientID);
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
+    String openKey = getOpenKeyName();
+    String ozoneKey = getOzoneKey();
 
-    OMKeyCommitResponse omKeyCommitResponse = new OMKeyCommitResponse(
-        omResponse, omKeyInfo, ozoneKey, openKey, omVolumeArgs, omBucketInfo);
+    OMKeyCommitResponse omKeyCommitResponse = getOmKeyCommitResponse(
+            omVolumeArgs, omKeyInfo, omResponse, openKey, ozoneKey);
 
     // As during commit Key, entry will be already there in openKeyTable.
     // Adding it here.
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
+    addKeyToOpenKeyTable();
 
     Assert.assertTrue(omMetadataManager.getOpenKeyTable().isExist(openKey));
 
@@ -120,7 +117,30 @@ public class TestOMKeyCommitResponse extends TestOMKeyResponse {
     // As omResponse is error it is a no-op. So, entry should still be in
     // openKey table.
     Assert.assertTrue(omMetadataManager.getOpenKeyTable().isExist(openKey));
-    Assert.assertFalse(omMetadataManager.getKeyTable().isExist(
-        omMetadataManager.getOzoneKey(volumeName, bucketName, keyName)));
+    Assert.assertFalse(omMetadataManager.getKeyTable().isExist(ozoneKey));
+  }
+
+  @NotNull
+  protected void addKeyToOpenKeyTable() throws Exception {
+    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
+            clientID, replicationType, replicationFactor, omMetadataManager);
+  }
+
+  @NotNull
+  protected String getOzoneKey() {
+    Assert.assertNotNull(omBucketInfo);
+    return omMetadataManager.getOzoneKey(volumeName,
+            omBucketInfo.getBucketName(), keyName);
+  }
+
+  @NotNull
+  protected OMKeyCommitResponse getOmKeyCommitResponse(
+          OmVolumeArgs omVolumeArgs, OmKeyInfo omKeyInfo,
+          OzoneManagerProtocolProtos.OMResponse omResponse, String openKey,
+          String ozoneKey) {
+    Assert.assertNotNull(omBucketInfo);
+    return new OMKeyCommitResponse(
+            omResponse, omKeyInfo, ozoneKey, openKey, omVolumeArgs,
+            omBucketInfo);
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
new file mode 100644
index 0000000..369faa9
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.key;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Assert;
+
+/**
+ * Tests OMKeyCommitResponse layout version V1.
+ */
+public class TestOMKeyCommitResponseV1 extends TestOMKeyCommitResponse {
+
+  @NotNull
+  protected OMKeyCommitResponse getOmKeyCommitResponse(
+          OmVolumeArgs omVolumeArgs, OmKeyInfo omKeyInfo,
+          OzoneManagerProtocolProtos.OMResponse omResponse, String openKey,
+          String ozoneKey) {
+    Assert.assertNotNull(omBucketInfo);
+    return new OMKeyCommitResponseV1(
+            omResponse, omKeyInfo, ozoneKey, openKey, omVolumeArgs,
+            omBucketInfo);
+  }
+
+  @NotNull
+  @Override
+  protected OmKeyInfo getOmKeyInfo() {
+    Assert.assertNotNull(omBucketInfo);
+    return TestOMRequestUtils.createOmKeyInfo(volumeName,
+            omBucketInfo.getBucketName(), keyName, replicationType,
+            replicationFactor,
+            omBucketInfo.getObjectID() + 1,
+            omBucketInfo.getObjectID(), 100, Time.now());
+  }
+
+  @NotNull
+  @Override
+  protected void addKeyToOpenKeyTable() throws Exception {
+    Assert.assertNotNull(omBucketInfo);
+    long parentID = omBucketInfo.getObjectID();
+    long objectId = parentID + 10;
+
+    OmKeyInfo omKeyInfoV1 =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE, objectId, parentID, 100,
+                    Time.now());
+
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    TestOMRequestUtils.addFileToKeyTable(true, false,
+            fileName, omKeyInfoV1, clientID, txnLogId, omMetadataManager);
+  }
+
+  @NotNull
+  @Override
+  protected String getOpenKeyName() {
+    Assert.assertNotNull(omBucketInfo);
+    return omMetadataManager.getOpenFileName(
+            omBucketInfo.getObjectID(), keyName, clientID);
+  }
+
+  @NotNull
+  @Override
+  protected String getOzoneKey() {
+    Assert.assertNotNull(omBucketInfo);
+    return omMetadataManager.getOzonePathKey(omBucketInfo.getObjectID(),
+            keyName);
+  }
+
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    return config;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java
index 6357000..8b2dc8a 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java
@@ -21,11 +21,11 @@ package org.apache.hadoop.ozone.om.response.key;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
 import org.junit.Test;
 
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .CreateKeyResponse;
@@ -41,16 +41,15 @@ public class TestOMKeyCreateResponse extends TestOMKeyResponse {
   @Test
   public void testAddToDBBatch() throws Exception {
 
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
-
     OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
         .setOwnerName(keyName).setAdminName(keyName)
         .setVolume(volumeName).setCreationTime(Time.now()).build();
-    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+    omBucketInfo = OmBucketInfo.newBuilder()
         .setVolumeName(volumeName).setBucketName(bucketName)
         .setCreationTime(Time.now()).build();
 
+    OmKeyInfo omKeyInfo = getOmKeyInfo();
+
     OMResponse omResponse = OMResponse.newBuilder().setCreateKeyResponse(
                 CreateKeyResponse.getDefaultInstance())
             .setStatus(OzoneManagerProtocolProtos.Status.OK)
@@ -58,11 +57,11 @@ public class TestOMKeyCreateResponse extends TestOMKeyResponse {
             .build();
 
     OMKeyCreateResponse omKeyCreateResponse =
-        new OMKeyCreateResponse(omResponse, omKeyInfo, null, clientID,
-            omVolumeArgs, omBucketInfo);
+            getOmKeyCreateResponse(omKeyInfo, omVolumeArgs, omBucketInfo,
+                    omResponse);
+
+    String openKey = getOpenKeyName();
 
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, clientID);
     Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
     omKeyCreateResponse.addToDBBatch(omMetadataManager, batchOperation);
 
@@ -74,16 +73,16 @@ public class TestOMKeyCreateResponse extends TestOMKeyResponse {
 
   @Test
   public void testAddToDBBatchWithErrorResponse() throws Exception {
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
 
     OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
         .setOwnerName(keyName).setAdminName(keyName)
         .setVolume(volumeName).setCreationTime(Time.now()).build();
-    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+    omBucketInfo = OmBucketInfo.newBuilder()
         .setVolumeName(volumeName).setBucketName(bucketName)
         .setCreationTime(Time.now()).build();
 
+    OmKeyInfo omKeyInfo = getOmKeyInfo();
+
     OMResponse omResponse = OMResponse.newBuilder().setCreateKeyResponse(
         CreateKeyResponse.getDefaultInstance())
         .setStatus(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND)
@@ -91,12 +90,11 @@ public class TestOMKeyCreateResponse extends TestOMKeyResponse {
         .build();
 
     OMKeyCreateResponse omKeyCreateResponse =
-        new OMKeyCreateResponse(omResponse, omKeyInfo, null, clientID,
-            omVolumeArgs, omBucketInfo);
+            getOmKeyCreateResponse(omKeyInfo, omVolumeArgs, omBucketInfo,
+                    omResponse);
 
     // Before calling addToDBBatch
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, clientID);
+    String openKey = getOpenKeyName();
     Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
 
     omKeyCreateResponse.checkAndUpdateDB(omMetadataManager, batchOperation);
@@ -108,4 +106,12 @@ public class TestOMKeyCreateResponse extends TestOMKeyResponse {
     Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
 
   }
+
+  @NotNull
+  protected OMKeyCreateResponse getOmKeyCreateResponse(OmKeyInfo keyInfo,
+      OmVolumeArgs volumeArgs, OmBucketInfo bucketInfo, OMResponse response) {
+
+    return new OMKeyCreateResponse(response, keyInfo, null, clientID,
+            volumeArgs, bucketInfo);
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java
index 1ad4c70..af5449d 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java
@@ -20,6 +20,10 @@ package org.apache.hadoop.ozone.om.response.key;
 
 import java.util.UUID;
 
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.jetbrains.annotations.NotNull;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
@@ -49,11 +53,13 @@ public class TestOMKeyResponse {
   protected String keyName;
   protected HddsProtos.ReplicationFactor replicationFactor;
   protected HddsProtos.ReplicationType replicationType;
+  protected OmBucketInfo omBucketInfo;
   protected long clientID;
+  protected long txnLogId = 100000L;
 
   @Before
   public void setup() throws Exception {
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
+    OzoneConfiguration ozoneConfiguration = getOzoneConfiguration();
     ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
         folder.newFolder().getAbsolutePath());
     omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
@@ -67,6 +73,23 @@ public class TestOMKeyResponse {
     clientID = 1000L;
   }
 
+  @NotNull
+  protected String getOpenKeyName() {
+    return omMetadataManager.getOpenKey(volumeName, bucketName, keyName,
+            clientID);
+  }
+
+  @NotNull
+  protected OmKeyInfo getOmKeyInfo() {
+    return TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+            replicationType, replicationFactor);
+  }
+
+  @NotNull
+  protected OzoneConfiguration getOzoneConfiguration() {
+    return new OzoneConfiguration();
+  }
+
   @After
   public void stop() {
     Mockito.framework().clearInlineMocks();


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 03/03: HDDS-4332: ListFileStatus - do lookup in directory and file tables (#1503)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 181bb34318e4e9f5064db42fc8aa0af8aec579c5
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Sat Oct 31 06:32:23 2020 +0530

    HDDS-4332: ListFileStatus - do lookup in directory and file tables (#1503)
---
 .../hadoop/ozone/om/helpers/OzoneFSUtils.java      |  31 ++
 .../hadoop/fs/ozone/TestOzoneFileInterfaces.java   |  18 +-
 .../hadoop/fs/ozone/TestOzoneFileInterfacesV1.java |  66 ++++
 .../hadoop/fs/ozone/TestOzoneFileSystem.java       |  78 ++--
 .../hadoop/fs/ozone/TestOzoneFileSystemV1.java     | 380 ++++++++++++++++++++
 .../ozone/freon/TestHadoopDirTreeGenerator.java    |  22 +-
 .../ozone/freon/TestHadoopDirTreeGeneratorV1.java  |  33 ++
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 398 +++++++++++++++++++++
 .../request/file/OMDirectoryCreateRequestV1.java   |  11 +-
 .../om/request/file/OMFileCreateRequestV1.java     |   4 +-
 .../ozone/om/request/file/OMFileRequest.java       | 117 +++++-
 11 files changed, 1122 insertions(+), 36 deletions(-)

diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
index 96df56f..63bfd8f 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.util.StringUtils;
 import javax.annotation.Nonnull;
 import java.nio.file.Paths;
 
+import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
 
 /**
@@ -131,4 +132,34 @@ public final class OzoneFSUtils {
     // failed to converts a path key
     return keyName;
   }
+
+  /**
+   * Verifies whether the childKey is an immediate path under the given
+   * parentKey.
+   *
+   * @param parentKey parent key name
+   * @param childKey  child key name
+   * @return true if childKey is an immediate path under the given parentKey
+   */
+  public static boolean isImmediateChild(String parentKey, String childKey) {
+
+    // Empty childKey has no parent, so just returning false.
+    if (org.apache.commons.lang3.StringUtils.isBlank(childKey)) {
+      return false;
+    }
+    java.nio.file.Path parentPath = Paths.get(parentKey);
+    java.nio.file.Path childPath = Paths.get(childKey);
+
+    java.nio.file.Path childParent = childPath.getParent();
+    // Following are the valid parentKey formats:
+    // parentKey="" or parentKey="/" or parentKey="/a" or parentKey="a"
+    // Following are the valid childKey formats:
+    // childKey="/" or childKey="/a/b" or childKey="a/b"
+    if (org.apache.commons.lang3.StringUtils.isBlank(parentKey)) {
+      return childParent == null ||
+              OM_KEY_PREFIX.equals(childParent.toString());
+    }
+
+    return parentPath.equals(childParent);
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
index 2b8803e..1e31234 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
@@ -56,6 +56,8 @@ import static java.nio.charset.StandardCharsets.UTF_8;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang3.RandomStringUtils;
 import static org.apache.hadoop.fs.ozone.Constants.OZONE_DEFAULT_USER;
+
+import org.jetbrains.annotations.NotNull;
 import org.junit.After;
 import org.junit.Assert;
 
@@ -120,7 +122,8 @@ public class TestOzoneFileInterfaces {
 
   private OMMetrics omMetrics;
 
-  private boolean enableFileSystemPaths;
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  protected boolean enableFileSystemPaths;
 
   public TestOzoneFileInterfaces(boolean setDefaultFs,
       boolean useAbsolutePath, boolean enabledFileSystemPaths) {
@@ -135,9 +138,8 @@ public class TestOzoneFileInterfaces {
     volumeName = RandomStringUtils.randomAlphabetic(10).toLowerCase();
     bucketName = RandomStringUtils.randomAlphabetic(10).toLowerCase();
 
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
-        enableFileSystemPaths);
+    OzoneConfiguration conf = getOzoneConfiguration();
+
     cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(3)
         .build();
@@ -162,6 +164,14 @@ public class TestOzoneFileInterfaces {
     omMetrics = cluster.getOzoneManager().getMetrics();
   }
 
+  @NotNull
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+        enableFileSystemPaths);
+    return conf;
+  }
+
   @After
   public void teardown() throws IOException {
     if (cluster != null) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfacesV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfacesV1.java
new file mode 100644
index 0000000..93473be
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfacesV1.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+/**
+ * Test OzoneFileSystem Interfaces layout version V1.
+ *
+ * This test will test the various interfaces i.e.
+ * create, read, write, getFileStatus
+ */
+@RunWith(Parameterized.class)
+public class TestOzoneFileInterfacesV1 extends TestOzoneFileInterfaces {
+
+  public TestOzoneFileInterfacesV1(boolean setDefaultFs,
+      boolean useAbsolutePath, boolean enabledFileSystemPaths) {
+    super(setDefaultFs, useAbsolutePath, enabledFileSystemPaths);
+  }
+
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+            enableFileSystemPaths);
+    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    return conf;
+  }
+
+  @Override
+  @Test
+  @Ignore("TODO:HDDS-2939")
+  public void testDirectory() {
+
+  }
+
+  @Override
+  @Test
+  @Ignore("TODO:HDDS-2939")
+  public void testOzFsReadWrite() {
+
+  }
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
index 46c0115..0e422fe 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
@@ -64,6 +64,7 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import org.apache.hadoop.test.LambdaTestUtils;
+import org.jetbrains.annotations.NotNull;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Rule;
@@ -100,17 +101,26 @@ public class TestOzoneFileSystem {
   private static final Logger LOG =
       LoggerFactory.getLogger(TestOzoneFileSystem.class);
 
-  private boolean enabledFileSystemPaths;
-
-  private MiniOzoneCluster cluster;
-  private FileSystem fs;
-  private OzoneFileSystem o3fs;
-  private String volumeName;
-  private String bucketName;
-  private int rootItemCount;
-  private Trash trash;
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  protected boolean enabledFileSystemPaths;
+
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  protected MiniOzoneCluster cluster;
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  protected FileSystem fs;
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  protected OzoneFileSystem o3fs;
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  protected String volumeName;
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  protected String bucketName;
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  protected int rootItemCount;
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  protected Trash trash;
 
   public void testCreateFileShouldCheckExistenceOfDirWithSameName()
+
       throws Exception {
     /*
      * Op 1. create file -> /d1/d2/d3/d4/key2
@@ -156,6 +166,28 @@ public class TestOzoneFileSystem {
      // ignore as it's expected
     }
 
+    // Directory
+    FileStatus fileStatus = fs.getFileStatus(parent);
+    assertEquals("FileStatus did not return the directory",
+            "/d1/d2/d3/d4", fileStatus.getPath().toUri().getPath());
+    assertTrue("FileStatus did not return the directory",
+            fileStatus.isDirectory());
+
+    // invalid sub directory
+    try{
+      fs.getFileStatus(new Path("/d1/d2/d3/d4/key3/invalid"));
+      fail("Should throw FileNotFoundException");
+    } catch (FileNotFoundException fnfe) {
+      // ignore as it's expected
+    }
+    // invalid file name
+    try{
+      fs.getFileStatus(new Path("/d1/d2/d3/d4/invalidkey"));
+      fail("Should throw FileNotFoundException");
+    } catch (FileNotFoundException fnfe) {
+      // ignore as it's expected
+    }
+
     // Cleanup
     fs.delete(new Path("/d1/"), true);
   }
@@ -249,12 +281,9 @@ public class TestOzoneFileSystem {
     }
   }
 
-  private void setupOzoneFileSystem()
+  protected void setupOzoneFileSystem()
       throws IOException, TimeoutException, InterruptedException {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setInt(FS_TRASH_INTERVAL_KEY, 1);
-    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
-        enabledFileSystemPaths);
+    OzoneConfiguration conf = getOzoneConfig();
     cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(3)
         .build();
@@ -276,7 +305,16 @@ public class TestOzoneFileSystem {
     trash = new Trash(conf);
   }
 
-  private void testOzoneFsServiceLoader() throws IOException {
+  @NotNull
+  protected OzoneConfiguration getOzoneConfig() {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setInt(FS_TRASH_INTERVAL_KEY, 1);
+    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+            enabledFileSystemPaths);
+    return conf;
+  }
+
+  protected void testOzoneFsServiceLoader() throws IOException {
     assertEquals(
         FileSystem.getFileSystemClass(OzoneConsts.OZONE_URI_SCHEME, null),
         OzoneFileSystem.class);
@@ -441,7 +479,7 @@ public class TestOzoneFileSystem {
 
   }
 
-  private void testListStatus() throws Exception {
+  protected void testListStatus() throws Exception {
     Path parent = new Path("/testListStatus");
     Path file1 = new Path(parent, "key1");
     Path file2 = new Path(parent, "key2");
@@ -489,7 +527,7 @@ public class TestOzoneFileSystem {
   /**
    * Tests listStatus operation on root directory.
    */
-  private void testListStatusOnRoot() throws Exception {
+  protected void testListStatusOnRoot() throws Exception {
     Path root = new Path("/");
     Path dir1 = new Path(root, "dir1");
     Path dir12 = new Path(dir1, "dir12");
@@ -516,7 +554,7 @@ public class TestOzoneFileSystem {
   /**
    * Tests listStatus operation on root directory.
    */
-  private void testListStatusOnLargeDirectory() throws Exception {
+  protected void testListStatusOnLargeDirectory() throws Exception {
     Path root = new Path("/");
     Set<String> paths = new TreeSet<>();
     int numDirs = 5111;
@@ -540,7 +578,7 @@ public class TestOzoneFileSystem {
   /**
    * Tests listStatus on a path with subdirs.
    */
-  private void testListStatusOnSubDirs() throws Exception {
+  protected void testListStatusOnSubDirs() throws Exception {
     // Create the following key structure
     //      /dir1/dir11/dir111
     //      /dir1/dir12
@@ -653,7 +691,7 @@ public class TestOzoneFileSystem {
     GenericTestUtils.assertExceptionContains("KEY_NOT_FOUND", ex);
   }
 
-  private void testGetDirectoryModificationTime()
+  protected void testGetDirectoryModificationTime()
       throws IOException, InterruptedException {
     Path mdir1 = new Path("/mdir1");
     Path mdir11 = new Path(mdir1, "mdir11");
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
new file mode 100644
index 0000000..415aec8
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
@@ -0,0 +1,380 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.Map;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
+
+/**
+ * Ozone file system tests that are not covered by contract tests,
+ * layout version V1.
+ *
+ * Note: When adding new test(s), please append them in testFileSystem() to
+ * avoid test run time regression.
+ */
+@RunWith(Parameterized.class)
+public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
+
+  public TestOzoneFileSystemV1(boolean setDefaultFs) {
+    super(setDefaultFs);
+  }
+  /**
+   * Set a timeout for each test.
+   */
+  @Rule
+  public Timeout timeout = new Timeout(300000);
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestOzoneFileSystemV1.class);
+
+  private void testListStatusWithoutRecursiveSearch() throws Exception {
+    /*
+     * Op 1. create file -> /key1
+     * Op 2. create dir -> /d1/d2
+     * Op 3. create dir -> /d1/d3
+     * Op 4. create dir -> /d1/d4
+     * Op 5. create file -> /d1/key1
+     * Op 6. create file -> /d2/key1
+     * Op 7. create file -> /d1/d2/key1
+     */
+    Path key1 = new Path("/key1");
+    try (FSDataOutputStream outputStream = fs.create(key1, false)) {
+      assertNotNull("Should be able to create file: key1",
+              outputStream);
+    }
+    Path d1 = new Path("/d1");
+    Path dir1Key1 = new Path(d1, "key1");
+    try (FSDataOutputStream outputStream = fs.create(dir1Key1, false)) {
+      assertNotNull("Should be able to create file: " + dir1Key1,
+              outputStream);
+    }
+    Path d2 = new Path("/d2");
+    Path dir2Key1 = new Path(d2, "key1");
+    try (FSDataOutputStream outputStream = fs.create(dir2Key1, false)) {
+      assertNotNull("Should be able to create file: " + dir2Key1,
+              outputStream);
+    }
+    Path dir1Dir2 = new Path("/d1/d2/");
+    Path dir1Dir2Key1 = new Path(dir1Dir2, "key1");
+    try (FSDataOutputStream outputStream = fs.create(dir1Dir2Key1, false)) {
+      assertNotNull("Should be able to create file: " + dir1Dir2Key1,
+              outputStream);
+    }
+    Path d1Key2 = new Path(d1, "key2");
+    try (FSDataOutputStream outputStream = fs.create(d1Key2, false)) {
+      assertNotNull("Should be able to create file: " + d1Key2,
+              outputStream);
+    }
+
+    Path dir1Dir3 = new Path("/d1/d3/");
+    Path dir1Dir4 = new Path("/d1/d4/");
+
+    fs.mkdirs(dir1Dir3);
+    fs.mkdirs(dir1Dir4);
+
+    // Root Directory
+    FileStatus[] fileStatusList = fs.listStatus(new Path("/"));
+    assertEquals("FileStatus should return files and directories",
+            3, fileStatusList.length);
+    ArrayList<String> expectedPaths = new ArrayList<>();
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1");
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d2");
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/key1");
+    for (FileStatus fileStatus : fileStatusList) {
+      expectedPaths.remove(fileStatus.getPath().toString());
+    }
+    assertEquals("Failed to return the filestatus[]" + expectedPaths,
+            0, expectedPaths.size());
+
+    // level-1 sub-dirs
+    fileStatusList = fs.listStatus(new Path("/d1"));
+    assertEquals("FileStatus should return files and directories",
+            5, fileStatusList.length);
+    expectedPaths = new ArrayList<>();
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1/d2");
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1/d3");
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1/d4");
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1/key1");
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1/key2");
+    for (FileStatus fileStatus : fileStatusList) {
+      expectedPaths.remove(fileStatus.getPath().toString());
+    }
+    assertEquals("Failed to return the filestatus[]" + expectedPaths,
+            0, expectedPaths.size());
+
+    // level-2 sub-dirs
+    fileStatusList = fs.listStatus(new Path("/d1/d2"));
+    assertEquals("FileStatus should return files and directories",
+            1, fileStatusList.length);
+    expectedPaths = new ArrayList<>();
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1/d2/" +
+            "key1");
+    for (FileStatus fileStatus : fileStatusList) {
+      expectedPaths.remove(fileStatus.getPath().toString());
+    }
+    assertEquals("Failed to return the filestatus[]" + expectedPaths,
+            0, expectedPaths.size());
+
+    // level-2 key2
+    fileStatusList = fs.listStatus(new Path("/d1/d2/key1"));
+    assertEquals("FileStatus should return files and directories",
+            1, fileStatusList.length);
+    expectedPaths = new ArrayList<>();
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1/d2/" +
+            "key1");
+    for (FileStatus fileStatus : fileStatusList) {
+      expectedPaths.remove(fileStatus.getPath().toString());
+    }
+    assertEquals("Failed to return the filestatus[]" + expectedPaths,
+            0, expectedPaths.size());
+
+    // invalid root key
+    try {
+      fileStatusList = fs.listStatus(new Path("/key2"));
+      fail("Should throw FileNotFoundException");
+    } catch (FileNotFoundException fnfe) {
+      // ignore as it's expected
+    }
+    try {
+      fileStatusList = fs.listStatus(new Path("/d1/d2/key2"));
+      fail("Should throw FileNotFoundException");
+    } catch (FileNotFoundException fnfe) {
+      // ignore as it's expected
+
+    }
+  }
+
+  private void testListFilesRecursive() throws Exception {
+    /*
+     * Op 1. create file -> /d1/d1/d2/key1
+     * Op 2. create dir -> /key1
+     * Op 3. create dir -> /key2
+     * Op 4. create dir -> /d1/d2/d1/d2/key1
+     */
+    Path dir1Dir1Dir2Key1 = new Path("/d1/d1/d2/key1");
+    try (FSDataOutputStream outputStream = fs.create(dir1Dir1Dir2Key1,
+            false)) {
+      assertNotNull("Should be able to create file: " + dir1Dir1Dir2Key1,
+              outputStream);
+    }
+    Path key1 = new Path("/key1");
+    try (FSDataOutputStream outputStream = fs.create(key1, false)) {
+      assertNotNull("Should be able to create file: " + key1,
+              outputStream);
+    }
+    Path key2 = new Path("/key2");
+    try (FSDataOutputStream outputStream = fs.create(key2, false)) {
+      assertNotNull("Should be able to create file: key2",
+              outputStream);
+    }
+    Path dir1Dir2Dir1Dir2Key1 = new Path("/d1/d2/d1/d2/key1");
+    try (FSDataOutputStream outputStream = fs.create(dir1Dir2Dir1Dir2Key1,
+            false)) {
+      assertNotNull("Should be able to create file: "
+              + dir1Dir2Dir1Dir2Key1, outputStream);
+    }
+    RemoteIterator<LocatedFileStatus> fileStatusItr = fs.listFiles(new Path(
+            "/"), true);
+    String uriPrefix = "o3fs://" + bucketName + "." + volumeName;
+    ArrayList<String> expectedPaths = new ArrayList<>();
+    expectedPaths.add(uriPrefix + dir1Dir1Dir2Key1.toString());
+    expectedPaths.add(uriPrefix + key1.toString());
+    expectedPaths.add(uriPrefix + key2.toString());
+    expectedPaths.add(uriPrefix + dir1Dir2Dir1Dir2Key1.toString());
+    int expectedFilesCount = expectedPaths.size();
+    int actualCount = 0;
+    while (fileStatusItr.hasNext()) {
+      LocatedFileStatus status = fileStatusItr.next();
+      expectedPaths.remove(status.getPath().toString());
+      actualCount++;
+    }
+    assertEquals("Failed to get all the files: " + expectedPaths,
+            expectedFilesCount, actualCount);
+    assertEquals("Failed to get all the files: " + expectedPaths, 0,
+            expectedPaths.size());
+
+    // Recursive=false
+    fileStatusItr = fs.listFiles(new Path("/"), false);
+    expectedPaths.clear();
+    expectedPaths.add(uriPrefix + "/key1");
+    expectedPaths.add(uriPrefix + "/key2");
+    expectedFilesCount = expectedPaths.size();
+    actualCount = 0;
+    while (fileStatusItr.hasNext()) {
+      LocatedFileStatus status = fileStatusItr.next();
+      expectedPaths.remove(status.getPath().toString());
+      actualCount++;
+    }
+    assertEquals("Failed to get all the files: " + expectedPaths, 0,
+            expectedPaths.size());
+    assertEquals("Failed to get all the files: " + expectedPaths,
+            expectedFilesCount, actualCount);
+  }
+
+  @Test(timeout = 300_000)
+  @Override
+  public void testFileSystem() throws Exception {
+    setupOzoneFileSystem();
+
+    testOzoneFsServiceLoader();
+    o3fs = (OzoneFileSystem) fs;
+
+    testCreateFileShouldCheckExistenceOfDirWithSameName();
+    // TODO: Cleanup keyTable and dirTable explicitly as FS delete operation
+    //  is not yet implemented. This should be replaced with fs.delete() call.
+    tableCleanup();
+    testMakeDirsWithAnExistingDirectoryPath();
+    tableCleanup();
+    testCreateWithInvalidPaths();
+    tableCleanup();
+    testListStatusWithoutRecursiveSearch();
+    tableCleanup();
+    testListFilesRecursive();
+    tableCleanup();
+
+    testGetDirectoryModificationTime();
+    tableCleanup();
+
+    testListStatusOnRoot();
+    tableCleanup();
+    testListStatus();
+    tableCleanup();
+    testListStatusOnSubDirs();
+    tableCleanup();
+    testListStatusOnLargeDirectory();
+    tableCleanup();
+  }
+
+  /**
+   * Cleanup keyTable and directoryTable explicitly as FS delete operation
+   * is not yet supported.
+   *
+   * @throws IOException DB failure
+   */
+  protected void tableCleanup() throws IOException {
+    OMMetadataManager metadataMgr = cluster.getOzoneManager()
+            .getMetadataManager();
+    TableIterator<String, ? extends
+            Table.KeyValue<String, OmDirectoryInfo>> dirTableIterator =
+            metadataMgr.getDirectoryTable().iterator();
+    dirTableIterator.seekToFirst();
+    ArrayList <String> dirList = new ArrayList<>();
+    while (dirTableIterator.hasNext()) {
+      String key = dirTableIterator.key();
+      if (StringUtils.isNotBlank(key)) {
+        dirList.add(key);
+      }
+      dirTableIterator.next();
+    }
+
+    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmDirectoryInfo>>>
+            cacheIterator = metadataMgr.getDirectoryTable().cacheIterator();
+    while(cacheIterator.hasNext()){
+      cacheIterator.next();
+      cacheIterator.remove();
+    }
+
+    for (String dirKey : dirList) {
+      metadataMgr.getDirectoryTable().delete(dirKey);
+      Assert.assertNull("Unexpected entry!",
+              metadataMgr.getDirectoryTable().get(dirKey));
+    }
+
+    Assert.assertTrue("DirTable is not empty",
+            metadataMgr.getDirectoryTable().isEmpty());
+
+    Assert.assertFalse(metadataMgr.getDirectoryTable().cacheIterator()
+            .hasNext());
+
+    TableIterator<String, ? extends
+            Table.KeyValue<String, OmKeyInfo>> keyTableIterator =
+            metadataMgr.getKeyTable().iterator();
+    keyTableIterator.seekToFirst();
+    ArrayList <String> fileList = new ArrayList<>();
+    while (keyTableIterator.hasNext()) {
+      String key = keyTableIterator.key();
+      if (StringUtils.isNotBlank(key)) {
+        fileList.add(key);
+      }
+      keyTableIterator.next();
+    }
+
+    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmDirectoryInfo>>>
+            keyCacheIterator = metadataMgr.getDirectoryTable().cacheIterator();
+    while(keyCacheIterator.hasNext()){
+      keyCacheIterator.next();
+      keyCacheIterator.remove();
+    }
+
+    for (String fileKey : fileList) {
+      metadataMgr.getKeyTable().delete(fileKey);
+      Assert.assertNull("Unexpected entry!",
+              metadataMgr.getKeyTable().get(fileKey));
+    }
+
+    Assert.assertTrue("KeyTable is not empty",
+            metadataMgr.getKeyTable().isEmpty());
+
+    rootItemCount = 0;
+  }
+
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfig() {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setInt(FS_TRASH_INTERVAL_KEY, 1);
+    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+        enabledFileSystemPaths);
+    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    return conf;
+  }
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java
index 6f5b113..9b07dcc 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java
@@ -31,12 +31,15 @@ import org.apache.ratis.server.raftlog.RaftLog;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
 
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.net.URI;
+import java.util.ArrayList;
 
 /**
  * Test for HadoopDirTreeGenerator.
@@ -47,6 +50,8 @@ public class TestHadoopDirTreeGenerator {
   private OzoneConfiguration conf = null;
   private MiniOzoneCluster cluster = null;
   private ObjectStore store = null;
+  private static final Logger LOG =
+          LoggerFactory.getLogger(TestHadoopDirTreeGenerator.class);
 
   @Before
   public void setup() {
@@ -74,7 +79,7 @@ public class TestHadoopDirTreeGenerator {
    * @throws IOException
    */
   private void startCluster() throws Exception {
-    conf = new OzoneConfiguration();
+    conf = getOzoneConfiguration();
 
     cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build();
     cluster.waitForClusterToBeReady();
@@ -83,6 +88,10 @@ public class TestHadoopDirTreeGenerator {
     store = OzoneClientFactory.getRpcClient(conf).getObjectStore();
   }
 
+  protected OzoneConfiguration getOzoneConfiguration() {
+    return new OzoneConfiguration();
+  }
+
   @Test
   public void testNestedDirTreeGeneration() throws Exception {
     try {
@@ -103,6 +112,9 @@ public class TestHadoopDirTreeGenerator {
               2, 4, 2);
       verifyDirTree("vol5", "bucket1", 5,
               4, 1, 0);
+      // default page size is Constants.LISTING_PAGE_SIZE = 1024
+      verifyDirTree("vol6", "bucket1", 2,
+              1, 1100, 0);
     } finally {
       shutdown();
     }
@@ -122,6 +134,7 @@ public class TestHadoopDirTreeGenerator {
             fileCount + "", "-s", span + "", "-n", "1", "-r", rootPath,
                      "-g", perFileSizeInBytes + ""});
     // verify the directory structure
+    LOG.info("Started verifying the directory structure...");
     FileSystem fileSystem = FileSystem.get(URI.create(rootPath),
             conf);
     Path rootDir = new Path(rootPath.concat("/"));
@@ -149,6 +162,7 @@ public class TestHadoopDirTreeGenerator {
       verifyActualSpan(expectedSpanCnt, fileStatuses);
     }
     int actualNumFiles = 0;
+    ArrayList <String> files = new ArrayList<>();
     for (FileStatus fileStatus : fileStatuses) {
       if (fileStatus.isDirectory()) {
         ++depth;
@@ -157,6 +171,12 @@ public class TestHadoopDirTreeGenerator {
       } else {
         Assert.assertEquals("Mismatches file len",
                 perFileSizeInBytes, fileStatus.getLen());
+        String fName = fileStatus.getPath().getName();
+        Assert.assertFalse("actualNumFiles:" + actualNumFiles +
+                        ", fName:" + fName + ", expectedFileCnt:" +
+                        expectedFileCnt + ", depth:" + depth,
+                files.contains(fName));
+        files.add(fName);
         actualNumFiles++;
       }
     }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGeneratorV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGeneratorV1.java
new file mode 100644
index 0000000..99d4f26
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGeneratorV1.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.freon;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+
+/**
+ * Test for HadoopDirTreeGenerator layout version V1.
+ */
+public class TestHadoopDirTreeGeneratorV1 extends TestHadoopDirTreeGenerator {
+
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+    return conf;
+  }
+
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 83fa020..27ccf21 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -30,6 +30,7 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
+import java.util.LinkedHashSet;
 import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
@@ -75,6 +76,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
 import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
@@ -94,7 +96,9 @@ import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo;
 import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
@@ -124,6 +128,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_KEY_PREALLOCATION_BL
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_KEY_PREALLOCATION_BLOCKS_MAX_DEFAULT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_FOUND;
@@ -1780,6 +1785,10 @@ public class KeyManagerImpl implements KeyManager {
     String bucketName = args.getBucketName();
     String keyName = args.getKeyName();
 
+    if (OzoneManagerRatisUtils.isOmLayoutVersionV1()) {
+      return getOzoneFileStatusV1(volumeName, bucketName, keyName,
+              args.getSortDatanodes(), clientAddress, false);
+    }
     return getOzoneFileStatus(volumeName, bucketName, keyName,
             args.getRefreshPipeline(), args.getSortDatanodes(), clientAddress);
   }
@@ -1844,6 +1853,65 @@ public class KeyManagerImpl implements KeyManager {
             FILE_NOT_FOUND);
   }
 
+
+  private OzoneFileStatus getOzoneFileStatusV1(String volumeName,
+      String bucketName, String keyName, boolean sortDatanodes,
+      String clientAddress, boolean skipFileNotFoundError) throws IOException {
+    OzoneFileStatus fileStatus = null;
+    metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName,
+            bucketName);
+    try {
+      // Check if this is the root of the filesystem.
+      if (keyName.length() == 0) {
+        validateBucket(volumeName, bucketName);
+        return new OzoneFileStatus();
+      }
+
+      fileStatus = OMFileRequest.getOMKeyInfoIfExists(metadataManager,
+              volumeName, bucketName, keyName, scmBlockSize);
+
+    } finally {
+      metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName,
+              bucketName);
+    }
+
+    if (fileStatus != null) {
+      // if the key is a file then do refresh pipeline info in OM by asking SCM
+      if (fileStatus.isFile()) {
+
+        OmKeyInfo fileKeyInfo = fileStatus.getKeyInfo();
+
+        // refreshPipeline flag check has been removed as part of
+        // https://issues.apache.org/jira/browse/HDDS-3658.
+        // Please refer this jira for more details.
+        refreshPipeline(fileKeyInfo);
+
+        if (sortDatanodes) {
+          sortDatanodeInPipeline(fileKeyInfo, clientAddress);
+        }
+        return new OzoneFileStatus(fileKeyInfo, scmBlockSize, false);
+      } else {
+        return fileStatus;
+      }
+    }
+
+    // Key not found.
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Unable to get file status for the key: volume: {}, bucket:" +
+                      " {}, key: {}, with error: No such file exists.",
+              volumeName, bucketName, keyName);
+    }
+
+    // don't throw exception if this flag is true.
+    if (skipFileNotFoundError) {
+      return fileStatus;
+    }
+
+    throw new OMException("Unable to get file status: volume: " +
+            volumeName + " bucket: " + bucketName + " key: " + keyName,
+            FILE_NOT_FOUND);
+  }
+
   /**
    * Ozone FS api to create a directory. Parent directories if do not exist
    * are created for the input directory.
@@ -2083,6 +2151,10 @@ public class KeyManagerImpl implements KeyManager {
       return fileStatusList;
     }
 
+    if (OzoneManagerRatisUtils.isOmLayoutVersionV1()) {
+      return listStatusV1(args, recursive, startKey, numEntries, clientAddress);
+    }
+
     String volumeName = args.getVolumeName();
     String bucketName = args.getBucketName();
     String keyName = args.getKeyName();
@@ -2220,6 +2292,332 @@ public class KeyManagerImpl implements KeyManager {
     return fileStatusList;
   }
 
+  public List<OzoneFileStatus> listStatusV1(OmKeyArgs args, boolean recursive,
+      String startKey, long numEntries, String clientAddress)
+          throws IOException {
+    Preconditions.checkNotNull(args, "Key args can not be null");
+
+    // unsorted OMKeyInfo list contains combine results from TableCache and DB.
+    List<OzoneFileStatus> fileStatusFinalList = new ArrayList<>();
+    LinkedHashSet<OzoneFileStatus> fileStatusList = new LinkedHashSet<>();
+    if (numEntries <= 0) {
+      return fileStatusFinalList;
+    }
+    String volumeName = args.getVolumeName();
+    String bucketName = args.getBucketName();
+    String keyName = args.getKeyName();
+    String seekFileInDB;
+    String seekDirInDB;
+    long prefixKeyInDB;
+    String prefixPath = keyName;
+    int countEntries = 0;
+
+    // TODO: recursive flag=true will be handled in HDDS-4360 jira.
+    metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName,
+            bucketName);
+    try {
+      if (Strings.isNullOrEmpty(startKey)) {
+        OzoneFileStatus fileStatus = getFileStatus(args, clientAddress);
+        if (fileStatus.isFile()) {
+          return Collections.singletonList(fileStatus);
+        }
+
+        // Not required to search in DeletedTable because all the deleted
+        // keys will be marked directly in dirTable or in keyTable by
+        // breaking the pointer to its sub-dirs and sub-files. So, there is no
+        // issue of inconsistency.
+
+        /*
+         * keyName is a directory.
+         * Say, "/a" is the dir name and its objectID is 1024; then the seek
+         * will be done with "1024/" to get all immediate descendants.
+         */
+        if (fileStatus.getKeyInfo() != null) {
+          prefixKeyInDB = fileStatus.getKeyInfo().getObjectID();
+        } else {
+          // list root directory.
+          String bucketKey = metadataManager.getBucketKey(volumeName,
+                  bucketName);
+          OmBucketInfo omBucketInfo =
+                  metadataManager.getBucketTable().get(bucketKey);
+          prefixKeyInDB = omBucketInfo.getObjectID();
+        }
+        seekFileInDB = metadataManager.getOzonePathKey(prefixKeyInDB, "");
+        seekDirInDB = metadataManager.getOzonePathKey(prefixKeyInDB, "");
+
+        // Order of seek -> (1)Seek dirs in dirTable (2)Seek files in fileTable
+        // 1. Seek the given key in key table.
+        countEntries = getFilesFromDirectory(fileStatusList, seekFileInDB,
+                prefixPath, prefixKeyInDB, startKey, countEntries, numEntries);
+        // 2. Seek the given key in dir table.
+        getDirectories(fileStatusList, seekDirInDB, prefixPath, prefixKeyInDB,
+                startKey, countEntries, numEntries, volumeName, bucketName,
+                recursive);
+      } else {
+        /*
+         * startKey will be used in iterator seek and sets the beginning point
+         * for key traversal.
+         * keyName will be used as parentID where the user has requested to
+         * list the keys from.
+         *
+         * When recursive flag=false, parentID won't change between two pages.
+         * For example: OM has a namespace like,
+         *    /a/1...1M files and /a/b/1...1M files.
+         *    /a/1...1M directories and /a/b/1...1M directories.
+         * Listing "/a", will always have the parentID as "a" irrespective of
+         * the startKey value.
+         */
+
+        // Check startKey is an immediate child of keyName. For example,
+        // keyName=/a/ and expected startKey=/a/b. startKey can't be /xyz/b.
+        if (!OzoneFSUtils.isImmediateChild(keyName, startKey)) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("StartKey {} is not an immediate child of keyName {}. " +
+                    "Returns empty list", startKey, keyName);
+          }
+          return Collections.emptyList();
+        }
+
+        OzoneFileStatus fileStatusInfo = getOzoneFileStatusV1(volumeName,
+                bucketName, startKey, false, null, true);
+
+        if (fileStatusInfo != null) {
+          prefixKeyInDB = fileStatusInfo.getKeyInfo().getParentObjectID();
+          if(fileStatusInfo.isDirectory()){
+            seekDirInDB = metadataManager.getOzonePathKey(prefixKeyInDB,
+                    fileStatusInfo.getKeyInfo().getFileName());
+
+            // Order of seek -> (1) Seek dirs only in dirTable. In OM, always
+            // the order of search is, first seek into fileTable and then
+            // dirTable. So, it's not required to search again in the fileTable.
+
+            // Seek the given key in dirTable.
+            getDirectories(fileStatusList, seekDirInDB, prefixPath,
+                    prefixKeyInDB, startKey, countEntries, numEntries,
+                    volumeName, bucketName, recursive);
+          } else {
+            seekFileInDB = metadataManager.getOzonePathKey(prefixKeyInDB,
+                    fileStatusInfo.getKeyInfo().getFileName());
+            // begins from the first sub-dir under the parent dir
+            seekDirInDB = metadataManager.getOzonePathKey(prefixKeyInDB, "");
+
+            // 1. Seek the given key in key table.
+            countEntries = getFilesFromDirectory(fileStatusList, seekFileInDB,
+                    prefixPath, prefixKeyInDB, startKey, countEntries,
+                    numEntries);
+            // 2. Seek the given key in dir table.
+            getDirectories(fileStatusList, seekDirInDB, prefixPath,
+                    prefixKeyInDB, startKey, countEntries, numEntries,
+                    volumeName, bucketName, recursive);
+          }
+        } else {
+          // TODO: HDDS-4364: startKey can be a non-existent key
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("StartKey {} is a non-existed key and returning empty " +
+                    "list", startKey);
+          }
+          return Collections.emptyList();
+        }
+      }
+    } finally {
+      metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName,
+              bucketName);
+    }
+    for (OzoneFileStatus fileStatus : fileStatusList) {
+      if (fileStatus.isFile()) {
+        // refreshPipeline flag check has been removed as part of
+        // https://issues.apache.org/jira/browse/HDDS-3658.
+        // Please refer this jira for more details.
+        refreshPipeline(fileStatus.getKeyInfo());
+
+        // No need to check if a key is deleted or not here, this is handled
+        // when adding entries to cacheKeyMap from DB.
+        if (args.getSortDatanodes()) {
+          sortDatanodeInPipeline(fileStatus.getKeyInfo(), clientAddress);
+        }
+      }
+    }
+    fileStatusFinalList.addAll(fileStatusList);
+    return fileStatusFinalList;
+  }
+
+  @SuppressWarnings("parameternumber")
+  protected int getDirectories(Set<OzoneFileStatus> fileStatusList,
+      String seekDirInDB, String prefixPath, long prefixKeyInDB,
+      String startKey, int countEntries, long numEntries, String volumeName,
+      String bucketName, boolean recursive) throws IOException {
+
+    Table dirTable = metadataManager.getDirectoryTable();
+    countEntries = listStatusFindDirsInTableCache(fileStatusList, dirTable,
+            prefixKeyInDB, seekDirInDB, prefixPath, startKey, volumeName,
+            bucketName, countEntries, numEntries);
+    TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
+            iterator = dirTable.iterator();
+
+    iterator.seek(seekDirInDB);
+
+    while (iterator.hasNext() && numEntries - countEntries > 0) {
+      OmDirectoryInfo dirInfo = iterator.value().getValue();
+      if (!isImmediateChild(dirInfo.getParentObjectID(), prefixKeyInDB)) {
+        break;
+      }
+
+      // TODO: recursive list will be handled in HDDS-4360 jira.
+      if (!recursive) {
+        String dirName = OMFileRequest.getAbsolutePath(prefixPath,
+                dirInfo.getName());
+        OmKeyInfo omKeyInfo = OMFileRequest.getOmKeyInfo(volumeName,
+                bucketName, dirInfo, dirName);
+        fileStatusList.add(new OzoneFileStatus(omKeyInfo, scmBlockSize,
+                true));
+        countEntries++;
+      }
+      // move to next entry in the DirTable
+      iterator.next();
+    }
+
+    return countEntries;
+  }
+
+  private int getFilesFromDirectory(Set<OzoneFileStatus> fileStatusList,
+      String seekKeyInDB, String prefixKeyPath, long prefixKeyInDB,
+      String startKey, int countEntries, long numEntries) throws IOException {
+
+    Table<String, OmKeyInfo> keyTable = metadataManager.getKeyTable();
+    countEntries = listStatusFindFilesInTableCache(fileStatusList, keyTable,
+            prefixKeyInDB, seekKeyInDB, prefixKeyPath, startKey,
+            countEntries, numEntries);
+    TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+            iterator = keyTable.iterator();
+    iterator.seek(seekKeyInDB);
+    while (iterator.hasNext() && numEntries - countEntries > 0) {
+      OmKeyInfo keyInfo = iterator.value().getValue();
+
+      if (!isImmediateChild(keyInfo.getParentObjectID(), prefixKeyInDB)) {
+        break;
+      }
+
+      keyInfo.setFileName(keyInfo.getKeyName());
+      String fullKeyPath = OMFileRequest.getAbsolutePath(prefixKeyPath,
+              keyInfo.getKeyName());
+      keyInfo.setKeyName(fullKeyPath);
+      fileStatusList.add(new OzoneFileStatus(keyInfo, scmBlockSize, false));
+      countEntries++;
+      iterator.next(); // move to next entry in the table
+    }
+    return countEntries;
+  }
+
+  private boolean isImmediateChild(long parentId, long ancestorId) {
+    return parentId == ancestorId;
+  }
+
+  /**
+   * Helper function for listStatus to find key in FileTableCache.
+   */
+  @SuppressWarnings("parameternumber")
+  private int listStatusFindFilesInTableCache(
+          Set<OzoneFileStatus> fileStatusList, Table<String,
+          OmKeyInfo> keyTable, long prefixKeyInDB, String seekKeyInDB,
+          String prefixKeyPath, String startKey, int countEntries,
+          long numEntries) {
+
+    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>>
+            cacheIter = keyTable.cacheIterator();
+
+    // TODO: recursive list will be handled in HDDS-4360 jira.
+    while (cacheIter.hasNext() && numEntries - countEntries > 0) {
+      Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>> entry =
+              cacheIter.next();
+      String cacheKey = entry.getKey().getCacheKey();
+      OmKeyInfo cacheOmKeyInfo = entry.getValue().getCacheValue();
+      // cacheOmKeyInfo is null if an entry is deleted in cache
+      if(cacheOmKeyInfo == null){
+        continue;
+      }
+
+      cacheOmKeyInfo.setFileName(cacheOmKeyInfo.getKeyName());
+      String fullKeyPath = OMFileRequest.getAbsolutePath(prefixKeyPath,
+              cacheOmKeyInfo.getKeyName());
+      cacheOmKeyInfo.setKeyName(fullKeyPath);
+
+      countEntries = addKeyInfoToFileStatusList(fileStatusList, prefixKeyInDB,
+              seekKeyInDB, startKey, countEntries, cacheKey, cacheOmKeyInfo,
+              false);
+    }
+    return countEntries;
+  }
+
+  /**
+   * Helper function for listStatus to find key in DirTableCache.
+   */
+  @SuppressWarnings("parameternumber")
+  private int listStatusFindDirsInTableCache(
+          Set<OzoneFileStatus> fileStatusList, Table<String,
+          OmDirectoryInfo> dirTable, long prefixKeyInDB, String seekKeyInDB,
+          String prefixKeyPath, String startKey, String volumeName,
+          String bucketName, int countEntries, long numEntries) {
+
+    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmDirectoryInfo>>>
+            cacheIter = dirTable.cacheIterator();
+    // seekKeyInDB will have two type of values.
+    // 1. "1024/"   -> startKey is null or empty
+    // 2. "1024/b"  -> startKey exists
+    // TODO: recursive list will be handled in HDDS-4360 jira.
+    while (cacheIter.hasNext() && numEntries - countEntries > 0) {
+      Map.Entry<CacheKey<String>, CacheValue<OmDirectoryInfo>> entry =
+              cacheIter.next();
+      String cacheKey = entry.getKey().getCacheKey();
+      OmDirectoryInfo cacheOmDirInfo = entry.getValue().getCacheValue();
+      if(cacheOmDirInfo == null){
+        continue;
+      }
+      String fullDirPath = OMFileRequest.getAbsolutePath(prefixKeyPath,
+              cacheOmDirInfo.getName());
+      OmKeyInfo cacheDirKeyInfo = OMFileRequest.getOmKeyInfo(volumeName,
+              bucketName, cacheOmDirInfo, fullDirPath);
+
+      countEntries = addKeyInfoToFileStatusList(fileStatusList, prefixKeyInDB,
+              seekKeyInDB, startKey, countEntries, cacheKey, cacheDirKeyInfo,
+              true);
+    }
+    return countEntries;
+  }
+
+  @SuppressWarnings("parameternumber")
+  private int addKeyInfoToFileStatusList(Set<OzoneFileStatus> fileStatusList,
+      long prefixKeyInDB, String seekKeyInDB, String startKey,
+      int countEntries, String cacheKey, OmKeyInfo cacheOmKeyInfo,
+      boolean isDirectory) {
+    // seekKeyInDB will have two type of values.
+    // 1. "1024/"   -> startKey is null or empty
+    // 2. "1024/b"  -> startKey exists
+    if (StringUtils.isBlank(startKey)) {
+      // startKey is null or empty, then the seekKeyInDB="1024/"
+      if (cacheKey.startsWith(seekKeyInDB)) {
+        OzoneFileStatus fileStatus = new OzoneFileStatus(cacheOmKeyInfo,
+                scmBlockSize, isDirectory);
+        fileStatusList.add(fileStatus);
+        countEntries++;
+      }
+    } else {
+      // startKey not empty, then the seekKeyInDB="1024/b" and
+      // seekKeyInDBWithOnlyParentID = "1024/". The trailing delimiter avoids
+      // accidentally matching entries of a longer parentID such as "102444".
+      // Here, it has to list all the keys after "1024/b" and requires >=0
+      // string comparison.
+      String seekKeyInDBWithOnlyParentID = prefixKeyInDB + OM_KEY_PREFIX;
+      if (cacheKey.startsWith(seekKeyInDBWithOnlyParentID) &&
+              cacheKey.compareTo(seekKeyInDB) >= 0) {
+        OzoneFileStatus fileStatus = new OzoneFileStatus(cacheOmKeyInfo,
+                scmBlockSize, isDirectory);
+        fileStatusList.add(fileStatus);
+        countEntries++;
+      }
+    }
+    return countEntries;
+  }
+
   private String getNextGreaterString(String volumeName, String bucketName,
       String keyPrefix) throws IOException {
     // Increment the last character of the string and return the new ozone key.
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java
index 4367018..2ff6d9a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java
@@ -149,8 +149,8 @@ public class OMDirectoryCreateRequestV1 extends OMDirectoryCreateRequest {
 
         // prepare all missing parents
         missingParentInfos =
-                OMDirectoryCreateRequestV1.getAllMissingParentDirInfo(
-                        ozoneManager, keyArgs, omPathInfo, trxnLogIndex);
+                OMDirectoryCreateRequestV1.getAllMissingParentDirInfo(keyArgs,
+                        omPathInfo, trxnLogIndex);
 
         // prepare leafNode dir
         OmDirectoryInfo dirInfo = createDirectoryInfoWithACL(
@@ -233,7 +233,6 @@ public class OMDirectoryCreateRequestV1 extends OMDirectoryCreateRequest {
   /**
    * Construct OmDirectoryInfo for every parent directory in missing list.
    *
-   * @param ozoneManager Ozone Manager
    * @param keyArgs      key arguments
    * @param pathInfo     list of parent directories to be created and its ACLs
    * @param trxnLogIndex transaction log index id
@@ -241,10 +240,8 @@ public class OMDirectoryCreateRequestV1 extends OMDirectoryCreateRequest {
    * @throws IOException DB failure
    */
   public static List<OmDirectoryInfo> getAllMissingParentDirInfo(
-          OzoneManager ozoneManager, KeyArgs keyArgs,
-          OMFileRequest.OMPathInfoV1 pathInfo, long trxnLogIndex)
-          throws IOException {
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+          KeyArgs keyArgs, OMFileRequest.OMPathInfoV1 pathInfo,
+          long trxnLogIndex) throws IOException {
     List<OmDirectoryInfo> missingParentInfos = new ArrayList<>();
 
     ImmutablePair<Long, Long> objIdRange = OMFileRequest
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
index e46416b..03f3639 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
@@ -153,8 +153,8 @@ public class OMFileCreateRequestV1 extends OMFileCreateRequest {
 
       // add all missing parents to dir table
       missingParentInfos =
-              OMDirectoryCreateRequestV1.getAllMissingParentDirInfo(
-                      ozoneManager, keyArgs, pathInfoV1, trxnLogIndex);
+              OMDirectoryCreateRequestV1.getAllMissingParentDirInfo(keyArgs,
+                      pathInfoV1, trxnLogIndex);
 
       // total number of keys created.
       numKeysCreated = missingParentInfos.size();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index 833c564..91be9a2 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -20,13 +20,17 @@ package org.apache.hadoop.ozone.om.request.file;
 
 import java.io.IOException;
 import java.nio.file.Path;
+import java.nio.file.Paths;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
 
 import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
 import org.apache.commons.lang3.tuple.ImmutablePair;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
@@ -36,7 +40,10 @@ import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -463,7 +470,6 @@ public final class OMFileRequest {
    * @param omFileInfo        key info
    * @param fileName          file name
    * @param trxnLogIndex      transaction log index
-   * @return dbOmFileInfo, which keeps leaf node name in keyName field
    */
   public static void addOpenFileTableCacheEntry(
           OMMetadataManager omMetadataManager, String dbOpenFileName,
@@ -491,7 +497,6 @@ public final class OMFileRequest {
    * @param omFileInfo        key info
    * @param fileName          file name
    * @param trxnLogIndex      transaction log index
-   * @return dbOmFileInfo, which keeps leaf node name in keyName field
    */
   public static void addFileTableCacheEntry(
           OMMetadataManager omMetadataManager, String dbFileKey,
@@ -583,4 +588,112 @@ public final class OMFileRequest {
     return dbOmKeyInfo;
   }
 
+  /**
+   * Gets OmKeyInfo if exists for the given key name in the DB.
+   *
+   * @param omMetadataMgr metadata manager
+   * @param volumeName    volume name
+   * @param bucketName    bucket name
+   * @param keyName       key name
+   * @param scmBlockSize  scm block size
+   * @return OzoneFileStatus
+   * @throws IOException DB failure
+   */
+  @Nullable
+  public static OzoneFileStatus getOMKeyInfoIfExists(
+      OMMetadataManager omMetadataMgr, String volumeName, String bucketName,
+      String keyName, long scmBlockSize) throws IOException {
+
+    Path keyPath = Paths.get(keyName);
+    Iterator<Path> elements = keyPath.iterator();
+    String bucketKey = omMetadataMgr.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataMgr.getBucketTable().get(bucketKey);
+
+    long lastKnownParentId = omBucketInfo.getObjectID();
+    OmDirectoryInfo omDirInfo = null;
+    while (elements.hasNext()) {
+      String fileName = elements.next().toString();
+
+      // For example, /vol1/buck1/a/b/c/d/e/file1.txt
+      // 1. Do lookup path component on directoryTable starting from bucket
+      // 'buck1' to the leaf node component, which is 'file1.txt'.
+      // 2. If there is no dir exists for the leaf node component 'file1.txt'
+      // then do look it on fileTable.
+      String dbNodeName = omMetadataMgr.getOzonePathKey(
+              lastKnownParentId, fileName);
+      omDirInfo = omMetadataMgr.getDirectoryTable().get(dbNodeName);
+
+      if (omDirInfo != null) {
+        lastKnownParentId = omDirInfo.getObjectID();
+      } else if (!elements.hasNext()) {
+        // reached last path component. Check file exists for the given path.
+        OmKeyInfo omKeyInfo = OMFileRequest.getOmKeyInfoFromFileTable(false,
+                omMetadataMgr, dbNodeName, keyName);
+        if (omKeyInfo != null) {
+          return new OzoneFileStatus(omKeyInfo, scmBlockSize, false);
+        }
+      } else {
+        // Missing intermediate directory and just return null;
+        // key not found in DB
+        return null;
+      }
+    }
+
+    if (omDirInfo != null) {
+      OmKeyInfo omKeyInfo = getOmKeyInfo(volumeName, bucketName, omDirInfo,
+              keyName);
+      return new OzoneFileStatus(omKeyInfo, scmBlockSize, true);
+    }
+
+    // key not found in DB
+    return null;
+  }
+
+  /**
+   * Prepare OmKeyInfo from OmDirectoryInfo.
+   *
+   * @param volumeName volume name
+   * @param bucketName bucket name
+   * @param dirInfo    directory info
+   * @param keyName    user given key name
+   * @return OmKeyInfo object
+   */
+  @NotNull
+  public static OmKeyInfo getOmKeyInfo(String volumeName, String bucketName,
+      OmDirectoryInfo dirInfo, String keyName) {
+
+    OmKeyInfo.Builder builder = new OmKeyInfo.Builder();
+    builder.setParentObjectID(dirInfo.getParentObjectID());
+    builder.setKeyName(keyName);
+    builder.setAcls(dirInfo.getAcls());
+    builder.addAllMetadata(dirInfo.getMetadata());
+    builder.setVolumeName(volumeName);
+    builder.setBucketName(bucketName);
+    builder.setCreationTime(dirInfo.getCreationTime());
+    builder.setModificationTime(dirInfo.getModificationTime());
+    builder.setObjectID(dirInfo.getObjectID());
+    builder.setUpdateID(dirInfo.getUpdateID());
+    builder.setFileName(dirInfo.getName());
+    builder.setReplicationType(HddsProtos.ReplicationType.RATIS);
+    builder.setReplicationFactor(HddsProtos.ReplicationFactor.ONE);
+    builder.setOmKeyLocationInfos(Collections.singletonList(
+            new OmKeyLocationInfoGroup(0, new ArrayList<>())));
+    return builder.build();
+  }
+
+  /**
+   * Returns absolute path.
+   *
+   * @param prefixName prefix path
+   * @param fileName   file name
+   * @return absolute path
+   */
+  @NotNull
+  public static String getAbsolutePath(String prefixName, String fileName) {
+    if (Strings.isNullOrEmpty(prefixName)) {
+      return fileName;
+    }
+    return prefixName.concat(OzoneConsts.OZONE_URI_DELIMITER).concat(fileName);
+  }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 01/03: HDDS-2949: mkdir : store directory entries in a separate table (#1404)

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch HDDS-2939
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 88a41083ca6f0669d4ed9b242ec252b30fc538c3
Author: Rakesh Radhakrishnan <ra...@apache.org>
AuthorDate: Fri Oct 2 00:45:19 2020 +0530

    HDDS-2949: mkdir : store directory entries in a separate table (#1404)
---
 .../common/src/main/resources/ozone-default.xml    |  10 +
 .../org/apache/hadoop/ozone/om/OMConfigKeys.java   |   7 +
 .../hadoop/ozone/om/helpers/OmDirectoryInfo.java   | 266 +++++++++
 .../hadoop/ozone/om/helpers/OzoneFSUtils.java      |  15 +
 .../apache/hadoop/fs/ozone/TestOzoneDirectory.java | 207 +++++++
 .../src/main/proto/OmClientProtocol.proto          |  11 +
 .../apache/hadoop/ozone/om/OMMetadataManager.java  |  17 +
 .../java/org/apache/hadoop/ozone/om/OMMetrics.java |   4 +
 .../hadoop/ozone/om/OmMetadataManagerImpl.java     |  24 +
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |  14 +
 .../ozone/om/codec/OmDirectoryInfoCodec.java       |  60 ++
 .../om/ratis/utils/OzoneManagerRatisUtils.java     |  18 +
 .../request/file/OMDirectoryCreateRequestV1.java   | 312 ++++++++++
 .../ozone/om/request/file/OMFileRequest.java       | 199 +++++++
 .../response/file/OMDirectoryCreateResponseV1.java | 103 ++++
 .../ozone/om/request/TestOMRequestUtils.java       |  37 ++
 .../file/TestOMDirectoryCreateRequestV1.java       | 649 +++++++++++++++++++++
 .../file/TestOMDirectoryCreateResponseV1.java      |  88 +++
 18 files changed, 2041 insertions(+)

diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 4853978..4689f02 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -2546,4 +2546,14 @@
       filesystem semantics.
     </description>
   </property>
+
+  <property>
+    <name>ozone.om.layout.version</name>
+    <tag>OZONE, OM</tag>
+    <value>V0</value>
+    <description>Temporary workaround for OM upgrade and will be replaced once
+      upgrade HDDS-3698 story reaches consensus. Defaulting to 'V0' so that
+      existing unit test cases won't be affected. New OM version should be 'V1'
+    </description>
+  </property>
 </configuration>
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index f16679a..db3c453 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -246,4 +246,11 @@ public final class OMConfigKeys {
       "ozone.om.enable.filesystem.paths";
   public static final boolean OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT =
       false;
+
+  // TODO: Temporary workaround for OM upgrade path and will be replaced once
+  //  upgrade HDDS-3698 story reaches consensus. Defaulting to 'V0' so that
+  //  existing unit test cases won't be affected. New OM version should be 'V1'.
+  public static final String OZONE_OM_LAYOUT_VERSION =
+          "ozone.om.layout.version";
+  public static final String OZONE_OM_LAYOUT_VERSION_DEFAULT = "V0";
 }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java
new file mode 100644
index 0000000..4c82047
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java
@@ -0,0 +1,266 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+
+import java.util.*;
+
+/**
+ * This class represents the directory information by keeping each component
+ * in the user given path and a pointer to its parent directory element in the
+ * path. Also, it stores directory node related metdata details.
+ */
+public class OmDirectoryInfo extends WithObjectID {
+  private long parentObjectID; // pointer to parent directory
+
+  private String name; // directory name
+
+  private long creationTime;
+  private long modificationTime;
+
+  private List<OzoneAcl> acls;
+
+  public OmDirectoryInfo(Builder builder) {
+    this.name = builder.name;
+    this.acls = builder.acls;
+    this.metadata = builder.metadata;
+    this.objectID = builder.objectID;
+    this.updateID = builder.updateID;
+    this.parentObjectID = builder.parentObjectID;
+    this.creationTime = builder.creationTime;
+    this.modificationTime = builder.modificationTime;
+  }
+
+  /**
+   * Returns new builder class that builds a OmPrefixInfo.
+   *
+   * @return Builder
+   */
+  public static OmDirectoryInfo.Builder newBuilder() {
+    return new OmDirectoryInfo.Builder();
+  }
+
+  /**
+   * Builder for Directory Info.
+   */
+  public static class Builder {
+    private long parentObjectID; // pointer to parent directory
+
+    private long objectID;
+    private long updateID;
+
+    private String name;
+
+    private long creationTime;
+    private long modificationTime;
+
+    private List<OzoneAcl> acls;
+    private Map<String, String> metadata;
+
+    public Builder() {
+      //Default values
+      this.acls = new LinkedList<>();
+      this.metadata = new HashMap<>();
+    }
+
+    public Builder setParentObjectID(long parentObjectId) {
+      this.parentObjectID = parentObjectId;
+      return this;
+    }
+
+    public Builder setObjectID(long objectId) {
+      this.objectID = objectId;
+      return this;
+    }
+
+    public Builder setUpdateID(long updateId) {
+      this.updateID = updateId;
+      return this;
+    }
+
+    public Builder setName(String dirName) {
+      this.name = dirName;
+      return this;
+    }
+
+    public Builder setCreationTime(long newCreationTime) {
+      this.creationTime = newCreationTime;
+      return this;
+    }
+
+    public Builder setModificationTime(long newModificationTime) {
+      this.modificationTime = newModificationTime;
+      return this;
+    }
+
+    public Builder setAcls(List<OzoneAcl> listOfAcls) {
+      if (listOfAcls != null) {
+        this.acls.addAll(listOfAcls);
+      }
+      return this;
+    }
+
+    public Builder addAcl(OzoneAcl ozoneAcl) {
+      if (ozoneAcl != null) {
+        this.acls.add(ozoneAcl);
+      }
+      return this;
+    }
+
+    public Builder addMetadata(String key, String value) {
+      metadata.put(key, value);
+      return this;
+    }
+
+    public Builder addAllMetadata(Map<String, String> additionalMetadata) {
+      if (additionalMetadata != null) {
+        metadata.putAll(additionalMetadata);
+      }
+      return this;
+    }
+
+    public OmDirectoryInfo build() {
+      return new OmDirectoryInfo(this);
+    }
+  }
+
+  @Override
+  public String toString() {
+    return getPath() + ":" + getObjectID();
+  }
+
+  public long getParentObjectID() {
+    return parentObjectID;
+  }
+
+  public String getPath() {
+    return getParentObjectID() + OzoneConsts.OM_KEY_PREFIX + getName();
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  public long getCreationTime() {
+    return creationTime;
+  }
+
+  public long getModificationTime() {
+    return modificationTime;
+  }
+
+  public List<OzoneAcl> getAcls() {
+    return acls;
+  }
+
+  /**
+   * Creates DirectoryInfo protobuf from OmDirectoryInfo.
+   */
+  public OzoneManagerProtocolProtos.DirectoryInfo getProtobuf() {
+    OzoneManagerProtocolProtos.DirectoryInfo.Builder pib =
+            OzoneManagerProtocolProtos.DirectoryInfo.newBuilder().setName(name)
+                    .setCreationTime(creationTime)
+                    .setModificationTime(modificationTime)
+                    .addAllMetadata(KeyValueUtil.toProtobuf(metadata))
+                    .setObjectID(objectID)
+                    .setUpdateID(updateID)
+                    .setParentID(parentObjectID);
+    if (acls != null) {
+      pib.addAllAcls(OzoneAclUtil.toProtobuf(acls));
+    }
+    return pib.build();
+  }
+
+  /**
+   * Parses DirectoryInfo protobuf and creates OmPrefixInfo.
+   * @param dirInfo
+   * @return instance of OmDirectoryInfo
+   */
+  public static OmDirectoryInfo getFromProtobuf(
+          OzoneManagerProtocolProtos.DirectoryInfo dirInfo) {
+    OmDirectoryInfo.Builder opib = OmDirectoryInfo.newBuilder()
+            .setName(dirInfo.getName())
+            .setCreationTime(dirInfo.getCreationTime())
+            .setModificationTime(dirInfo.getModificationTime())
+            .setAcls(OzoneAclUtil.fromProtobuf(dirInfo.getAclsList()));
+    if (dirInfo.getMetadataList() != null) {
+      opib.addAllMetadata(KeyValueUtil
+              .getFromProtobuf(dirInfo.getMetadataList()));
+    }
+    if (dirInfo.hasObjectID()) {
+      opib.setObjectID(dirInfo.getObjectID());
+    }
+    if (dirInfo.hasParentID()) {
+      opib.setParentObjectID(dirInfo.getParentID());
+    }
+    if (dirInfo.hasUpdateID()) {
+      opib.setUpdateID(dirInfo.getUpdateID());
+    }
+    return opib.build();
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    OmDirectoryInfo omDirInfo = (OmDirectoryInfo) o;
+    return creationTime == omDirInfo.creationTime &&
+            modificationTime == omDirInfo.modificationTime &&
+            name.equals(omDirInfo.name) &&
+            Objects.equals(metadata, omDirInfo.metadata) &&
+            Objects.equals(acls, omDirInfo.acls) &&
+            objectID == omDirInfo.objectID &&
+            updateID == omDirInfo.updateID &&
+            parentObjectID == omDirInfo.parentObjectID;
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(objectID, parentObjectID, name);
+  }
+
+  /**
+   * Return a new copy of the object.
+   */
+  public OmDirectoryInfo copyObject() {
+    OmDirectoryInfo.Builder builder = new Builder()
+            .setName(name)
+            .setCreationTime(creationTime)
+            .setModificationTime(modificationTime)
+            .setParentObjectID(parentObjectID)
+            .setObjectID(objectID)
+            .setUpdateID(updateID);
+
+    acls.forEach(acl -> builder.addAcl(new OzoneAcl(acl.getType(),
+            acl.getName(), (BitSet) acl.getAclBitSet().clone(),
+            acl.getAclScope())));
+
+    if (metadata != null) {
+      metadata.forEach((k, v) -> builder.addMetadata(k, v));
+    }
+
+    return builder.build();
+  }
+}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
index d1491ed..96df56f 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.om.helpers;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.util.StringUtils;
 
+import javax.annotation.Nonnull;
 import java.nio.file.Paths;
 
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
@@ -116,4 +117,18 @@ public final class OzoneFSUtils {
     }
     return true;
   }
+
+  /**
+   * The function returns leaf node name from the given absolute path. For
+   * example, the given key path '/a/b/c/d/e/file1' then it returns leaf node
+   * name 'file1'.
+   */
+  public static String getFileName(@Nonnull String keyName) {
+    java.nio.file.Path fileName = Paths.get(keyName).getFileName();
+    if (fileName != null) {
+      return fileName.toString();
+    }
+    // failed to converts a path key
+    return keyName;
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneDirectory.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneDirectory.java
new file mode 100644
index 0000000..87e9f09
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneDirectory.java
@@ -0,0 +1,207 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.TestDataUtil;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.util.StringUtils;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.concurrent.TimeoutException;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
+import static org.junit.Assert.fail;
+
/**
 * Test verifies the entries and operations in directory table.
 */
public class TestOzoneDirectory {

  @Rule
  public Timeout timeout = new Timeout(300000);

  private static final Logger LOG =
          LoggerFactory.getLogger(TestOzoneDirectory.class);

  // Mini cluster and filesystem handles shared by the test and teardown.
  private MiniOzoneCluster cluster;
  private FileSystem fs;
  // NOTE(review): o3fs is never assigned or used in this class — candidate
  // for removal.
  private OzoneFileSystem o3fs;
  private String volumeName;
  private String bucketName;

  @Test(timeout = 300_000)
  public void testMultiLevelDirs() throws Exception {
    setupOzoneFileSystem();
    // Op 1. create dir -> /d1/d2/d3/d4/
    // Op 2. create dir -> /d1/d2/d3/d4/d5
    // Op 3. create dir -> /d1/d2/d3/d4/d6
    Path parent = new Path("/d1/d2/d3/d4/");
    fs.mkdirs(parent);

    OMMetadataManager omMgr = cluster.getOzoneManager().getMetadataManager();
    OmBucketInfo omBucketInfo = omMgr.getBucketTable().get(
            omMgr.getBucketKey(volumeName, bucketName));
    Assert.assertNotNull("Failed to find bucketInfo", omBucketInfo);

    // Walk the created chain level by level, verifying that each directory
    // entry points at its parent's objectID (the bucket is the root parent).
    ArrayList<String> dirKeys = new ArrayList<>();
    long d1ObjectID = verifyDirKey(omBucketInfo.getObjectID(), "d1", "/d1",
            dirKeys, omMgr);
    long d2ObjectID = verifyDirKey(d1ObjectID, "d2", "/d1/d2", dirKeys,
            omMgr);
    long d3ObjectID = verifyDirKey(d2ObjectID, "d3", "/d1/d2/d3",
            dirKeys, omMgr);
    long d4ObjectID = verifyDirKey(d3ObjectID, "d4", "/d1/d2/d3/d4",
            dirKeys, omMgr);

    Assert.assertEquals("Wrong OM numKeys metrics",
            4, cluster.getOzoneManager().getMetrics().getNumKeys());

    // verify entries in directory table
    TableIterator<String, ? extends
            Table.KeyValue<String, OmDirectoryInfo>> iterator =
            omMgr.getDirectoryTable().iterator();
    iterator.seekToFirst();
    int count = dirKeys.size();
    Assert.assertEquals("Unexpected directory table entries!", 4, count);
    while (iterator.hasNext()) {
      count--;
      Table.KeyValue<String, OmDirectoryInfo> value = iterator.next();
      verifyKeyFormat(value.getKey(), dirKeys);
    }
    Assert.assertEquals("Unexpected directory table entries!", 0, count);

    // verify entries in key table: directories must land in the directory
    // table only, never in the key table.
    TableIterator<String, ? extends
            Table.KeyValue<String, OmKeyInfo>> keyTableItr =
            omMgr.getKeyTable().iterator();
    while (keyTableItr.hasNext()) {
      fail("Shouldn't add any entries in KeyTable!");
    }

    // create sub-dirs under same parent
    Path subDir5 = new Path("/d1/d2/d3/d4/d5");
    fs.mkdirs(subDir5);
    Path subDir6 = new Path("/d1/d2/d3/d4/d6");
    fs.mkdirs(subDir6);
    long d5ObjectID = verifyDirKey(d4ObjectID, "d5",
            "/d1/d2/d3/d4/d5", dirKeys, omMgr);
    long d6ObjectID = verifyDirKey(d4ObjectID, "d6",
            "/d1/d2/d3/d4/d6", dirKeys, omMgr);
    // Siblings share a parent but must have distinct objectIDs.
    Assert.assertTrue("Wrong objectIds for sub-dirs[" + d5ObjectID +
                    "/d5, " + d6ObjectID + "/d6] of same parent!",
            d5ObjectID != d6ObjectID);

    Assert.assertEquals("Wrong OM numKeys metrics",
            6, cluster.getOzoneManager().getMetrics().getNumKeys());
  }

  /**
   * Verify key name format and the DB key existence in the expected dirKeys
   * list.
   *
   * @param key     table keyName
   * @param dirKeys expected keyName
   */
  private void verifyKeyFormat(String key, ArrayList<String> dirKeys) {
    // Expected DB key format: "<parentObjectID>/<dirName>" - exactly two
    // parts when split on the key-prefix delimiter.
    String[] keyParts = StringUtils.split(key,
            OzoneConsts.OM_KEY_PREFIX.charAt(0));
    Assert.assertEquals("Invalid KeyName", 2, keyParts.length);
    // Remove as we go so leftovers/duplicates are detected by the caller.
    boolean removed = dirKeys.remove(key);
    Assert.assertTrue("Key:" + key + " doesn't exists in directory table!",
            removed);
  }

  /**
   * Looks up one directory entry by its "parentId/dirKey" DB key and checks
   * its parent linkage, name, timestamps and string representation.
   *
   * @return the objectID of the verified directory, used as the parentId
   *         for the next level's lookup
   */
  long verifyDirKey(long parentId, String dirKey, String absolutePath,
                    ArrayList<String> dirKeys, OMMetadataManager omMgr)
          throws Exception {
    String dbKey = parentId + "/" + dirKey;
    dirKeys.add(dbKey);
    OmDirectoryInfo dirInfo = omMgr.getDirectoryTable().get(dbKey);
    Assert.assertNotNull("Failed to find " + absolutePath +
            " using dbKey: " + dbKey, dirInfo);
    Assert.assertEquals("Parent Id mismatches", parentId,
            dirInfo.getParentObjectID());
    Assert.assertEquals("Mismatches directory name", dirKey,
            dirInfo.getName());
    Assert.assertTrue("Mismatches directory creation time param",
            dirInfo.getCreationTime() > 0);
    // A freshly created dir's modification time equals its creation time.
    Assert.assertEquals("Mismatches directory modification time param",
            dirInfo.getCreationTime(), dirInfo.getModificationTime());
    Assert.assertEquals("Wrong representation!",
            dbKey + ":" + dirInfo.getObjectID(), dirInfo.toString());
    return dirInfo.getObjectID();
  }

  /**
   * Starts a 3-datanode mini cluster with the V1 (directory table) OM
   * layout and mounts an o3fs filesystem on a fresh volume/bucket.
   */
  private void setupOzoneFileSystem()
          throws IOException, TimeoutException, InterruptedException {
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.setInt(FS_TRASH_INTERVAL_KEY, 1);
    // V1 enables the separate directory-table code path under test.
    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
    cluster = MiniOzoneCluster.newBuilder(conf)
            .setNumDatanodes(3)
            .build();
    cluster.waitForClusterToBeReady();
    // create a volume and a bucket to be used by OzoneFileSystem
    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster);
    volumeName = bucket.getVolumeName();
    bucketName = bucket.getName();

    String rootPath = String.format("%s://%s.%s/",
            OzoneConsts.OZONE_URI_SCHEME, bucket.getName(),
            bucket.getVolumeName());

    // Set the fs.defaultFS and start the filesystem
    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
    // Set the number of keys to be processed during batch operate.
    conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5);
    fs = FileSystem.get(conf);
  }

  @After
  public void tearDown() {
    IOUtils.closeQuietly(fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }

}
\ No newline at end of file
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index b347dc1..abbee63 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -770,6 +770,17 @@ message KeyInfo {
     optional uint64 updateID = 15;
 }
 
// Persisted form of one directory entry in the OM directory table.
message DirectoryInfo {
    required string name = 1;              // directory name (path component)
    required uint64 creationTime = 2;
    required uint64 modificationTime = 3;
    repeated hadoop.hdds.KeyValue metadata = 4;
    repeated OzoneAclInfo acls = 5;
    required uint64 objectID = 6;          // unique id of this entry
    required uint64 updateID = 7;
    required uint64 parentID = 8;          // objectID of the parent directory
}
+
 message RepeatedKeyInfo {
     repeated KeyInfo keyInfo = 1;
 }
diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
index c687a4b..4c9025b 100644
--- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
+++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
@@ -24,6 +24,7 @@ import java.util.Set;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.common.BlockGroup;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
@@ -365,6 +366,12 @@ public interface OMMetadataManager {
       String bucketName, String prefix) throws IOException;
 
   /**
+   * Gets the DirectoryTable.
+   * @return Table.
+   */
+  Table<String, OmDirectoryInfo> getDirectoryTable();
+
+  /**
    * Return table mapped to the specified table name.
    * @param tableName
    * @return Table
@@ -382,4 +389,14 @@ public interface OMMetadataManager {
    * @return table names in OM DB.
    */
   Set<String> listTableNames();
+
  /**
   * Given the objectID of a parent directory (or bucket) and the name of a
   * child path component, returns the corresponding DB path key used by the
   * directory table.
   *
   * @param parentObjectId - parent object Id
   * @param pathComponentName   - path component name
   * @return DB directory key as String.
   */
  String getOzonePathKey(long parentObjectId, String pathComponentName);
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
index 7e79fe7..9139055 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
@@ -200,6 +200,10 @@ public class OMMetrics {
     numKeys.incr();
   }
 
  /**
   * Increments the numKeys counter by the given delta (bulk variant of
   * {@link #incNumKeys()}).
   *
   * @param val amount to add to the key count
   */
  public void incNumKeys(long val) {
    numKeys.incr(val);
  }
+
   public void decNumKeys() {
     numKeys.incr(-1);
   }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index da7e985..3e9a476 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.common.BlockGroup;
 import org.apache.hadoop.ozone.om.codec.OMTransactionInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmBucketInfoCodec;
+import org.apache.hadoop.ozone.om.codec.OmDirectoryInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmKeyInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmMultipartKeyInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmPrefixInfoCodec;
@@ -59,6 +60,7 @@ import org.apache.hadoop.ozone.om.codec.UserVolumeInfoCodec;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
@@ -125,6 +127,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
    * |----------------------------------------------------------------------|
    * |  multipartInfoTable| /volumeName/bucketName/keyName/uploadId ->...   |
    * |----------------------------------------------------------------------|
+   * |  directoryTable    | parentId/directoryName -> DirectoryInfo         |
    * |----------------------------------------------------------------------|
    * |  transactionInfoTable | #TRANSACTIONINFO -> OMTransactionInfo        |
    * |----------------------------------------------------------------------|
@@ -140,6 +143,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
   public static final String S3_SECRET_TABLE = "s3SecretTable";
   public static final String DELEGATION_TOKEN_TABLE = "dTokenTable";
   public static final String PREFIX_TABLE = "prefixTable";
+  public static final String DIRECTORY_TABLE = "directoryTable";
   public static final String TRANSACTION_INFO_TABLE =
       "transactionInfoTable";
 
@@ -158,6 +162,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
   private Table s3SecretTable;
   private Table dTokenTable;
   private Table prefixTable;
+  private Table dirTable;
   private Table transactionInfoTable;
   private boolean isRatisEnabled;
   private boolean ignorePipelineinKey;
@@ -231,6 +236,11 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
   }
 
   @Override
+  public Table<String, OmDirectoryInfo> getDirectoryTable() {
+    return dirTable;
+  }
+
+  @Override
   public Table<String, OmMultipartKeyInfo> getMultipartInfoTable() {
     return multipartInfoTable;
   }
@@ -323,6 +333,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
         .addTable(DELEGATION_TOKEN_TABLE)
         .addTable(S3_SECRET_TABLE)
         .addTable(PREFIX_TABLE)
+        .addTable(DIRECTORY_TABLE)
         .addTable(TRANSACTION_INFO_TABLE)
         .addCodec(OzoneTokenIdentifier.class, new TokenIdentifierCodec())
         .addCodec(OmKeyInfo.class, new OmKeyInfoCodec(true))
@@ -334,6 +345,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
         .addCodec(OmMultipartKeyInfo.class, new OmMultipartKeyInfoCodec())
         .addCodec(S3SecretValue.class, new S3SecretValueCodec())
         .addCodec(OmPrefixInfo.class, new OmPrefixInfoCodec())
+        .addCodec(OmDirectoryInfo.class, new OmDirectoryInfoCodec())
         .addCodec(OMTransactionInfo.class, new OMTransactionInfoCodec());
   }
 
@@ -388,6 +400,10 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
         OmPrefixInfo.class);
     checkTableStatus(prefixTable, PREFIX_TABLE);
 
+    dirTable = this.store.getTable(DIRECTORY_TABLE, String.class,
+            OmDirectoryInfo.class);
+    checkTableStatus(dirTable, DIRECTORY_TABLE);
+
     transactionInfoTable = this.store.getTable(TRANSACTION_INFO_TABLE,
         String.class, OMTransactionInfo.class);
     checkTableStatus(transactionInfoTable, TRANSACTION_INFO_TABLE);
@@ -1135,4 +1151,12 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
     return tableMap.keySet();
   }
 
+  @Override
+  public String getOzonePathKey(long parentObjectId, String pathComponentName) {
+    StringBuilder builder = new StringBuilder();
+    builder.append(parentObjectId);
+    builder.append(OM_KEY_PREFIX).append(pathComponentName);
+    return builder.toString();
+  }
+
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 258564c..d1fd246 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -215,6 +215,8 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HANDLER_COUNT_DEF
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_LAYOUT_VERSION;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_LAYOUT_VERSION_DEFAULT;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_METRICS_SAVE_INTERVAL;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_METRICS_SAVE_INTERVAL_DEFAULT;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_USER_MAX_VOLUME;
@@ -1140,6 +1142,10 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
       omRatisServer.start();
     }
 
+    // TODO: Temporary workaround for OM upgrade path and will be replaced once
+    //  upgrade HDDS-3698 story reaches consensus.
+    getOMLayoutVersion();
+
     metadataManager.start(configuration);
     startSecretManagerIfNecessary();
 
@@ -3597,6 +3603,14 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
         OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT);
   }
 
+  private void getOMLayoutVersion() {
+    String version = configuration.getTrimmed(OZONE_OM_LAYOUT_VERSION,
+            OZONE_OM_LAYOUT_VERSION_DEFAULT);
+    boolean omLayoutVersionV1 =
+            StringUtils.equalsIgnoreCase(version, "V1");
+    OzoneManagerRatisUtils.setOmLayoutVersionV1(omLayoutVersionV1);
+  }
+
   /**
    * Create volume which is required for S3Gateway operations.
    * @throws IOException
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OmDirectoryInfoCodec.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OmDirectoryInfoCodec.java
new file mode 100644
index 0000000..ba592a9
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OmDirectoryInfoCodec.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.codec;
+
+import com.google.common.base.Preconditions;
+import com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.hadoop.hdds.utils.db.Codec;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DirectoryInfo;
+
+import java.io.IOException;
+
+/**
+ * Codec to encode OmDirectoryInfo as byte array.
+ */
+public class OmDirectoryInfoCodec implements Codec<OmDirectoryInfo> {
+
+  @Override
+  public byte[] toPersistedFormat(OmDirectoryInfo object) throws IOException {
+    Preconditions
+            .checkNotNull(object, "Null object can't be converted " +
+                    "to byte array.");
+    return object.getProtobuf().toByteArray();
+  }
+
+  @Override
+  public OmDirectoryInfo fromPersistedFormat(byte[] rawData)
+          throws IOException {
+    Preconditions
+            .checkNotNull(rawData,
+                    "Null byte array can't converted to real object.");
+    try {
+      return OmDirectoryInfo.getFromProtobuf(DirectoryInfo.parseFrom(rawData));
+    } catch (InvalidProtocolBufferException e) {
+      throw new IllegalArgumentException(
+              "Can't encode the the raw data from the byte array", e);
+    }
+  }
+
+  @Override
+  public OmDirectoryInfo copyObject(OmDirectoryInfo object) {
+    return object.copyObject();
+  }
+}
+
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index 681c0da..1ea225b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.ozone.om.request.bucket.acl.OMBucketAddAclRequest;
 import org.apache.hadoop.ozone.om.request.bucket.acl.OMBucketRemoveAclRequest;
 import org.apache.hadoop.ozone.om.request.bucket.acl.OMBucketSetAclRequest;
 import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest;
+import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestV1;
 import org.apache.hadoop.ozone.om.request.file.OMFileCreateRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeysDeleteRequest;
 import org.apache.hadoop.ozone.om.request.key.OMAllocateBlockRequest;
@@ -83,8 +84,22 @@ import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.TRANSACTION_INFO_
  */
 public final class OzoneManagerRatisUtils {
 
+  // TODO: Temporary workaround for OM upgrade path and will be replaced once
+  //  upgrade HDDS-3698 story reaches consensus.
+  private static boolean omLayoutVersionV1 = true;
+
   private OzoneManagerRatisUtils() {
   }
+
+  /**
+   * Sets layout version.
+   *
+   * @param layoutVersionV1 om layout version
+   */
+  public static void setOmLayoutVersionV1(boolean layoutVersionV1) {
+    OzoneManagerRatisUtils.omLayoutVersionV1 = layoutVersionV1;
+  }
+
   /**
    * Create OMClientRequest which encapsulates the OMRequest.
    * @param omRequest
@@ -133,6 +148,9 @@ public final class OzoneManagerRatisUtils {
     case RenameKeys:
       return new OMKeysRenameRequest(omRequest);
     case CreateDirectory:
+      if (omLayoutVersionV1) {
+        return new OMDirectoryCreateRequestV1(omRequest);
+      }
       return new OMDirectoryCreateRequest(omRequest);
     case CreateFile:
       return new OMFileCreateRequest(omRequest);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java
new file mode 100644
index 0000000..b8b5697
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java
@@ -0,0 +1,312 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.file;
+
+import com.google.common.base.Optional;
+import org.apache.commons.lang3.tuple.ImmutablePair;
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.audit.AuditLogger;
+import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OMMetrics;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.file.OMDirectoryCreateResponseV1;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+        .CreateDirectoryRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+        .CreateDirectoryResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+        .KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+        .OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+        .OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+        .Status;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KEY_NAME;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.*;
+
+/**
+ * Handle create directory request. It will add path components to the directory
+ * table and maintains file system semantics.
+ */
+public class OMDirectoryCreateRequestV1 extends OMDirectoryCreateRequest {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OMDirectoryCreateRequestV1.class);
+
+  public OMDirectoryCreateRequestV1(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+
+    CreateDirectoryRequest createDirectoryRequest = getOmRequest()
+        .getCreateDirectoryRequest();
+    KeyArgs keyArgs = createDirectoryRequest.getKeyArgs();
+
+    String volumeName = keyArgs.getVolumeName();
+    String bucketName = keyArgs.getBucketName();
+    String keyName = keyArgs.getKeyName();
+    int numKeysCreated = 0;
+
+    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
+        getOmRequest());
+    omResponse.setCreateDirectoryResponse(CreateDirectoryResponse.newBuilder());
+    OMMetrics omMetrics = ozoneManager.getMetrics();
+    omMetrics.incNumCreateDirectory();
+
+    AuditLogger auditLogger = ozoneManager.getAuditLogger();
+    OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
+
+    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+    boolean acquiredLock = false;
+    IOException exception = null;
+    OMClientResponse omClientResponse = null;
+    Result result = Result.FAILURE;
+    List<OmDirectoryInfo> missingParentInfos;
+
+    try {
+      keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
+      volumeName = keyArgs.getVolumeName();
+      bucketName = keyArgs.getBucketName();
+
+      // check Acl
+      checkKeyAcls(ozoneManager, volumeName, bucketName, keyName,
+          IAccessAuthorizer.ACLType.CREATE, OzoneObj.ResourceType.KEY);
+
+      // Check if this is the root of the filesystem.
+      if (keyName.length() == 0) {
+        throw new OMException("Directory create failed. Cannot create " +
+            "directory at root of the filesystem",
+            OMException.ResultCodes.CANNOT_CREATE_DIRECTORY_AT_ROOT);
+      }
+      // acquire lock
+      acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
+          volumeName, bucketName);
+
+      validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
+
+      Path keyPath = Paths.get(keyName);
+
+      // Need to check if any files exist in the given path, if they exist we
+      // cannot create a directory with the given key.
+      // Verify the path against directory table
+      OMFileRequest.OMPathInfoV1 omPathInfo =
+          OMFileRequest.verifyDirectoryKeysInPath(omMetadataManager, volumeName,
+              bucketName, keyName, keyPath);
+      OMFileRequest.OMDirectoryResult omDirectoryResult =
+          omPathInfo.getDirectoryResult();
+
+      if (omDirectoryResult == FILE_EXISTS ||
+          omDirectoryResult == FILE_EXISTS_IN_GIVENPATH) {
+        throw new OMException("Unable to create directory: " + keyName
+            + " in volume/bucket: " + volumeName + "/" + bucketName + " as " +
+                "file:" + omPathInfo.getFileExistsInPath() + " already exists",
+            FILE_ALREADY_EXISTS);
+      } else if (omDirectoryResult == DIRECTORY_EXISTS_IN_GIVENPATH ||
+          omDirectoryResult == NONE) {
+
+        // prepare all missing parents
+        missingParentInfos = OMDirectoryCreateRequestV1.getAllParentDirInfo(
+                ozoneManager, keyArgs, omPathInfo, trxnLogIndex);
+        // prepare leafNode dir
+        OmDirectoryInfo dirInfo = createDirectoryInfoWithACL(
+                omPathInfo.getLeafNodeName(),
+                keyArgs, omPathInfo.getLeafNodeObjectId(),
+                omPathInfo.getLastKnownParentId(), trxnLogIndex,
+                OzoneAclUtil.fromProtobuf(keyArgs.getAclsList()));
+        OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager,
+                Optional.of(dirInfo), Optional.of(missingParentInfos),
+                trxnLogIndex);
+
+        // total number of keys created.
+        numKeysCreated = missingParentInfos.size() + 1;
+
+        result = OMDirectoryCreateRequest.Result.SUCCESS;
+        omClientResponse = new OMDirectoryCreateResponseV1(omResponse.build(),
+                dirInfo, missingParentInfos, result);
+      } else {
+        result = Result.DIRECTORY_ALREADY_EXISTS;
+        omResponse.setStatus(Status.DIRECTORY_ALREADY_EXISTS);
+        omClientResponse = new OMDirectoryCreateResponseV1(omResponse.build(),
+            result);
+      }
+    } catch (IOException ex) {
+      exception = ex;
+      omClientResponse = new OMDirectoryCreateResponseV1(
+          createErrorOMResponse(omResponse, exception), result);
+    } finally {
+      addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
+          omDoubleBufferHelper);
+      if (acquiredLock) {
+        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
+            bucketName);
+      }
+    }
+
+    auditLog(auditLogger, buildAuditMessage(OMAction.CREATE_DIRECTORY,
+        auditMap, exception, userInfo));
+
+    logResult(createDirectoryRequest, keyArgs, omMetrics, numKeysCreated,
+            result, exception);
+
+    return omClientResponse;
+  }
+
+  private void logResult(CreateDirectoryRequest createDirectoryRequest,
+                         KeyArgs keyArgs, OMMetrics omMetrics, int numKeys,
+                         Result result,
+                         IOException exception) {
+
+    String volumeName = keyArgs.getVolumeName();
+    String bucketName = keyArgs.getBucketName();
+    String keyName = keyArgs.getKeyName();
+
+    switch (result) {
+    case SUCCESS:
+      omMetrics.incNumKeys(numKeys);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Directory created. Volume:{}, Bucket:{}, Key:{}",
+            volumeName, bucketName, keyName);
+      }
+      break;
+    case DIRECTORY_ALREADY_EXISTS:
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Directory already exists. Volume:{}, Bucket:{}, Key{}",
+            volumeName, bucketName, keyName, exception);
+      }
+      break;
+    case FAILURE:
+      omMetrics.incNumCreateDirectoryFails();
+      LOG.error("Directory creation failed. Volume:{}, Bucket:{}, Key{}. " +
+          "Exception:{}", volumeName, bucketName, keyName, exception);
+      break;
+    default:
+      LOG.error("Unrecognized Result for OMDirectoryCreateRequest: {}",
+          createDirectoryRequest);
+    }
+  }
+
+  /**
+   * Construct OmDirectoryInfo for every parent directory in missing list.
+   * @param ozoneManager
+   * @param keyArgs
+   * @param pathInfo list of parent directories to be created and its ACLs
+   * @param trxnLogIndex
+   * @return list of OmDirectoryInfo for each missing parent directory
+   * @throws IOException
+   */
+  public static List<OmDirectoryInfo> getAllParentDirInfo(
+          OzoneManager ozoneManager, KeyArgs keyArgs,
+          OMFileRequest.OMPathInfoV1 pathInfo, long trxnLogIndex)
+          throws IOException {
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+    List<OmDirectoryInfo> missingParentInfos = new ArrayList<>();
+
+    ImmutablePair<Long, Long> objIdRange = OMFileRequest
+            .getObjIdRangeFromTxId(trxnLogIndex);
+    long baseObjId = objIdRange.getLeft();
+    long maxObjId = objIdRange.getRight();
+    long maxLevels = maxObjId - baseObjId;
+    long objectCount = 1;
+
+    String volumeName = keyArgs.getVolumeName();
+    String bucketName = keyArgs.getBucketName();
+    String keyName = keyArgs.getKeyName();
+
+    long lastKnownParentId = pathInfo.getLastKnownParentId();
+    List<String> missingParents = pathInfo.getMissingParents();
+    List<OzoneAcl> inheritAcls = pathInfo.getAcls();
+    for (String missingKey : missingParents) {
+      long nextObjId = baseObjId + objectCount;
+      if (nextObjId > maxObjId) {
+        throw new OMException("Too many directories in path. Exceeds limit of "
+                + maxLevels + ". Unable to create directory: " + keyName
+                + " in volume/bucket: " + volumeName + "/" + bucketName,
+                INVALID_KEY_NAME);
+      }
+
+      LOG.debug("missing parent {} getting added to DirectoryTable",
+              missingKey);
+      OmDirectoryInfo dirInfo = createDirectoryInfoWithACL(missingKey,
+              keyArgs, nextObjId, lastKnownParentId, trxnLogIndex, inheritAcls);
+      objectCount++;
+
+      missingParentInfos.add(dirInfo);
+
+      // updating id for the next sub-dir
+      lastKnownParentId = nextObjId;
+    }
+    pathInfo.setLastKnownParentId(lastKnownParentId);
+    pathInfo.setLeafNodeObjectId(baseObjId + objectCount);
+    return missingParentInfos;
+  }
+
+  /**
+   * Fill in a DirectoryInfo for a new directory entry in the OM database,
+   * without initializing ACLs from the KeyArgs - used for intermediate
+   * directories which get created internally/recursively during file
+   * and directory create.
+   * @param dirName
+   * @param keyArgs
+   * @param objectId
+   * @param parentObjectId
+   * @param inheritAcls
+   * @return the OmDirectoryInfo structure
+   */
+  public static OmDirectoryInfo createDirectoryInfoWithACL(
+          String dirName, KeyArgs keyArgs, long objectId,
+          long parentObjectId, long transactionIndex,
+          List<OzoneAcl> inheritAcls) {
+
+    return OmDirectoryInfo.newBuilder()
+            .setName(dirName)
+            .setCreationTime(keyArgs.getModificationTime())
+            .setModificationTime(keyArgs.getModificationTime())
+            .setObjectID(objectId)
+            .setUpdateID(transactionIndex)
+            .setParentObjectID(parentObjectId)
+            .setAcls(inheritAcls).build();
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index 21ffff8..aa9592c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.om.request.file;
 import java.io.IOException;
 import java.nio.file.Path;
 import java.util.ArrayList;
+import java.util.Iterator;
 import java.util.List;
 
 import com.google.common.base.Optional;
@@ -29,8 +30,12 @@ import org.apache.commons.lang3.tuple.ImmutablePair;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -130,6 +135,117 @@ public final class OMFileRequest {
   }
 
   /**
+   * Verify any dir/key exist in the given path in the specified
+   * volume/bucket by iterating through directory table.
+   *
+   * @param omMetadataManager OM Metadata manager
+   * @param volumeName        volume name
+   * @param bucketName        bucket name
+   * @param keyName           key name
+   * @param keyPath           path
+   * @return OMPathInfoV1 path info object
+   * @throws IOException on DB failure
+   */
+  public static OMPathInfoV1 verifyDirectoryKeysInPath(
+          @Nonnull OMMetadataManager omMetadataManager,
+          @Nonnull String volumeName,
+          @Nonnull String bucketName, @Nonnull String keyName,
+          @Nonnull Path keyPath) throws IOException {
+
+    String leafNodeName = OzoneFSUtils.getFileName(keyName);
+    List<String> missing = new ArrayList<>();
+
+    // Found no files/ directories in the given path.
+    OMDirectoryResult result = OMDirectoryResult.NONE;
+
+    Iterator<Path> elements = keyPath.iterator();
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    // by default, inherit bucket ACLs
+    List<OzoneAcl> inheritAcls = omBucketInfo.getAcls();
+
+    long lastKnownParentId = omBucketInfo.getObjectID();
+    String dbDirName = ""; // absolute path for trace logs
+    // for better logging
+    StringBuilder fullKeyPath = new StringBuilder(bucketKey);
+    while (elements.hasNext()) {
+      String fileName = elements.next().toString();
+      fullKeyPath.append(OzoneConsts.OM_KEY_PREFIX);
+      fullKeyPath.append(fileName);
+      if (missing.size() > 0) {
+        // Add all the sub-dirs to the missing list except the leaf element.
+        // For example, /vol1/buck1/a/b/c/d/e/f/file1.txt.
+        // Assume /vol1/buck1/a/b/c exists, then add d, e, f into missing list.
+        if(elements.hasNext()){
+          // skips leaf node.
+          missing.add(fileName);
+        }
+        continue;
+      }
+
+      // For example, /vol1/buck1/a/b/c/d/e/f/file1.txt
+      // 1. Do lookup on directoryTable. If not exists goto next step.
+      // 2. Do look on keyTable. If not exists goto next step.
+      // 3. Add 'sub-dir' to missing parents list
+      String dbNodeName = omMetadataManager.getOzonePathKey(
+              lastKnownParentId, fileName);
+      OmDirectoryInfo omDirInfo = omMetadataManager.getDirectoryTable().
+              get(dbNodeName);
+      if (omDirInfo != null) {
+        dbDirName += omDirInfo.getName() + OzoneConsts.OZONE_URI_DELIMITER;
+        if (elements.hasNext()) {
+          result = OMDirectoryResult.DIRECTORY_EXISTS_IN_GIVENPATH;
+          lastKnownParentId = omDirInfo.getObjectID();
+          inheritAcls = omDirInfo.getAcls();
+          continue;
+        } else {
+          // Checked all the sub-dirs till the leaf node.
+          // Found a directory in the given path.
+          result = OMDirectoryResult.DIRECTORY_EXISTS;
+        }
+      } else {
+        // Get parentID from the lastKnownParent. For any files, directly under
+        // the bucket, the parent is the bucketID. Say, "/vol1/buck1/file1"
+        // TODO: Need to add UT for this case along with OMFileCreateRequest.
+        if (omMetadataManager.getKeyTable().isExist(dbNodeName)) {
+          if (elements.hasNext()) {
+            // Found a file in the given key name.
+            result = OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH;
+          } else {
+            // Checked all the sub-dirs till the leaf file.
+            // Found a file with the given key name.
+            result = OMDirectoryResult.FILE_EXISTS;
+          }
+          break; // Skip directory traversal as it hits key.
+        }
+
+        // Add to missing list, there is no such file/directory with given name.
+        if (elements.hasNext()) {
+          missing.add(fileName);
+        }
+      }
+    }
+
+    LOG.trace("verifyFiles/Directories in Path : " + "/" + volumeName
+            + "/" + bucketName + "/" + keyName + ":" + result);
+
+    if (result == OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH || result ==
+            OMDirectoryResult.FILE_EXISTS) {
+      return new OMPathInfoV1(leafNodeName, lastKnownParentId, missing,
+              result, inheritAcls, fullKeyPath.toString());
+    }
+
+    String dbDirKeyName = omMetadataManager.getOzoneDirKey(volumeName,
+            bucketName, dbDirName);
+    LOG.trace("Acls inherited from parent " + dbDirKeyName + " are : "
+            + inheritAcls);
+
+    return new OMPathInfoV1(leafNodeName, lastKnownParentId, missing,
+            result, inheritAcls);
+  }
+
+  /**
    * Get the valid base object id given the transaction id.
    * @param id of the transaction
    * @return base object id allocated against the transaction
@@ -156,6 +272,59 @@ public final class OMFileRequest {
     return new ImmutablePair<>(baseId, maxAvailableId);
   }
 
+
+  /**
+   * Class to return the results from verifyDirectoryKeysInPath.
+   * Includes the list of missing intermediate directories and
+   * the directory search result code.
+   */
+  public static class OMPathInfoV1 extends OMPathInfo{
+    private String leafNodeName;
+    private long lastKnownParentId;
+    private long leafNodeObjectId;
+    private String fileExistsInPath;
+
+    public OMPathInfoV1(String leafNodeName, long lastKnownParentId,
+                        List missingParents, OMDirectoryResult result,
+                        List<OzoneAcl> aclList, String fileExistsInPath) {
+      super(missingParents, result, aclList);
+      this.leafNodeName = leafNodeName;
+      this.lastKnownParentId = lastKnownParentId;
+      this.fileExistsInPath = fileExistsInPath;
+    }
+
+    public OMPathInfoV1(String leafNodeName, long lastKnownParentId,
+                        List missingParents, OMDirectoryResult result,
+                        List<OzoneAcl> aclList) {
+      this(leafNodeName, lastKnownParentId, missingParents, result, aclList,
+              "");
+    }
+
+    public String getLeafNodeName() {
+      return leafNodeName;
+    }
+
+    public long getLeafNodeObjectId() {
+      return leafNodeObjectId;
+    }
+
+    public void setLeafNodeObjectId(long leafNodeObjectId) {
+      this.leafNodeObjectId = leafNodeObjectId;
+    }
+
+    public void setLastKnownParentId(long lastKnownParentId) {
+      this.lastKnownParentId = lastKnownParentId;
+    }
+
+    public long getLastKnownParentId() {
+      return lastKnownParentId;
+    }
+
+    public String getFileExistsInPath() {
+      return fileExistsInPath;
+    }
+  }
+
   /**
    * Class to return the results from verifyFilesInPath.
    * Includes the list of missing intermediate directories and
@@ -254,4 +423,34 @@ public final class OMFileRequest {
           new CacheValue<>(keyInfo, index));
     }
   }
+
+  /**
+   * Adding directory info to the Table cache.
+   *
+   * @param omMetadataManager  OM Metadata Manager
+   * @param dirInfo            directory info
+   * @param missingParentInfos list of the parents to be added to DB
+   * @param trxnLogIndex       transaction log index
+   */
+  public static void addDirectoryTableCacheEntries(
+          OMMetadataManager omMetadataManager,
+          Optional<OmDirectoryInfo> dirInfo,
+          Optional<List<OmDirectoryInfo>> missingParentInfos,
+          long trxnLogIndex) {
+    for (OmDirectoryInfo subDirInfo : missingParentInfos.get()) {
+      omMetadataManager.getDirectoryTable().addCacheEntry(
+              new CacheKey<>(omMetadataManager.getOzonePathKey(
+                      subDirInfo.getParentObjectID(), subDirInfo.getName())),
+              new CacheValue<>(Optional.of(subDirInfo), trxnLogIndex));
+    }
+
+    if (dirInfo.isPresent()) {
+      omMetadataManager.getDirectoryTable().addCacheEntry(
+              new CacheKey<>(omMetadataManager.getOzonePathKey(
+                      dirInfo.get().getParentObjectID(),
+                      dirInfo.get().getName())),
+              new CacheValue<>(dirInfo, trxnLogIndex));
+    }
+  }
+
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponseV1.java
new file mode 100644
index 0000000..4e93fa7
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponseV1.java
@@ -0,0 +1,103 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.file;
+
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest.Result;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.annotation.Nonnull;
+import java.io.IOException;
+import java.util.List;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE;
+
+/**
+ * Response for create directory request - layout version V1, in which
+ * directories live in a dedicated DIRECTORY_TABLE keyed by
+ * "parentObjectID/name". Persists the created leaf directory plus any
+ * missing intermediate parents in one DB batch.
+ */
+@CleanupTableInfo(cleanupTables = {DIRECTORY_TABLE})
+public class OMDirectoryCreateResponseV1 extends OMClientResponse {
+
+  public static final Logger LOG =
+      LoggerFactory.getLogger(OMDirectoryCreateResponseV1.class);
+
+  // Leaf directory created by the request; null when nothing was created
+  // (failure, or the directory already existed).
+  private OmDirectoryInfo dirInfo;
+  // Missing intermediate directories created along the path; may be null.
+  private List<OmDirectoryInfo> parentDirInfos;
+  // Outcome of request processing (see OMDirectoryCreateRequest.Result).
+  private Result result;
+
+  /**
+   * Success case: the leaf directory (and possibly parents) were created.
+   */
+  public OMDirectoryCreateResponseV1(@Nonnull OMResponse omResponse,
+                                     @Nonnull OmDirectoryInfo dirInfo,
+                                     @Nonnull List<OmDirectoryInfo> pDirInfos,
+                                     @Nonnull Result result) {
+    super(omResponse);
+    this.dirInfo = dirInfo;
+    this.parentDirInfos = pDirInfos;
+    this.result = result;
+  }
+
+  /**
+   * For when the request is not successful or the directory already exists.
+   * NOTE(review): error-path responses in this codebase typically call
+   * checkStatusNotOK() here - confirm whether this constructor should too.
+   */
+  public OMDirectoryCreateResponseV1(@Nonnull OMResponse omResponse,
+                                     @Nonnull Result result) {
+    super(omResponse);
+    this.result = result;
+  }
+
+  @Override
+  protected void addToDBBatch(OMMetadataManager omMetadataManager,
+                              BatchOperation batchOperation)
+          throws IOException {
+    addToDirectoryTable(omMetadataManager, batchOperation);
+  }
+
+  /**
+   * Writes the missing parents first, then the leaf directory, into the
+   * directory table batch. Keys are built as parentObjectID/name via
+   * getOzonePathKey. A null dirInfo means nothing to persist and is not
+   * an error.
+   */
+  private void addToDirectoryTable(OMMetadataManager omMetadataManager,
+                                BatchOperation batchOperation)
+          throws IOException {
+    if (dirInfo != null) {
+      if (parentDirInfos != null) {
+        for (OmDirectoryInfo parentDirInfo : parentDirInfos) {
+          String parentKey = omMetadataManager
+                  .getOzonePathKey(parentDirInfo.getParentObjectID(),
+                          parentDirInfo.getName());
+          LOG.debug("putWithBatch parent : dir {} info : {}", parentKey,
+                  parentDirInfo);
+          omMetadataManager.getDirectoryTable()
+                  .putWithBatch(batchOperation, parentKey, parentDirInfo);
+        }
+      }
+
+      String dirKey = omMetadataManager.getOzonePathKey(
+              dirInfo.getParentObjectID(), dirInfo.getName());
+      omMetadataManager.getDirectoryTable().putWithBatch(batchOperation, dirKey,
+              dirInfo);
+    } else {
+      // When directory already exists, we don't add it to cache. And it is
+      // not an error, in this case dirKeyInfo will be null.
+      LOG.debug("Response Status is OK, dirKeyInfo is null in " +
+              "OMDirectoryCreateResponseV1");
+    }
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
index a9caaa2..acfbb6e 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
@@ -38,6 +38,8 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -230,6 +232,25 @@ public final class TestOMRequestUtils {
   }
 
   /**
+   * Add dir key entry to DirectoryTable, optionally also to its cache.
+   *
+   * @param addToCache        whether to also insert into the table cache
+   * @param omDirInfo         directory info to persist
+   * @param trxnLogIndex      transaction log index used as the cache epoch
+   * @param omMetadataManager OM metadata manager owning the table
+   * @throws Exception on DB write failure
+   */
+  public static void addDirKeyToDirTable(boolean addToCache,
+                                         OmDirectoryInfo omDirInfo,
+                                         long trxnLogIndex,
+                                         OMMetadataManager omMetadataManager)
+          throws Exception {
+    // getPath() yields "parentObjectID/name", the V1 layout DB key.
+    String ozoneKey = omDirInfo.getPath();
+    if (addToCache) {
+      omMetadataManager.getDirectoryTable().addCacheEntry(
+              new CacheKey<>(ozoneKey),
+              new CacheValue<>(Optional.of(omDirInfo), trxnLogIndex));
+    }
+    omMetadataManager.getDirectoryTable().put(ozoneKey, omDirInfo);
+  }
+
+  /**
    * Create OmKeyInfo.
    */
   public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName,
@@ -240,6 +261,22 @@ public final class TestOMRequestUtils {
   }
 
   /**
+   * Create OmDirectoryInfo for tests. Creation/modification times are set
+   * to "now" and updateID mirrors objectID.
+   *
+   * @param keyName     directory name (a single path component)
+   * @param objectID    unique object id for the directory
+   * @param parentObjID object id of the parent directory or bucket
+   * @return new OmDirectoryInfo
+   */
+  public static OmDirectoryInfo createOmDirectoryInfo(String keyName,
+                                                      long objectID,
+                                                      long parentObjID) {
+    return new OmDirectoryInfo.Builder()
+            .setName(keyName)
+            .setCreationTime(Time.now())
+            .setModificationTime(Time.now())
+            .setObjectID(objectID)
+            .setParentObjectID(parentObjID)
+            .setUpdateID(objectID)
+            .build();
+  }
+
+  /**
    * Create OmKeyInfo.
    */
   public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName,
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java
new file mode 100644
index 0000000..77cf74b
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java
@@ -0,0 +1,649 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.file;
+
+import com.google.common.base.Optional;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.audit.AuditLogger;
+import org.apache.hadoop.ozone.audit.AuditMessage;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OMMetrics;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.ResolvedBucket;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.OMClientRequest;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateDirectoryRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.jetbrains.annotations.NotNull;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.mockito.Mockito;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.UUID;
+
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.when;
+
+/**
+ * Test OM directory create request V1 layout version.
+ */
+public class TestOMDirectoryCreateRequestV1 {
+
+  @Rule
+  public TemporaryFolder folder = new TemporaryFolder();
+
+  private OzoneManager ozoneManager;
+  private OMMetrics omMetrics;
+  private OMMetadataManager omMetadataManager;
+  private AuditLogger auditLogger;
+  // Just setting ozoneManagerDoubleBuffer which does nothing.
+  private OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper =
+          ((response, transactionIndex) -> {
+            return null;
+          });
+
+  @Before
+  public void setup() throws Exception {
+    ozoneManager = Mockito.mock(OzoneManager.class);
+    omMetrics = OMMetrics.create();
+    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
+    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
+            folder.newFolder().getAbsolutePath());
+    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
+    when(ozoneManager.getMetrics()).thenReturn(omMetrics);
+    when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
+    auditLogger = Mockito.mock(AuditLogger.class);
+    when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
+    Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
+    when(ozoneManager.resolveBucketLink(any(KeyArgs.class),
+            any(OMClientRequest.class)))
+            .thenReturn(new ResolvedBucket(Pair.of("", ""), Pair.of("", "")));
+  }
+
+  @After
+  public void stop() {
+    omMetrics.unRegister();
+    Mockito.framework().clearInlineMocks();
+  }
+
+  @Test
+  public void testPreExecute() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    String keyName = "a/b/c";
+
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            keyName);
+    OMDirectoryCreateRequestV1 omDirectoryCreateRequestV1 =
+            new OMDirectoryCreateRequestV1(omRequest);
+
+    OMRequest modifiedOmRequest =
+            omDirectoryCreateRequestV1.preExecute(ozoneManager);
+
+    // As in preExecute, we modify original request.
+    Assert.assertNotEquals(omRequest, modifiedOmRequest);
+  }
+
+  /**
+   * Happy path: creating a 4-level directory succeeds and all levels are
+   * persisted in the DirectoryTable under their parent object IDs.
+   */
+  @Test
+  public void testValidateAndUpdateCache() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 3);
+
+    // Add volume and bucket entries to DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketID = omBucketInfo.getObjectID();
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            keyName);
+    OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+            new OMDirectoryCreateRequestV1(omRequest);
+
+    OMRequest modifiedOmRequest = omDirCreateRequestV1.preExecute(ozoneManager);
+
+    omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+    OMClientResponse omClientResponse =
+            omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    // assertEquals reports expected/actual on failure, unlike
+    // assertTrue on reference equality of enum constants.
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
+            omClientResponse.getOMResponse().getStatus());
+    verifyDirectoriesInDB(dirs, bucketID);
+  }
+
+  @Test
+  public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 3);
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            keyName);
+    OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+            new OMDirectoryCreateRequestV1(omRequest);
+
+    OMRequest modifiedOmRequest = omDirCreateRequestV1.preExecute(ozoneManager);
+
+    omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+    OMClientResponse omClientResponse =
+            omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(VOLUME_NOT_FOUND,
+            omClientResponse.getOMResponse().getStatus());
+
+    // Key should not exist in DB
+    Assert.assertTrue("Unexpected directory entries!",
+            omMetadataManager.getDirectoryTable().isEmpty());
+
+  }
+
+  @Test
+  public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 3);
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            keyName);
+    OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+            new OMDirectoryCreateRequestV1(omRequest);
+
+    OMRequest modifiedOmRequest = omDirCreateRequestV1.preExecute(ozoneManager);
+
+    omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+    TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager);
+
+    OMClientResponse omClientResponse =
+            omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertTrue(omClientResponse.getOMResponse().getStatus()
+            == OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND);
+
+    // Key should not exist in DB
+    Assert.assertTrue("Unexpected directory entries!",
+            omMetadataManager.getDirectoryTable().isEmpty());
+  }
+
+  @Test
+  public void testValidateAndUpdateCacheWithSubDirectoryInPath()
+          throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 3);
+
+    // Add volume and bucket entries to DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketID = omBucketInfo.getObjectID();
+    int objID = 100;
+
+    //1. Create root
+    OmDirectoryInfo omDirInfo =
+            TestOMRequestUtils.createOmDirectoryInfo(dirs.get(0), objID++,
+                    bucketID);
+    TestOMRequestUtils.addDirKeyToDirTable(true, omDirInfo, 5000,
+            omMetadataManager);
+    //2. Create sub-directory under root
+    omDirInfo = TestOMRequestUtils.createOmDirectoryInfo(dirs.get(1), objID++,
+            omDirInfo.getObjectID());
+    TestOMRequestUtils.addDirKeyToDirTable(true, omDirInfo, 5000,
+            omMetadataManager);
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            keyName);
+    OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+            new OMDirectoryCreateRequestV1(omRequest);
+
+    OMRequest modifiedOmRequest = omDirCreateRequestV1.preExecute(ozoneManager);
+
+    omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+    OMClientResponse omClientResponse =
+            omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertTrue(omClientResponse.getOMResponse().getStatus()
+            == OzoneManagerProtocolProtos.Status.OK);
+
+    // Key should exist in DB and cache.
+    verifyDirectoriesInDB(dirs, bucketID);
+  }
+
+  @Test
+  public void testValidateAndUpdateCacheWithDirectoryAlreadyExists()
+          throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 3);
+
+    // Add volume and bucket entries to DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketID = omBucketInfo.getObjectID();
+
+    // bucketID is the parent
+    long parentID = bucketID;
+
+    // add all the directories into DirectoryTable
+    for (int indx = 0; indx < dirs.size(); indx++) {
+      long objID = 100 + indx;
+      long txnID = 5000 + indx;
+      // for index=0, parentID is bucketID
+      OmDirectoryInfo omDirInfo = TestOMRequestUtils.createOmDirectoryInfo(
+              dirs.get(indx), objID, parentID);
+      TestOMRequestUtils.addDirKeyToDirTable(false, omDirInfo,
+              txnID, omMetadataManager);
+
+      parentID = omDirInfo.getObjectID();
+    }
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            keyName);
+    OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+            new OMDirectoryCreateRequestV1(omRequest);
+
+    OMRequest modifiedOmRequest = omDirCreateRequestV1.preExecute(ozoneManager);
+
+    omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+    OMClientResponse omClientResponse =
+            omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertTrue(omClientResponse.getOMResponse().getStatus()
+            == OzoneManagerProtocolProtos.Status.DIRECTORY_ALREADY_EXISTS);
+
+    Assert.assertEquals("Wrong OM numKeys metrics",
+            0, ozoneManager.getMetrics().getNumKeys());
+
+    // Key should exist in DB and doesn't added to cache.
+    verifyDirectoriesInDB(dirs, bucketID);
+    verifyDirectoriesNotInCache(dirs, bucketID);
+  }
+
+  /**
+   * Case: File exists with the same name as the requested directory.
+   * Say, requested to createDir '/a/b/c' and there is a file exists with
+   * same name.
+   */
+  @Test
+  public void testValidateAndUpdateCacheWithFilesInPath() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 3);
+
+    // Add volume and bucket entries to DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long parentID = omBucketInfo.getObjectID();
+
+    // add all the parent directories into DirectoryTable. This won't create
+    // the leaf node and this will be used in CreateDirectoryReq.
+    for (int indx = 0; indx < dirs.size() - 1; indx++) {
+      long objID = 100 + indx;
+      long txnID = 5000 + indx;
+      // for index=0, parentID is bucketID
+      OmDirectoryInfo omDirInfo = TestOMRequestUtils.createOmDirectoryInfo(
+              dirs.get(indx), objID, parentID);
+      TestOMRequestUtils.addDirKeyToDirTable(false, omDirInfo,
+              txnID, omMetadataManager);
+
+      parentID = omDirInfo.getObjectID();
+    }
+
+    long objID = parentID + 100;
+    long txnID = 50000;
+
+    // Add a file into the FileTable, this is to simulate "file exists" check.
+    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
+            bucketName, keyName, HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.THREE, objID++);
+    String ozoneFileName = parentID + "/" + dirs.get(dirs.size() - 1);
+    omMetadataManager.getKeyTable().addCacheEntry(new CacheKey<>(ozoneFileName),
+            new CacheValue<>(Optional.of(omKeyInfo), ++txnID));
+    omMetadataManager.getKeyTable().put(ozoneFileName, omKeyInfo);
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            keyName);
+    OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+            new OMDirectoryCreateRequestV1(omRequest);
+
+    OMRequest modifiedOmRequest =
+            omDirCreateRequestV1.preExecute(ozoneManager);
+
+    omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+    OMClientResponse omClientResponse =
+            omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertTrue(omClientResponse.getOMResponse().getStatus()
+            == OzoneManagerProtocolProtos.Status.FILE_ALREADY_EXISTS);
+
+    Assert.assertEquals("Wrong OM numKeys metrics",
+            0, ozoneManager.getMetrics().getNumKeys());
+
+    // Key should not exist in DB
+    Assert.assertNotNull(omMetadataManager.getKeyTable().get(ozoneFileName));
+    // Key should not exist in DB
+    Assert.assertEquals("Wrong directories count!", 3,
+            omMetadataManager.getDirectoryTable().getEstimatedKeyCount());
+  }
+
+
+  /**
+   * Case: File exists in the given path.
+   * Say, requested to createDir '/a/b/c/d' and there is a file '/a/b' exists
+   * in the given path.
+   */
+  @Test
+  public void testValidateAndUpdateCacheWithFileExistsInGivenPath()
+          throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 3);
+
+    // Add volume and bucket entries to DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long parentID = omBucketInfo.getObjectID();
+
+    long objID = parentID + 100;
+    long txnID = 5000;
+
+    // for index=0, parentID is bucketID
+    OmDirectoryInfo omDirInfo = TestOMRequestUtils.createOmDirectoryInfo(
+            dirs.get(0), objID++, parentID);
+    TestOMRequestUtils.addDirKeyToDirTable(true, omDirInfo,
+            txnID, omMetadataManager);
+    parentID = omDirInfo.getObjectID();
+
+    // Add a key in second level.
+    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
+            bucketName, keyName, HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.THREE, objID++);
+    String ozoneKey = parentID + "/" + dirs.get(1);
+    omMetadataManager.getKeyTable().addCacheEntry(new CacheKey<>(ozoneKey),
+            new CacheValue<>(Optional.of(omKeyInfo), ++txnID));
+    omMetadataManager.getKeyTable().put(ozoneKey, omKeyInfo);
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            keyName);
+    OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+            new OMDirectoryCreateRequestV1(omRequest);
+
+    OMRequest modifiedOmRequest =
+            omDirCreateRequestV1.preExecute(ozoneManager);
+
+    omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+    OMClientResponse omClientResponse =
+            omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertTrue("Invalid response code:" +
+                    omClientResponse.getOMResponse().getStatus(),
+            omClientResponse.getOMResponse().getStatus()
+                    == OzoneManagerProtocolProtos.Status.FILE_ALREADY_EXISTS);
+
+    Assert.assertEquals("Wrong OM numKeys metrics",
+            0, ozoneManager.getMetrics().getNumKeys());
+
+    // Key should not exist in DB
+    Assert.assertTrue(omMetadataManager.getKeyTable().get(ozoneKey) != null);
+    // Key should not exist in DB
+    Assert.assertEquals("Wrong directories count!",
+            1, omMetadataManager.getDirectoryTable().getEstimatedKeyCount());
+  }
+
+  @Test
+  public void testCreateDirectoryUptoLimitOfMaxLevels255() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 255);
+
+    // Add volume and bucket entries to DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketID = omBucketInfo.getObjectID();
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            OzoneFSUtils.addTrailingSlashIfNeeded(keyName));
+    OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+            new OMDirectoryCreateRequestV1(omRequest);
+
+    OMRequest modifiedOmRequest = omDirCreateRequestV1.preExecute(ozoneManager);
+
+    omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+    Assert.assertEquals(0L, omMetrics.getNumKeys());
+    OMClientResponse omClientResponse =
+            omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
+            omClientResponse.getOMResponse().getStatus());
+
+    verifyDirectoriesInDB(dirs, bucketID);
+
+    Assert.assertEquals(dirs.size(), omMetrics.getNumKeys());
+  }
+
+  @Test
+  public void testCreateDirectoryExceedLimitOfMaxLevels255() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 256);
+
+    // Add volume and bucket entries to DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketID = omBucketInfo.getObjectID();
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            OzoneFSUtils.addTrailingSlashIfNeeded(keyName));
+    OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+            new OMDirectoryCreateRequestV1(omRequest);
+
+    OMRequest modifiedOmRequest = omDirCreateRequestV1.preExecute(ozoneManager);
+
+    omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+    Assert.assertEquals(0L, omMetrics.getNumKeys());
+    OMClientResponse omClientResponse =
+            omDirCreateRequestV1.validateAndUpdateCache(ozoneManager,
+                    100L, ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.INVALID_KEY_NAME,
+            omClientResponse.getOMResponse().getStatus());
+
+    Assert.assertEquals("Unexpected directories!", 0,
+            omMetadataManager.getDirectoryTable().getEstimatedKeyCount());
+
+    Assert.assertEquals(0, omMetrics.getNumKeys());
+  }
+
+  @Test
+  public void testCreateDirectoryOMMetric() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 3);
+
+    // Add volume and bucket entries to DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketID = omBucketInfo.getObjectID();
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            OzoneFSUtils.addTrailingSlashIfNeeded(keyName));
+    OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+            new OMDirectoryCreateRequestV1(omRequest);
+
+    OMRequest modifiedOmRequest = omDirCreateRequestV1.preExecute(ozoneManager);
+
+    omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+    Assert.assertEquals(0L, omMetrics.getNumKeys());
+    OMClientResponse omClientResponse =
+            omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
+            omClientResponse.getOMResponse().getStatus());
+
+    verifyDirectoriesInDB(dirs, bucketID);
+
+    Assert.assertEquals(dirs.size(), omMetrics.getNumKeys());
+  }
+
+
+  /**
+   * Builds a random directory path and records each component in dirs.
+   * Note: produces depth + 1 components (the leading name plus 'depth'
+   * sub-directories), each a random 5-letter alphabetic string.
+   *
+   * @param dirs  out-param collecting every path component in order
+   * @param depth number of sub-directory levels appended after the first
+   * @return slash-separated key name, e.g. "abcde/fghij/..."
+   */
+  @NotNull
+  private String createDirKey(List<String> dirs, int depth) {
+    String keyName = RandomStringUtils.randomAlphabetic(5);
+    dirs.add(keyName);
+    // StringBuilder avoids O(n^2) String concatenation in the loop
+    // (relevant for the 255/256-level tests above).
+    StringBuilder keyNameBuilder = new StringBuilder(keyName);
+    for (int i = 0; i < depth; i++) {
+      String dirName = RandomStringUtils.randomAlphabetic(5);
+      dirs.add(dirName);
+      keyNameBuilder.append("/").append(dirName);
+    }
+    return keyNameBuilder.toString();
+  }
+
+  /**
+   * Asserts each path component exists in the DirectoryTable, keyed under
+   * its parent's object ID (bucketID for the first level), and that its
+   * stored path is "parentID/name".
+   */
+  private void verifyDirectoriesInDB(List<String> dirs, long bucketID)
+          throws IOException {
+    // bucketID is the parent of the first component; each later component
+    // is keyed under the objectID of the directory resolved before it.
+    long parentID = bucketID;
+    for (String dirName : dirs) {
+      // Removed the dead "" initializer; declare dbKey at its use site.
+      String dbKey = omMetadataManager.getOzonePathKey(parentID, dirName);
+      OmDirectoryInfo omDirInfo =
+              omMetadataManager.getDirectoryTable().get(dbKey);
+      Assert.assertNotNull("Invalid directory!", omDirInfo);
+      Assert.assertEquals("Invalid directory!", dirName, omDirInfo.getName());
+      Assert.assertEquals("Invalid dir path!",
+              parentID + "/" + dirName, omDirInfo.getPath());
+      parentID = omDirInfo.getObjectID();
+    }
+  }
+
+  /**
+   * Asserts no path component is present in the DirectoryTable *cache*
+   * (entries may still exist in the DB itself, as in the already-exists
+   * test which calls this).
+   */
+  private void verifyDirectoriesNotInCache(List<String> dirs, long bucketID)
+          throws IOException {
+    // bucketID is the parent of the first component.
+    long parentID = bucketID;
+    for (String dirName : dirs) {
+      String dbKey = omMetadataManager.getOzonePathKey(parentID, dirName);
+      CacheValue<OmDirectoryInfo> omDirInfoCacheValue =
+              omMetadataManager.getDirectoryTable()
+                      .getCacheValue(new CacheKey<>(dbKey));
+      Assert.assertNull("Unexpected directory!", omDirInfoCacheValue);
+      // BUG(original): parentID was never advanced, so every level after
+      // the first was probed under bucketID - a key that trivially cannot
+      // be cached, making those assertions vacuous. Advance via the DB
+      // copy (the caller persists all levels before invoking this).
+      OmDirectoryInfo omDirInfo =
+              omMetadataManager.getDirectoryTable().get(dbKey);
+      Assert.assertNotNull("Directory missing in DB!", omDirInfo);
+      parentID = omDirInfo.getObjectID();
+    }
+  }
+
+  /**
+   * Create OMRequest which encapsulates CreateDirectory request.
+   *
+   * @param volumeName volume holding the directory
+   * @param bucketName bucket holding the directory
+   * @param keyName    slash-separated directory path to create
+   * @return OMRequest of type CreateDirectory with a random client id
+   */
+  private OMRequest createDirectoryRequest(String volumeName, String bucketName,
+                                           String keyName) {
+    return OMRequest.newBuilder().setCreateDirectoryRequest(
+            CreateDirectoryRequest.newBuilder().setKeyArgs(
+                    KeyArgs.newBuilder().setVolumeName(volumeName)
+                            .setBucketName(bucketName).setKeyName(keyName)))
+            .setCmdType(OzoneManagerProtocolProtos.Type.CreateDirectory)
+            .setClientId(UUID.randomUUID().toString()).build();
+  }
+
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponseV1.java
new file mode 100644
index 0000000..0a1114a
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponseV1.java
@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.file;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestV1;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.util.ArrayList;
+import java.util.UUID;
+
+/**
+ * Tests OMDirectoryCreateResponseV1 new layout version.
+ */
+public class TestOMDirectoryCreateResponseV1 {
+  @Rule
+  public TemporaryFolder folder = new TemporaryFolder();
+
+  // Fresh metadata manager + batch per test; backing DB lives in the
+  // TemporaryFolder so it is discarded after each run.
+  private OMMetadataManager omMetadataManager;
+  private BatchOperation batchOperation;
+
+  /**
+   * Creates an OM metadata manager backed by a temporary DB directory and
+   * opens a batch operation for the test to commit.
+   */
+  @Before
+  public void setup() throws Exception {
+    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
+    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
+        folder.newFolder().getAbsolutePath());
+    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
+    batchOperation = omMetadataManager.getStore().initBatchOperation();
+  }
+
+  /**
+   * Verifies that a successful OMDirectoryCreateResponseV1 adds the
+   * directory entry to the batch, and that committing the batch makes the
+   * entry readable from the directory table under its parent-scoped path key.
+   */
+  @Test
+  public void testAddToDBBatch() throws Exception {
+
+    String volumeName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+
+    // Arbitrary fixed parent object ID; objectID of the dir itself is 500.
+    long parentID = 100;
+    OmDirectoryInfo omDirInfo =
+            TestOMRequestUtils.createOmDirectoryInfo(keyName, 500, parentID);
+
+    // Minimal OK response envelope for a CreateDirectory command.
+    OMResponse omResponse = OMResponse.newBuilder().setCreateDirectoryResponse(
+        OzoneManagerProtocolProtos.CreateDirectoryResponse.getDefaultInstance())
+            .setStatus(OzoneManagerProtocolProtos.Status.OK)
+            .setCmdType(OzoneManagerProtocolProtos.Type.CreateDirectory)
+            .build();
+
+    // Empty parent-dir list: only the leaf directory itself is written.
+    OMDirectoryCreateResponseV1 omDirectoryCreateResponseV1 =
+        new OMDirectoryCreateResponseV1(omResponse, omDirInfo,
+            new ArrayList<>(), OMDirectoryCreateRequestV1.Result.SUCCESS);
+
+    omDirectoryCreateResponseV1.addToDBBatch(omMetadataManager, batchOperation);
+
+    // Do manual commit and see whether addToBatch is successful or not.
+    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+
+    Assert.assertNotNull(omMetadataManager.getDirectoryTable().get(
+            omMetadataManager.getOzonePathKey(parentID, keyName)));
+  }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org