Posted to commits@ozone.apache.org by av...@apache.org on 2020/10/06 20:24:27 UTC

[hadoop-ozone] branch HDDS-3698-upgrade updated (b07927e -> 8fe8a1f)

This is an automated email from the ASF dual-hosted git repository.

avijayan pushed a change to branch HDDS-3698-upgrade
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git.


 discard b07927e  HDDS-4253. Add LayoutVersion request/response for DN registration. (#1457)
    omit a2788b7  HDDS-4227. Implement a 'Prepare For Upgrade' step in OM that applies all committed Ratis transactions. (#1430)
    omit c6f51ee3 HDDS-4252. Add the current layout versions to DN - SCM proto payload. (#1432)
    omit 86c4c9b  HDDS-4143. Implement a factory for OM Requests that returns an instance based on layout version. (#1405)
    omit 75ea5bf  HDDS-4174. Add current HDDS layout version to Datanode heartbeat/registration (#1421)
    omit 54e5982  HDDS-4141. Implement Finalize command in Ozone Manager client. (#1400)
    omit 0731098  HDDS-4173.  Implement HDDS Version management using the LayoutVersionManager interface. (#1392)
    omit d65827c  HDDS-3829. Introduce Layout Feature interface in Ozone. (#1322)
     add ceeca92  HDDS-4201. Improve the performance of OmKeyLocationInfoGroup (#1381)
     add 642d660  HDDS-4186: Adjust RetryPolicy of SCMConnectionManager for SCM/Recon (#1373)
     add f254183  HDDS-3725. Ozone sh volume client support quota option. (#1233)
     add acfef2d  HDDS-4205. Disable coverage upload to codecov for pull requests (#1394)
     add 549a1a0  HDDS-4197. Failed to load existing service definition files: ...SubcommandWithParent (#1386)
     add 7bf205c  Removing an archaic reference to Skaffold in the README and other little improvements (#1360)
     add 157864a  HDDS-4208. Fix table rendering and logo display in docs (#1391)
     add b12f6b6  HDDS-4161. Set fs.defaultFS in docker compose cluster config to OFS (#1362)
     add dc49daa  HDDS-4198. Compile Ozone with multiple Java versions (#1387)
     add 4b325a8  HDDS-4193. Range used by S3 MultipartUpload copy-from-source should be incusive (#1384)
     add ce02172  HDDS-4202. Upgrade ratis to 1.1.0-ea949f1-SNAPSHOT (#1382)
     add 53353c0  HDDS-4204. upgrade docker environment does not work with KEEP_RUNNING=true (#1388)
     add 0a490cb  HDDS-3441. Enable TestKeyManagerImpl test cases. (#1326)
     add d2c0470  HDDS-4213. Log when a datanode has become dead in the DeadNodeHandler (#1402)
     add 9a4cb9e  HDDS-3151. TestCloseContainerHandlingByClient Enable-testMultiBlockW… (#1333)
     add 49e3a1a  HDDS-4064. Show container verbose info with verbose option (#1290)
     add 22e4288  HDDS-4170 - Fix typo in method description. (#1406)
     add 971a36e  HDDS-4150. recon.api.TestEndpoints test is flaky (#1396)
     add bfa2801  HDDS-4211. [OFS] Better owner and group display for listing Ozone volumes and buckets (#1397)
     add a7c72fb  HDDS-4196. Add an endpoint in Recon to query Prometheus (#1390)
     add 4b96d83  HDDS-4228: add field 'num' to ALLOCATE_BLOCK of scm audit log. (#1413)
     add 04ac1ef  HDDS-4129. change MAX_QUOTA_IN_BYTES to Long.MAX_VALUE. (#1337)
     add 2134c2e  HDDS-4218.Remove test TestRatisManager (#1409)
     add ab7481d  HDDS-4217.Remove test TestOzoneContainerRatis (#1408)
     add 72e3215  HDDS-4119. Improve performance of the BufferPool management of Ozone client (#1336)
     add 48e8e50  HDDS-3927. Rename Ozone OM,DN,SCM runtime options to conform to naming conventions (#1401)
     add 68869d1  HDDS-4155. Directory and filename can end up with same name in a path. (#1361)
     add def697f  HDDS-4039. Reduce the number of fields in hdds.proto to improve performance (#1289)
     add 8e71e81  HDDS-4166. Documentation index page redirects to the wrong address (#1372)
     add 045aa71  HDDS-4075. Retry request on different OM on AccessControlException (#1303)
     add e84f5ce  HDDS-4210. ResolveBucket during checkAcls fails. (#1398)
     add 7beb2d0  HDDS-4053. Volume space: add quotaUsageInBytes and update it when write and delete key. (#1296)
     add 570d34c  HDDS-4244. Container deleted wrong replica cause mis-replicated. (#1423)
     add 69c3e0e  HDDS-4250. Fix wrong logger name (#1429)
     add 079ee7f  HDDS-4104. Provide a way to get the default value and key of java-based-configuration easily (#1369)
     add 241de5a  HDDS-4241. Support HADOOP_TOKEN_FILE_LOCATION for Ozone token CLI. (#1422)
     add 0da6cfd  HDDS-4247. Fixed log4j usage in some places (#1426)
     add 410a246  HDDS-4255. Remove unused Ant and Jdiff dependency versions (#1433)
     add 68d1ab0  HDDS-3981. Add more debug level log to XceiverClientGrpc for debug purpose (#1214)
     add ce0c072  HDDS-3102. ozone getconf command should use the GenericCli parent class (#1410)
     add 1e9ff6c  HDDS-3947: Sort DNs for client when the key is a file for #getFileStatus #listStatus APIs (#1385)
     add f3a60dc  HDDS-4233. Interrupted exeception printed out from DatanodeStateMachine (#1416)
     add b281d62  HDDS-4206. Attempt pipeline creation more frequently in acceptance tests (#1389)
     add 0eceb4c  HDDS-2766. security/SecuringDataNodes.md (#1175)
     add a78a4b7  HDDS-4254. Bucket space: add usedBytes and update it when create and delete key. (#1431)
     add 8ca694a  HDDS-4236. Move "Om*Codec.java" to new project hadoop-ozone/interface-storage (#1424)
     add 8899ff7  HDDS-4324. Add important comment to ListVolumes logic (#1417)
     add 261d34d  HDDS-3297. Enable TestOzoneClientKeyGenerator. (#1442)
     add 64026dd  HDDS-2660. Create insight point for datanode container protocol (#1272)
     add 5f1900a  HDDS-4270. Add more reusable byteman scripts to debug ofs/o3fs performance (#1443)
     add c955729  HDDS-4194. Create a script to check AWS S3 compatibility (#1383)
     add 7d0d330  HDDS-4282. Improve the emptyDir syntax (#1450)
     add f1cdbe7  HDDS-4263. ReplicatiomManager shouldn't consider origin node Id for CLOSED containers. (#1438)
     add 004dd3f  HDDS-4102. Normalize Keypath for lookupKey. (#1328)
     add 525ecbb  HDDS-3727. Volume space: check quotaUsageInBytes when write key. (#1434)
     add 6267a39  HDDS-4231. Background Service blocks on task results. (#1414)
     add 34f3b91  HDDS-4232. Use single thread for KeyDeletingService. (#1415)
     add 68642c2  HDDS-4023. Delete closed container after all blocks have been deleted. (#1338)
     add 3ad1034  HDDS-4215. Update Freon doc in source tree. (#1403)
     add 2420ee8  HDDS-4288. the icon of hadoop-ozone is bigger than ever (#1452)
     add fdcc696  HDDS-4287: Exclude protobuff classes from ozone-filesystem-hadoop3 jars (#1455). Contributed by Uma Maheswara Rao G.
     add d1ac423  HDDS-3751. Ozone sh client support bucket quota option. (#1412)
     add be25991  HDDS-4292. Ozone Client not working with Hadoop Version < 3.2 (#1463)
     add d6a1836  HDDS-4251. Update Ratis version to latest snapshot (#1462)
     add 275653e  HDDS-3869. Use different column families for datanode block and metadata (#1298)
     add 819b455  HDDS-4302 : Shade the org.apache.common.lang3 package as this is coming from other hadoop packages as well. (#1469). Contributed by Uma Maheswara Rao G.
     add 7216e3c  HDDS-3966. Enable TestOMRatisSnapshots. (#1441)
     add 55c9df8  HDDS-4290. Enable insight point for SCM heartbeat protocol (#1453)
     add 60d2bcc  HDDS-4274. Change the log level of the SCM Delete block to improve performance. (#1446)
     add f8a62d6  HDDS-3810. Add the logic to distribute open containers among the pipelines of a datanode. (#1274)
     add 5719615  HDDS-4304. Close Container event can fail if pipeline is removed first. (#1471)
     add 8cd86a6  HDDS-4299. Display Ratis version with ozone version (#1464)
     add cfff097  HDDS-4271. Avoid logging chunk content in Ozone Insight (#1466)
     add 4ad0318  HDDS-4264. Uniform naming conventions of Ozone Shell Options. (#1447)
     add d6d27e4  HDDS-4242. Copy PrefixInfo proto to new project hadoop-ozone/interface-storage (#1444)
     add 19cb481  HDDS-4156. add hierarchical layout to Chinese doc (#1368)
     add b6efb95  HDDS-4280. Document notable configurations for Recon. (#1448)
     add 0d7d1e2  HDDS-4298. Use an interface in Ozone client instead of XceiverClientManager (#1460)
     add f9b1ca4  HDDS-4310: Ozone getconf broke the compatibility (#1475)
     new 3e21d75  HDDS-3829. Introduce Layout Feature interface in Ozone. (#1322)
     new 3fe83ce  HDDS-4173.  Implement HDDS Version management using the LayoutVersionManager interface. (#1392)
     new 9b55f69  HDDS-4141. Implement Finalize command in Ozone Manager client. (#1400)
     new 95b8712  HDDS-4174. Add current HDDS layout version to Datanode heartbeat/registration (#1421)
     new 6fa5fa1  HDDS-4143. Implement a factory for OM Requests that returns an instance based on layout version. (#1405)
     new b96834f  HDDS-4252. Add the current layout versions to DN - SCM proto payload. (#1432)
     new 08d8d36  HDDS-4227. Implement a 'Prepare For Upgrade' step in OM that applies all committed Ratis transactions. (#1430)
     new 8fe8a1f  HDDS-4253. Add LayoutVersion request/response for DN registration. (#1457)

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (b07927e)
            \
             N -- N -- N   refs/heads/HDDS-3698-upgrade (8fe8a1f)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 8 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.
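
For readers unfamiliar with the situation described above, a history rewrite of this
kind can be reproduced with ordinary git commands. The sketch below is illustrative
only; "B" and "N1 N2 N3" are placeholders standing in for the common base and the
rewritten commits listed in this mail, not literal identifiers:

    # move the branch back to the common base B, dropping the old (O) commits locally
    git checkout -B HDDS-3698-upgrade B
    # re-apply the work as rewritten commits (the N commits)
    git cherry-pick N1 N2 N3
    # replace the old remote tip (b07927e) with the new one (8fe8a1f)
    git push --force origin HDDS-3698-upgrade

    # a recipient who already fetched the old tip can compare the two histories
    git log --oneline b07927e...8fe8a1f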


Summary of changes:
 .github/workflows/post-commit.yml                  |  20 +-
 .../byteman/appendlog.btm                          |  21 +-
 dev-support/byteman/hcfs-read.btm                  |  67 +++
 .../byteman/ratis-flush.btm                        |  27 +-
 .../byteman/ratis-no-flush.btm                     |  16 +-
 .../byteman/watchforcommit.btm                     |  28 +-
 dev-support/byteman/watchforcommit_all.btm         |  47 ++
 hadoop-hdds/client/pom.xml                         |  11 +
 .../hadoop/hdds/scm/XceiverClientFactory.java      |  23 +-
 .../apache/hadoop/hdds/scm/XceiverClientGrpc.java  |  18 +-
 .../hadoop/hdds/scm/XceiverClientManager.java      |  40 +-
 .../apache/hadoop/hdds/scm/XceiverClientRatis.java |   4 +-
 .../hadoop/hdds/scm/client/HddsClientUtils.java    |   8 +-
 .../hadoop/hdds/scm/storage/BlockInputStream.java  |  54 +-
 .../hadoop/hdds/scm/storage/BlockOutputStream.java | 143 +++--
 .../apache/hadoop/hdds/scm/storage/BufferPool.java |  49 +-
 .../hadoop/hdds/scm/storage/CommitWatcher.java     |  37 +-
 .../storage/TestBlockOutputStreamCorrectness.java  | 224 ++++++++
 .../hadoop/hdds/scm/storage/TestBufferPool.java}   |  37 +-
 hadoop-hdds/common/pom.xml                         |   5 +
 hadoop-hdds/common/src/main/conf/hadoop-env.sh     |  13 +-
 .../org/apache/hadoop/hdds/client/OzoneQuota.java  | 240 +++++---
 .../hadoop/hdds/protocol/DatanodeDetails.java      |  83 ++-
 .../RequestTypeDependentRetryPolicyCreator.java    |   8 +-
 .../apache/hadoop/hdds/recon/ReconConfigKeys.java  |   4 +
 .../hadoop/hdds/scm/ByteStringConversion.java      |  18 +-
 .../org/apache/hadoop/hdds/scm/ScmConfigKeys.java  |   9 +
 .../apache/hadoop/hdds/scm/XceiverClientSpi.java   |   4 +-
 .../hadoop/hdds/scm/net/NetworkTopologyImpl.java   |   2 +-
 .../hdds/scm/storage/ContainerProtocolCalls.java   |  80 ++-
 .../x509/certificate/utils/CertificateCodec.java   |   2 +-
 .../hadoop/hdds/utils/BackgroundService.java       |  73 +--
 .../apache/hadoop/hdds/utils/BackgroundTask.java   |   4 +-
 .../hadoop/hdds/utils/BackgroundTaskQueue.java     |   5 +-
 .../{VersionInfo.java => RatisVersionInfo.java}    |  66 +--
 .../org/apache/hadoop/hdds/utils/Scheduler.java    |   2 +-
 .../org/apache/hadoop/hdds/utils/VersionInfo.java  |   9 +-
 .../java/org/apache/hadoop/ozone/OzoneConsts.java  |  50 +-
 .../apache/hadoop/ozone/common/ChunkBuffer.java    |  14 +-
 .../common/ChunkBufferImplWithByteBuffer.java      |  10 +-
 .../container/common/helpers/ChunkInfoList.java    |  56 ++
 .../common/src/main/resources/ozone-default.xml    |  17 +
 .../hadoop/hdds/protocol/MockDatanodeDetails.java  |   2 +-
 .../hadoop/hdds/scm/pipeline/MockPipeline.java     |  29 +-
 .../hadoop/ozone/common/TestChunkBuffer.java       |  16 +-
 .../hdds/conf/ConfigurationReflectionUtil.java     |  46 ++
 .../hdds/conf/TestConfigurationReflectionUtil.java | 111 ++++
 .../container/common/helpers/ContainerUtils.java   |  66 ++-
 .../container/common/impl/ContainerDataYaml.java   |   3 +
 .../container/common/impl/HddsDispatcher.java      |  61 +-
 .../container/common/interfaces/BlockIterator.java |   5 +-
 .../container/common/interfaces/Container.java     |   7 -
 .../common/statemachine/DatanodeStateMachine.java  |   7 +-
 .../common/statemachine/SCMConnectionManager.java  |   9 +-
 .../CloseContainerCommandHandler.java              |   2 +-
 .../CreatePipelineCommandHandler.java              |   2 +-
 .../commandhandler/DeleteBlocksCommandHandler.java |  69 ++-
 .../states/endpoint/RegisterEndpointTask.java      |   6 +-
 .../server/ratis/ContainerStateMachine.java        |   2 +-
 .../transport/server/ratis/XceiverServerRatis.java |   6 +-
 .../container/common/utils/ContainerCache.java     |  34 +-
 .../container/common/utils/HddsVolumeUtil.java     |   2 +-
 .../container/common/utils/ReferenceCountedDB.java |  10 +-
 .../container/keyvalue/KeyValueBlockIterator.java  | 156 -----
 .../container/keyvalue/KeyValueContainer.java      |  29 +-
 .../container/keyvalue/KeyValueContainerCheck.java |  12 +-
 .../container/keyvalue/KeyValueContainerData.java  |  48 +-
 .../ozone/container/keyvalue/KeyValueHandler.java  |  14 +-
 .../container/keyvalue/helpers/BlockUtils.java     |   2 +-
 .../keyvalue/helpers/KeyValueContainerUtil.java    | 194 ++++---
 .../container/keyvalue/impl/BlockManagerImpl.java  |  65 ++-
 .../background/BlockDeletingService.java           |  56 +-
 .../metadata/AbstractDatanodeDBDefinition.java     |  74 +++
 .../container/metadata/AbstractDatanodeStore.java  | 297 ++++++++++
 .../ozone/container/metadata/BlockDataCodec.java   |  47 ++
 .../container/metadata/ChunkInfoListCodec.java     |  45 ++
 .../metadata/DatanodeSchemaOneDBDefinition.java    |  91 +++
 .../metadata/DatanodeSchemaTwoDBDefinition.java    |  81 +++
 .../ozone/container/metadata/DatanodeStore.java    |  94 ++++
 .../metadata/DatanodeStoreSchemaOneImpl.java       |  49 ++
 .../metadata/DatanodeStoreSchemaTwoImpl.java       |  44 ++
 .../ozone/container/metadata/DatanodeTable.java    | 130 +++++
 .../metadata/SchemaOneChunkInfoListCodec.java      |  68 +++
 .../metadata/SchemaOneDeletedBlocksTable.java      | 180 ++++++
 .../container/metadata/SchemaOneKeyCodec.java      | 106 ++++
 .../ozone/container/metadata}/package-info.java    |  16 +-
 .../protocol/StorageContainerDatanodeProtocol.java |   8 +-
 ...inerDatanodeProtocolClientSideTranslatorPB.java |  16 +-
 ...inerDatanodeProtocolServerSideTranslatorPB.java |   5 +-
 .../hadoop/ozone/container/common/ScmTestMock.java |  12 +-
 .../container/common/TestBlockDeletingService.java | 113 +++-
 .../ozone/container/common/TestContainerCache.java |  25 +-
 .../common/TestKeyValueContainerData.java          |   3 +
 .../TestSchemaOneBackwardsCompatibility.java       | 626 +++++++++++++++++++++
 .../common/impl/TestContainerDataYaml.java         |   3 +
 .../keyvalue/TestKeyValueBlockIterator.java        | 340 +++++++----
 .../container/keyvalue/TestKeyValueContainer.java  |  75 +--
 .../keyvalue/TestKeyValueContainerCheck.java       |  18 +-
 .../container/ozoneimpl/TestContainerReader.java   |  43 +-
 .../container/ozoneimpl/TestOzoneContainer.java    |  14 +-
 .../test/resources/123-dn-container.db/000024.sst  | Bin 0 -> 1022 bytes
 .../test/resources/123-dn-container.db/000026.sst  | Bin 0 -> 827 bytes
 .../test/resources/123-dn-container.db/000032.sst  | Bin 0 -> 896 bytes
 .../test/resources/123-dn-container.db/000034.log  |   0
 .../src/test/resources/123-dn-container.db/CURRENT |   1 +
 .../test/resources/123-dn-container.db/IDENTITY    |   1 +
 .../resources/123-dn-container.db/MANIFEST-000033  | Bin 0 -> 297 bytes
 .../resources/123-dn-container.db/OPTIONS-000033   | 165 ++++++
 .../resources/123-dn-container.db/OPTIONS-000036   | 165 ++++++
 .../src/test/resources/123.container               |  10 +
 hadoop-hdds/docs/content/_index.md                 |   2 +-
 hadoop-hdds/docs/content/concept/Datanodes.zh.md   |   3 +
 hadoop-hdds/docs/content/concept/Overview.zh.md    |   7 +-
 hadoop-hdds/docs/content/concept/OzoneManager.md   |   8 +-
 .../docs/content/concept/OzoneManager.zh.md        |   3 +
 .../content/concept/StorageContainerManager.md     |   6 +-
 .../content/concept/StorageContainerManager.zh.md  |   3 +
 hadoop-hdds/docs/content/concept/_index.zh.md      |   2 +-
 hadoop-hdds/docs/content/feature/Observability.md  |   4 +-
 hadoop-hdds/docs/content/feature/Recon.md          |  18 +-
 hadoop-hdds/docs/content/interface/CSI.zh.md       |   3 +
 hadoop-hdds/docs/content/interface/JavaApi.zh.md   |   3 +
 hadoop-hdds/docs/content/interface/O3fs.zh.md      |   8 +-
 hadoop-hdds/docs/content/interface/S3.zh.md        |   3 +
 .../docs/content/security/SecureOzone.zh.md        |   6 +-
 .../docs/content/security/SecuringDatanodes.md     |  34 +-
 .../docs/content/security/SecuringDatanodes.zh.md  |  53 ++
 hadoop-hdds/docs/content/security/SecuringS3.zh.md |   3 +
 .../docs/content/security/SecuringTDE.zh.md        |   3 +
 .../docs/content/security/SecurityAcls.zh.md       |   3 +
 .../docs/content/security/SecurityWithRanger.zh.md |   3 +
 hadoop-hdds/docs/content/tools/TestTools.md        |  14 +-
 hadoop-hdds/docs/content/tools/TestTools.zh.md     |  14 +-
 .../themes/ozonedoc/layouts/_default/single.html   |   2 +-
 .../ozonedoc/layouts/partials/languages.html       |   3 +-
 .../themes/ozonedoc/layouts/partials/navbar.html   |   6 +-
 .../themes/ozonedoc/layouts/partials/sidebar.html  |   4 +-
 .../docs/themes/ozonedoc/static/css/ozonedoc.css   |  20 +-
 .../x509/certificate/authority/BaseApprover.java   |   2 +-
 .../server/OzoneProtocolMessageDispatcher.java     |  41 +-
 .../apache/hadoop/hdds/utils/HddsServerUtil.java   |  13 +
 .../hadoop/hdds/utils/MetadataKeyFilters.java      |  42 +-
 .../apache/hadoop/hdds/utils/db/DBDefinition.java  |  14 +-
 .../org/apache/hadoop/hdds/utils/db/DBStore.java   |   8 +-
 .../hadoop/hdds/utils/db/DBStoreBuilder.java       |  41 +-
 .../org/apache/hadoop/hdds/utils/db/RDBStore.java  |  17 +-
 .../org/apache/hadoop/hdds/utils/db/RDBTable.java  |  92 +++
 .../org/apache/hadoop/hdds/utils/db/Table.java     |  56 ++
 .../apache/hadoop/hdds/utils/db/TypedTable.java    |  46 ++
 .../apache/hadoop/hdds/server/TestJsonUtils.java   |   5 +-
 .../src/main/proto/DatanodeClientProtocol.proto    |   4 +
 .../interface-client/src/main/proto/hdds.proto     |  17 +-
 .../interface-client/src/main/resources/proto.lock |  40 +-
 .../proto/ScmServerDatanodeHeartbeatProtocol.proto |   2 +-
 .../interface-server/src/main/resources/proto.lock |   4 +-
 hadoop-hdds/pom.xml                                |   2 +
 .../hdds/scm/block/SCMBlockDeletingService.java    |   3 +-
 .../container/AbstractContainerReportHandler.java  |  51 +-
 .../hdds/scm/container/ContainerReportHandler.java |  14 +-
 .../IncrementalContainerReportHandler.java         |   2 +-
 .../hdds/scm/container/ReplicationManager.java     | 131 ++++-
 .../hdds/scm/container/SCMContainerManager.java    |  33 +-
 .../apache/hadoop/hdds/scm/node/DatanodeInfo.java  |  33 ++
 .../hadoop/hdds/scm/node/DeadNodeHandler.java      |   2 +-
 .../apache/hadoop/hdds/scm/node/NodeManager.java   |   6 +-
 .../hadoop/hdds/scm/node/NodeStateManager.java     |  30 +-
 .../hadoop/hdds/scm/node/SCMNodeManager.java       |  53 +-
 .../scm/pipeline/BackgroundPipelineCreator.java    |   2 +-
 .../hadoop/hdds/scm/pipeline/PipelineManager.java  |   4 +-
 .../hdds/scm/pipeline/PipelinePlacementPolicy.java |   7 +-
 .../hdds/scm/pipeline/PipelineReportHandler.java   |   2 +-
 .../hdds/scm/pipeline/RatisPipelineProvider.java   |   6 +-
 .../hdds/scm/pipeline/RatisPipelineUtils.java      |   2 +-
 .../hdds/scm/pipeline/SCMPipelineManager.java      |  16 +-
 .../SCMSecurityProtocolServerSideTranslatorPB.java |  17 +-
 ...lockLocationProtocolServerSideTranslatorPB.java |   2 +-
 ...inerLocationProtocolServerSideTranslatorPB.java |  17 +-
 .../hdds/scm/safemode/ContainerSafeModeRule.java   |   4 +-
 .../hdds/scm/server/SCMBlockProtocolServer.java    |   7 +-
 .../hdds/scm/server/SCMDatanodeProtocolServer.java |   4 +-
 .../java/org/apache/hadoop/hdds/scm/TestUtils.java |   1 +
 .../hadoop/hdds/scm/block/TestBlockManager.java    |  69 +++
 .../hadoop/hdds/scm/container/MockNodeManager.java |  28 +-
 .../scm/container/TestContainerReportHandler.java  |  33 ++
 .../hdds/scm/container/TestReplicationManager.java |  92 +++
 .../hadoop/hdds/scm/node/TestSCMNodeManager.java   |   4 +-
 .../scm/pipeline/TestPipelinePlacementPolicy.java  |   4 +-
 .../hdds/scm/pipeline/TestSCMPipelineManager.java  |  74 +++
 .../ozone/container/common/TestEndPoint.java       |   2 +-
 .../testutils/ReplicationNodeManagerMock.java      |  12 +-
 .../hdds/scm/cli/container/InfoSubcommand.java     |  14 +-
 .../scm/cli/pipeline/CreatePipelineSubcommand.java |  10 +-
 .../org/apache/hadoop/ozone/client/BucketArgs.java |  43 +-
 .../apache/hadoop/ozone/client/ObjectStore.java    |   3 +
 .../apache/hadoop/ozone/client/OzoneBucket.java    |  93 +++
 .../apache/hadoop/ozone/client/OzoneVolume.java    | 103 +++-
 .../org/apache/hadoop/ozone/client/VolumeArgs.java |  45 +-
 .../ozone/client/io/BlockOutputStreamEntry.java    |  22 +-
 .../client/io/BlockOutputStreamEntryPool.java      |  29 +-
 .../hadoop/ozone/client/io/KeyInputStream.java     |  36 +-
 .../hadoop/ozone/client/io/KeyOutputStream.java    |  60 +-
 .../ozone/client/protocol/ClientProtocol.java      |  17 +-
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |  97 +++-
 .../main/java/org/apache/hadoop/ozone/OmUtils.java |  26 +
 .../apache/hadoop/ozone/freon/OzoneGetConf.java    | 278 ---------
 .../hadoop/ozone/om/exceptions/OMException.java    |   4 +-
 .../ozone/om/exceptions/OMNotLeaderException.java  |   2 +-
 .../ozone/om/ha/OMFailoverProxyProvider.java       | 237 ++++++--
 .../hadoop/ozone/om/helpers/OmBucketArgs.java      |  50 +-
 .../hadoop/ozone/om/helpers/OmBucketInfo.java      |  78 ++-
 .../ozone/om/helpers/OmKeyLocationInfoGroup.java   |   8 +-
 .../hadoop/ozone/om/helpers/OmVolumeArgs.java      |  64 ++-
 .../ozone/om/protocol/OzoneManagerProtocol.java    |   6 +-
 .../ozone/om/protocolPB/Hadoop3OmTransport.java    | 175 +-----
 ...OzoneManagerProtocolClientSideTranslatorPB.java |  12 +-
 .../ozone/security/OzoneTokenIdentifier.java       |   5 +-
 .../apache/hadoop/ozone/util/OzoneVersionInfo.java |  15 +-
 .../hadoop/ozone/om/helpers/TestOmVolumeArgs.java  |   4 +-
 hadoop-ozone/dev-support/checks/build.sh           |   2 +-
 hadoop-ozone/dist/README.md                        |  52 +-
 .../dist/src/main/compose/ozone-csi/docker-config  |   3 +
 .../dist/src/main/compose/ozone-ha/docker-config   |   2 +-
 .../dist/src/main/compose/ozone-mr/common-config   |   3 +-
 .../src/main/compose/ozone-om-ha-s3/docker-config  |   4 +-
 .../dist/src/main/compose/ozone-om-ha-s3/test.sh   |   2 +
 .../src/main/compose/ozone-om-ha/docker-config     |   3 +-
 .../src/main/compose/ozone-topology/docker-config  |   7 +-
 .../dist/src/main/compose/ozone/docker-config      |   3 +
 .../src/main/compose/ozoneblockade/docker-config   |   2 +
 .../src/main/compose/ozones3-haproxy/docker-config |   3 +
 .../src/main/compose/ozonesecure-mr/docker-config  |   3 +-
 .../main/compose/ozonesecure-om-ha/docker-config   |   6 +-
 .../src/main/compose/ozonesecure-om-ha/test.sh     |   2 +
 .../src/main/compose/ozonesecure/docker-config     |   3 +-
 hadoop-ozone/dist/src/main/compose/testlib.sh      |   8 +-
 .../dist/src/main/compose/upgrade/docker-config    |   1 +
 hadoop-ozone/dist/src/main/compose/upgrade/test.sh |   3 +-
 .../main/k8s/definitions/ozone/freon/freon.yaml    |   2 +-
 .../getting-started/freon/freon-deployment.yaml    |   2 +-
 .../examples/minikube/freon/freon-deployment.yaml  |   2 +-
 .../ozone-dev/csi/csi-provisioner-deployment.yaml  |   2 +-
 .../examples/ozone-dev/freon/freon-deployment.yaml |   2 +-
 .../ozone/csi/csi-provisioner-deployment.yaml      |   2 +-
 .../k8s/examples/ozone/freon/freon-deployment.yaml |   2 +-
 .../src/main/smoketest/admincli/container.robot    |   5 +
 .../main/smoketest/auditparser/auditparser.robot   |   2 +-
 .../dist/src/main/smoketest/basic/basic.robot      |   2 +-
 .../loaddata.robot => basic/getconf.robot}         |  20 +-
 .../src/main/smoketest/basic/ozone-shell-lib.robot |  53 +-
 .../main/smoketest/basic/ozone-shell-single.robot  |   2 +-
 .../src/main/smoketest/basic/ozone-shell.robot     |   2 +-
 .../dist/src/main/smoketest/createbucketenv.robot  |   2 +-
 .../dist/src/main/smoketest/createmrenv.robot      |   2 +-
 .../src/main/smoketest/debug/ozone-debug.robot     |   2 +-
 .../dist/src/main/smoketest/freon/freon.robot      |   2 +-
 .../dist/src/main/smoketest/gdpr/gdpr.robot        |   2 +-
 .../dist/src/main/smoketest/ozonefs/setup.robot    |   8 +-
 .../dist/src/main/smoketest/recon/recon-api.robot  |   2 +-
 .../src/main/smoketest/s3/MultipartUpload.robot    | 130 ++---
 .../dist/src/main/smoketest/s3/commonawslib.robot  |   5 +
 .../dist/src/main/smoketest/s3/objectcopy.robot    |  23 +-
 .../dist/src/main/smoketest/s3/objectdelete.robot  |  28 +-
 .../src/main/smoketest/s3/objectmultidelete.robot  |  24 +-
 .../dist/src/main/smoketest/s3/objectputget.robot  |  40 +-
 .../src/main/smoketest/s3/s3_compatbility_check.sh |  47 ++
 .../smoketest/security/ozone-secure-token.robot    |  16 +-
 .../dist/src/main/smoketest/spnego/web.robot       |   2 +-
 .../src/main/smoketest/topology/loaddata.robot     |   2 +-
 hadoop-ozone/dist/src/shell/ozone/ozone            |  15 +-
 hadoop-ozone/dist/src/shell/ozone/stop-ozone.sh    |   8 +-
 .../hadoop/ozone/TestMiniChaosOzoneCluster.java    |  40 +-
 .../src/test/blockade/ozone/client.py              |  10 +-
 .../hadoop/ozone/insight/BaseInsightPoint.java     |  34 +-
 .../ozone/insight/BaseInsightSubCommand.java       |   6 +
 .../apache/hadoop/ozone/insight/InsightPoint.java  |   4 +-
 .../hadoop/ozone/insight/MetricGroupDisplay.java   |   4 +-
 .../hadoop/ozone/insight/MetricsSubCommand.java    |  33 +-
 .../datanode/DatanodeDispatcherInsight.java        | 107 ++++
 .../insight/datanode/PipelineComponentUtil.java    |  78 +++
 .../ozone/insight/datanode/RatisInsight.java       |  45 +-
 .../hadoop/ozone/insight/om/KeyManagerInsight.java |   2 +-
 .../hadoop/ozone/insight/om/OmProtocolInsight.java |   2 +-
 .../ozone/insight/scm/NodeManagerInsight.java      |   2 +-
 .../ozone/insight/scm/ReplicaManagerInsight.java   |   2 +-
 .../scm/ScmProtocolBlockLocationInsight.java       |   2 +-
 .../scm/ScmProtocolContainerLocationInsight.java   |   2 +-
 .../insight/scm/ScmProtocolDatanodeInsight.java    |   2 +-
 .../insight/scm/ScmProtocolSecurityInsight.java    |   2 +-
 .../fs/ozone/TestOzoneFSWithObjectStoreCreate.java | 160 ++++++
 .../hadoop/fs/ozone/TestOzoneFileSystem.java       |   6 +-
 .../hadoop/fs/ozone/TestRootedOzoneFileSystem.java |  18 +-
 .../hadoop/ozone/TestOzoneConfigurationFields.java |   4 +-
 .../ozone/TestStorageContainerManagerHelper.java   |  31 +-
 .../rpc/TestBlockOutputStreamWithFailures.java     |   4 +-
 ...estBlockOutputStreamWithFailuresFlushDelay.java |   4 +-
 .../rpc/TestCloseContainerHandlingByClient.java    |  55 --
 .../hadoop/ozone/client/rpc/TestCommitWatcher.java |  39 +-
 .../rpc/TestContainerStateMachineFailures.java     |  20 +-
 .../client/rpc/TestDiscardPreallocatedBlocks.java  | 186 ++++++
 .../rpc/TestOzoneClientRetriesOnException.java     |   2 +-
 ...estOzoneClientRetriesOnExceptionFlushDelay.java |   2 +-
 .../client/rpc/TestOzoneRpcClientAbstract.java     | 560 +++++++++++++++++-
 .../hadoop/ozone/client/rpc/TestReadRetries.java   |  40 +-
 .../client/rpc/TestValidateBCSIDOnRestart.java     |   8 +-
 .../ozone/client/rpc/TestWatchForCommit.java       |   2 +-
 .../commandhandler/TestBlockDeletion.java          |  92 ++-
 .../ozoneimpl/TestOzoneContainerRatis.java         | 138 -----
 .../container/ozoneimpl/TestRatisManager.java      | 124 ----
 .../ozone/freon/TestOzoneClientKeyGenerator.java   |   2 -
 .../hadoop/ozone/fsck/TestContainerMapper.java     |   3 +
 .../apache/hadoop/ozone/om/TestKeyManagerImpl.java |  18 +-
 .../hadoop/ozone/om/TestOMRatisSnapshots.java      |   2 -
 .../src/main/proto/OmClientProtocol.proto          |  11 +
 hadoop-ozone/interface-storage/pom.xml             |  96 ++++
 .../apache/hadoop/ozone/om/OMMetadataManager.java  |   0
 .../ozone/om/codec/OMTransactionInfoCodec.java     |   0
 .../hadoop/ozone/om/codec/OmBucketInfoCodec.java   |   0
 .../hadoop/ozone/om/codec/OmKeyInfoCodec.java      |   0
 .../ozone/om/codec/OmMultipartKeyInfoCodec.java    |   0
 .../hadoop/ozone/om/codec/OmPrefixInfoCodec.java   |   5 +-
 .../hadoop/ozone/om/codec/OmVolumeArgsCodec.java   |   0
 .../ozone/om/codec/RepeatedOmKeyInfoCodec.java     |   0
 .../hadoop/ozone/om/codec/S3SecretValueCodec.java  |   0
 .../ozone/om/codec/TokenIdentifierCodec.java       |   0
 .../hadoop/ozone/om/codec/UserVolumeInfoCodec.java |   0
 .../apache/hadoop/ozone/om/codec/package-info.java |   2 +-
 .../hadoop/ozone/om/helpers/OmPrefixInfo.java      |  13 +-
 .../hadoop/ozone/om/helpers/OzoneAclStorage.java   |  63 +++
 .../ozone/om/helpers/OzoneAclStorageUtil.java      |  62 ++
 .../hadoop/ozone/om/helpers}/package-info.java     |   4 +-
 .../org/apache/hadoop/ozone/om}/package-info.java  |   4 +-
 .../hadoop/ozone/om/ratis/OMTransactionInfo.java   |   2 +-
 .../hadoop/ozone/om/ratis}/package-info.java       |   4 +-
 .../src/main/proto/OmStorageProtocol.proto         |  60 ++
 .../ozone/om/codec/TestOMTransactionInfoCodec.java |   0
 .../hadoop/ozone/om/codec/TestOmKeyInfoCodec.java  |   0
 .../om/codec/TestOmMultipartKeyInfoCodec.java      |   0
 .../ozone/om/codec/TestOmPrefixInfoCodec.java      |   0
 .../ozone/om/codec/TestRepeatedOmKeyInfoCodec.java |   0
 .../ozone/om/codec/TestS3SecretValueCodec.java     |   0
 .../apache/hadoop/ozone/om/codec/package-info.java |   0
 .../hadoop/ozone/om/helpers/TestOmPrefixInfo.java  |   0
 .../hadoop/ozone/om/helpers}/package-info.java     |   4 +-
 hadoop-ozone/ozone-manager/pom.xml                 |   5 +
 .../apache/hadoop/ozone/om/BucketManagerImpl.java  |   4 +-
 .../apache/hadoop/ozone/om/KeyDeletingService.java |   9 +-
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  81 ++-
 .../hadoop/ozone/om/OmMetadataManagerImpl.java     |   3 +-
 .../hadoop/ozone/om/OpenKeyCleanupService.java     |   3 +-
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |  83 ++-
 .../org/apache/hadoop/ozone/om/VolumeManager.java  |   9 -
 .../apache/hadoop/ozone/om/VolumeManagerImpl.java  |  39 --
 .../apache/hadoop/ozone/om/codec/package-info.java |   3 +
 .../apache/hadoop/ozone/om/fs/OzoneManagerFS.java  |  53 +-
 .../ozone/om/ratis/OzoneManagerRatisServer.java    |   6 +-
 .../ozone/om/ratis/OzoneManagerStateMachine.java   |   2 +-
 .../hadoop/ozone/om/request/OMClientRequest.java   |  18 +-
 .../om/request/bucket/OMBucketCreateRequest.java   |  37 ++
 .../request/bucket/OMBucketSetPropertyRequest.java |  64 +++
 .../bucket/acl/OMBucketRemoveAclRequest.java       |   2 +-
 .../request/bucket/acl/OMBucketSetAclRequest.java  |   2 +-
 .../ozone/om/request/file/OMFileCreateRequest.java |  43 +-
 .../om/request/key/OMAllocateBlockRequest.java     |  45 +-
 .../ozone/om/request/key/OMKeyCommitRequest.java   |  48 +-
 .../ozone/om/request/key/OMKeyCreateRequest.java   |  48 +-
 .../ozone/om/request/key/OMKeyDeleteRequest.java   |  33 +-
 .../hadoop/ozone/om/request/key/OMKeyRequest.java  |  76 ++-
 .../ozone/om/request/key/OMKeysDeleteRequest.java  |  26 +-
 .../ozone/om/request/key/OMKeysRenameRequest.java  |   2 +-
 .../om/request/key/OMTrashRecoverRequest.java      |   2 +-
 .../om/request/key/acl/OMKeyRemoveAclRequest.java  |   2 +-
 .../om/request/key/acl/OMKeySetAclRequest.java     |   2 +-
 .../key/acl/prefix/OMPrefixRemoveAclRequest.java   |   2 +-
 .../key/acl/prefix/OMPrefixSetAclRequest.java      |   2 +-
 .../multipart/S3MultipartUploadAbortRequest.java   |  41 +-
 .../S3MultipartUploadCommitPartRequest.java        |  22 +-
 .../S3MultipartUploadCompleteRequest.java          |  10 +
 .../om/request/volume/OMVolumeSetQuotaRequest.java |  54 +-
 .../om/response/file/OMFileCreateResponse.java     |  10 +-
 .../om/response/key/OMAllocateBlockResponse.java   |  18 +-
 .../ozone/om/response/key/OMKeyCommitResponse.java |  18 +-
 .../ozone/om/response/key/OMKeyCreateResponse.java |  20 +-
 .../ozone/om/response/key/OMKeyDeleteResponse.java |  18 +-
 .../om/response/key/OMKeysDeleteResponse.java      |  20 +-
 .../multipart/S3MultipartUploadAbortResponse.java  |  20 +-
 .../S3MultipartUploadCommitPartResponse.java       |  18 +-
 ...OzoneManagerProtocolServerSideTranslatorPB.java |   4 +-
 .../OzoneDelegationTokenSecretManager.java         |   2 +-
 .../hadoop/ozone/om/failover/TestOMFailovers.java  | 151 +++++
 .../ozone/om/request/TestOMRequestUtils.java       |  57 +-
 .../bucket/TestOMBucketSetPropertyRequest.java     |  41 +-
 .../request/file/TestOMDirectoryCreateRequest.java |   4 +-
 .../ozone/om/request/key/TestOMKeyRequest.java     |   7 +-
 .../s3/multipart/TestS3MultipartRequest.java       |   4 +-
 .../volume/TestOMVolumeSetOwnerRequest.java        |   2 +-
 .../volume/TestOMVolumeSetQuotaRequest.java        |  69 ++-
 .../response/key/TestOMAllocateBlockResponse.java  |  27 +-
 .../om/response/key/TestOMKeyCommitResponse.java   |  19 +-
 .../om/response/key/TestOMKeyCreateResponse.java   |  23 +-
 .../om/response/key/TestOMKeyDeleteResponse.java   |  27 +-
 .../om/response/key/TestOMKeysDeleteResponse.java  |  30 +-
 .../s3/multipart/TestS3MultipartResponse.java      |   7 +-
 .../TestS3MultipartUploadAbortResponse.java        |  21 +-
 .../ozone/BasicRootedOzoneClientAdapterImpl.java   |  41 +-
 hadoop-ozone/ozonefs-shaded/pom.xml                |   5 +
 hadoop-ozone/pom.xml                               |   9 +-
 .../ozone/recon/MetricsServiceProviderFactory.java |  86 +++
 .../hadoop/ozone/recon/ReconControllerModule.java  |   1 +
 .../hadoop/ozone/recon/ReconSchemaManager.java     |   2 +-
 .../hadoop/ozone/recon/ReconServerConfigKeys.java  |  10 +
 .../org/apache/hadoop/ozone/recon/ReconUtils.java  |  15 +-
 .../ozone/recon/api/MetricsProxyEndpoint.java      | 118 ++++
 .../hadoop/ozone/recon/api/PipelineEndpoint.java   |  59 +-
 .../ozone/recon/codec/DatanodeDetailsCodec.java    |   5 +-
 .../apache/hadoop/ozone/recon/metrics/Metric.java  |  51 ++
 .../ReconIncrementalContainerReportHandler.java    |   4 +-
 .../ozone/recon/spi/MetricsServiceProvider.java    |  60 ++
 .../spi/impl/OzoneManagerServiceProviderImpl.java  |   2 +-
 .../spi/impl/PrometheusServiceProviderImpl.java    | 212 +++++++
 .../hadoop/ozone/recon/ReconTestInjector.java      |   3 +
 .../apache/hadoop/ozone/recon/TestReconUtils.java  |   6 +-
 .../hadoop/ozone/recon/api/TestEndpoints.java      | 119 +++-
 .../impl/TestOzoneManagerServiceProviderImpl.java  |   9 +-
 .../test/resources/prometheus-test-response.txt    |  21 +
 .../hadoop/ozone/s3/endpoint/ObjectEndpoint.java   |  24 +-
 .../hadoop/ozone/client/ObjectStoreStub.java       |   5 +-
 .../hadoop/ozone/client/OzoneVolumeStub.java       |   6 +-
 .../s3/endpoint/TestMultipartUploadWithCopy.java   |  15 +-
 .../org/apache/hadoop/ozone/conf/OzoneGetConf.java |  86 +++
 .../ozone/conf/OzoneManagersCommandHandler.java    |  53 ++
 .../ozone/conf/PrintConfKeyCommandHandler.java     |  51 ++
 .../StorageContainerManagersCommandHandler.java    |  52 ++
 .../org/apache/hadoop/ozone/conf/package-info.java |  11 +-
 .../hadoop/ozone/freon/HadoopDirTreeGenerator.java |  15 +-
 .../ozone/freon/HadoopNestedDirGenerator.java      |   5 +-
 .../hadoop/ozone/freon/RandomKeyGenerator.java     |  53 +-
 .../ozone/shell/ClearSpaceQuotaOptions.java}       |  29 +-
 .../hadoop/ozone/shell/SetSpaceQuotaOptions.java}  |  30 +-
 .../hadoop/ozone/shell/bucket/BucketCommands.java  |   4 +-
 .../ClearQuotaHandler.java}                        |  32 +-
 .../ozone/shell/bucket/CreateBucketHandler.java    |  13 +
 .../hadoop/ozone/shell/bucket/SetQuotaHandler.java |  62 ++
 .../hadoop/ozone/shell/token/GetTokenHandler.java  |   8 +-
 .../ozone/shell/token/PrintTokenHandler.java       |   3 +-
 .../ozone/shell/token/RenewTokenHandler.java       |   4 +-
 .../hadoop/ozone/shell/token/TokenOption.java      |  38 +-
 .../ClearQuotaHandler.java}                        |  30 +-
 .../ozone/shell/volume/CreateVolumeHandler.java    |  18 +-
 ...dateVolumeHandler.java => SetQuotaHandler.java} |  47 +-
 .../ozone/shell/volume/UpdateVolumeHandler.java    |  11 -
 .../hadoop/ozone/shell/volume/VolumeCommands.java  |   4 +-
 .../hadoop/ozone/conf/TestGetConfOptions.java      |  90 +++
 pom.xml                                            |  22 +-
 453 files changed, 11235 insertions(+), 3415 deletions(-)
 copy hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell-single.robot => dev-support/byteman/appendlog.btm (66%)
 create mode 100644 dev-support/byteman/hcfs-read.btm
 copy hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell-single.robot => dev-support/byteman/ratis-flush.btm (57%)
 copy hadoop-ozone/dev-support/checks/build.sh => dev-support/byteman/ratis-no-flush.btm (77%)
 mode change 100755 => 100644
 copy hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell-single.robot => dev-support/byteman/watchforcommit.btm (56%)
 create mode 100644 dev-support/byteman/watchforcommit_all.btm
 rename hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/package-info.java => hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientFactory.java (60%)
 create mode 100644 hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java
 copy hadoop-hdds/{framework/src/test/java/org/apache/hadoop/hdds/server/TestJsonUtils.java => client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBufferPool.java} (55%)
 copy hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/{VersionInfo.java => RatisVersionInfo.java} (50%)
 create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java
 create mode 100644 hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigurationReflectionUtil.java
 delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java
 create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeDBDefinition.java
 create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
 create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/BlockDataCodec.java
 create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/ChunkInfoListCodec.java
 create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaOneDBDefinition.java
 create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java
 create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java
 create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaOneImpl.java
 create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaTwoImpl.java
 create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeTable.java
 create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneChunkInfoListCodec.java
 create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneDeletedBlocksTable.java
 create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneKeyCodec.java
 copy {hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/codec => hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata}/package-info.java (76%)
 create mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
 create mode 100644 hadoop-hdds/container-service/src/test/resources/123-dn-container.db/000024.sst
 create mode 100644 hadoop-hdds/container-service/src/test/resources/123-dn-container.db/000026.sst
 create mode 100644 hadoop-hdds/container-service/src/test/resources/123-dn-container.db/000032.sst
 create mode 100644 hadoop-hdds/container-service/src/test/resources/123-dn-container.db/000034.log
 create mode 100644 hadoop-hdds/container-service/src/test/resources/123-dn-container.db/CURRENT
 create mode 100644 hadoop-hdds/container-service/src/test/resources/123-dn-container.db/IDENTITY
 create mode 100644 hadoop-hdds/container-service/src/test/resources/123-dn-container.db/MANIFEST-000033
 create mode 100644 hadoop-hdds/container-service/src/test/resources/123-dn-container.db/OPTIONS-000033
 create mode 100644 hadoop-hdds/container-service/src/test/resources/123-dn-container.db/OPTIONS-000036
 create mode 100644 hadoop-hdds/container-service/src/test/resources/123.container
 create mode 100644 hadoop-hdds/docs/content/security/SecuringDatanodes.zh.md
 delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java
 copy hadoop-ozone/dist/src/main/smoketest/{topology/loaddata.robot => basic/getconf.robot} (64%)
 create mode 100755 hadoop-ozone/dist/src/main/smoketest/s3/s3_compatbility_check.sh
 create mode 100644 hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/datanode/DatanodeDispatcherInsight.java
 create mode 100644 hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/datanode/PipelineComponentUtil.java
 create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java
 delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerRatis.java
 delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestRatisManager.java
 create mode 100644 hadoop-ozone/interface-storage/pom.xml
 rename hadoop-ozone/{ozone-manager => interface-storage}/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java (100%)
 rename hadoop-ozone/{ozone-manager => interface-storage}/src/main/java/org/apache/hadoop/ozone/om/codec/OMTransactionInfoCodec.java (100%)
 rename hadoop-ozone/{ozone-manager => interface-storage}/src/main/java/org/apache/hadoop/ozone/om/codec/OmBucketInfoCodec.java (100%)
 rename hadoop-ozone/{ozone-manager => interface-storage}/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java (100%)
 rename hadoop-ozone/{ozone-manager => interface-storage}/src/main/java/org/apache/hadoop/ozone/om/codec/OmMultipartKeyInfoCodec.java (100%)
 rename hadoop-ozone/{ozone-manager => interface-storage}/src/main/java/org/apache/hadoop/ozone/om/codec/OmPrefixInfoCodec.java (91%)
 rename hadoop-ozone/{ozone-manager => interface-storage}/src/main/java/org/apache/hadoop/ozone/om/codec/OmVolumeArgsCodec.java (100%)
 rename hadoop-ozone/{ozone-manager => interface-storage}/src/main/java/org/apache/hadoop/ozone/om/codec/RepeatedOmKeyInfoCodec.java (100%)
 rename hadoop-ozone/{ozone-manager => interface-storage}/src/main/java/org/apache/hadoop/ozone/om/codec/S3SecretValueCodec.java (100%)
 rename hadoop-ozone/{ozone-manager => interface-storage}/src/main/java/org/apache/hadoop/ozone/om/codec/TokenIdentifierCodec.java (100%)
 rename hadoop-ozone/{ozone-manager => interface-storage}/src/main/java/org/apache/hadoop/ozone/om/codec/UserVolumeInfoCodec.java (100%)
 copy hadoop-ozone/{ozone-manager => interface-storage}/src/main/java/org/apache/hadoop/ozone/om/codec/package-info.java (95%)
 rename hadoop-ozone/{common => interface-storage}/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java (92%)
 create mode 100644 hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclStorage.java
 create mode 100644 hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclStorageUtil.java
 copy hadoop-ozone/{ozone-manager/src/test/java/org/apache/hadoop/ozone/om/codec => interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers}/package-info.java (91%)
 copy hadoop-ozone/{ozone-manager/src/test/java/org/apache/hadoop/ozone/om/codec => interface-storage/src/main/java/org/apache/hadoop/ozone/om}/package-info.java (92%)
 rename hadoop-ozone/{ozone-manager => interface-storage}/src/main/java/org/apache/hadoop/ozone/om/ratis/OMTransactionInfo.java (100%)
 copy hadoop-ozone/{ozone-manager/src/test/java/org/apache/hadoop/ozone/om/codec => interface-storage/src/main/java/org/apache/hadoop/ozone/om/ratis}/package-info.java (92%)
 create mode 100644 hadoop-ozone/interface-storage/src/main/proto/OmStorageProtocol.proto
 rename hadoop-ozone/{ozone-manager => interface-storage}/src/test/java/org/apache/hadoop/ozone/om/codec/TestOMTransactionInfoCodec.java (100%)
 rename hadoop-ozone/{ozone-manager => interface-storage}/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmKeyInfoCodec.java (100%)
 rename hadoop-ozone/{ozone-manager => interface-storage}/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmMultipartKeyInfoCodec.java (100%)
 rename hadoop-ozone/{ozone-manager => interface-storage}/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmPrefixInfoCodec.java (100%)
 rename hadoop-ozone/{ozone-manager => interface-storage}/src/test/java/org/apache/hadoop/ozone/om/codec/TestRepeatedOmKeyInfoCodec.java (100%)
 rename hadoop-ozone/{ozone-manager => interface-storage}/src/test/java/org/apache/hadoop/ozone/om/codec/TestS3SecretValueCodec.java (100%)
 copy hadoop-ozone/{ozone-manager => interface-storage}/src/test/java/org/apache/hadoop/ozone/om/codec/package-info.java (100%)
 rename hadoop-ozone/{common => interface-storage}/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java (100%)
 copy hadoop-ozone/{ozone-manager/src/test/java/org/apache/hadoop/ozone/om/codec => interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers}/package-info.java (92%)
 create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java
 create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/MetricsServiceProviderFactory.java
 create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/MetricsProxyEndpoint.java
 create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/Metric.java
 create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/MetricsServiceProvider.java
 create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/PrometheusServiceProviderImpl.java
 create mode 100644 hadoop-ozone/recon/src/test/resources/prometheus-test-response.txt
 create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/conf/OzoneGetConf.java
 create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/conf/OzoneManagersCommandHandler.java
 create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/conf/PrintConfKeyCommandHandler.java
 create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/conf/StorageContainerManagersCommandHandler.java
 copy hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTask.java => hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/conf/package-info.java (79%)
 rename hadoop-ozone/{ozone-manager/src/test/java/org/apache/hadoop/ozone/om/codec/package-info.java => tools/src/main/java/org/apache/hadoop/ozone/shell/ClearSpaceQuotaOptions.java} (60%)
 copy hadoop-ozone/{ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/package-info.java => tools/src/main/java/org/apache/hadoop/ozone/shell/SetSpaceQuotaOptions.java} (55%)
 copy hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/{token/RenewTokenHandler.java => bucket/ClearQuotaHandler.java} (57%)
 create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetQuotaHandler.java
 copy hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/{token/RenewTokenHandler.java => volume/ClearQuotaHandler.java} (59%)
 copy hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/{UpdateVolumeHandler.java => SetQuotaHandler.java} (58%)
 create mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/conf/TestGetConfOptions.java


---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-commits-help@hadoop.apache.org


[hadoop-ozone] 02/08: HDDS-4173. Implement HDDS Version management using the LayoutVersionManager interface. (#1392)

Posted by av...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

avijayan pushed a commit to branch HDDS-3698-upgrade
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git

commit 3fe83ce990770d23094312c9b936400cbbca377d
Author: prashantpogde <pr...@gmail.com>
AuthorDate: Wed Sep 9 09:46:18 2020 -0700

    HDDS-4173.  Implement HDDS Version management using the LayoutVersionManager interface. (#1392)
---
 .../hdds/upgrade/HDDSLayoutFeatureCatalog.java     | 69 ++++++++++++++++
 .../hdds/upgrade/HDDSLayoutVersionManager.java     | 92 ++++++++++++++++++++++
 .../hadoop/hdds/upgrade/HDDSUpgradeAction.java     | 27 +++++++
 .../apache/hadoop/hdds/upgrade/package-info.java   | 23 ++++++
 .../server/upgrade/NewSCMFeatureUpgradeAction.java | 34 ++++++++
 .../hdds/scm/server/upgrade/SCMUpgradeAction.java  | 29 +++++++
 .../hdds/scm/server/upgrade/package-info.java      | 23 ++++++
 7 files changed, 297 insertions(+)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeatureCatalog.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeatureCatalog.java
new file mode 100644
index 0000000..9793f5d
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeatureCatalog.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.upgrade;
+
+import java.util.Optional;
+
+import org.apache.hadoop.ozone.upgrade.LayoutFeature;
+
+/**
+ * Catalog of HDDS features.
+ */
+public class HDDSLayoutFeatureCatalog {
+
+  /**
+   * List of HDDS Features.
+   */
+  public enum HDDSLayoutFeature implements LayoutFeature {
+    INITIAL_VERSION(0, "Initial Layout Version");
+
+
+    private int layoutVersion;
+    private String description;
+    private Optional<HDDSUpgradeAction> hddsUpgradeAction = Optional.empty();
+
+    HDDSLayoutFeature(final int layoutVersion, String description) {
+      this.layoutVersion = layoutVersion;
+      this.description = description;
+    }
+
+    HDDSLayoutFeature(final int layoutVersion, String description,
+                    HDDSUpgradeAction upgradeAction) {
+      this.layoutVersion = layoutVersion;
+      this.description = description;
+      hddsUpgradeAction = Optional.of(upgradeAction);
+    }
+
+    @Override
+    public int layoutVersion() {
+      return layoutVersion;
+    }
+
+    @Override
+    public String description() {
+      return description;
+    }
+
+    @Override
+    public Optional<? extends HDDSUpgradeAction> onFinalizeAction() {
+      return hddsUpgradeAction;
+    }
+  }
+}
+
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutVersionManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutVersionManager.java
new file mode 100644
index 0000000..3ed28b2
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutVersionManager.java
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.upgrade;
+
+
+import java.io.IOException;
+
+import org.apache.hadoop.ozone.common.Storage;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeatureCatalog.HDDSLayoutFeature;
+import org.apache.hadoop.ozone.upgrade.AbstractLayoutVersionManager;
+import org.apache.hadoop.ozone.upgrade.LayoutVersionManager;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Class to manage layout versions and features for Storage Container Manager
+ * and DataNodes.
+ */
+public final class HDDSLayoutVersionManager extends
+    AbstractLayoutVersionManager {
+
+  private static HDDSLayoutVersionManager hddsLayoutVersionManager;
+
+  private HDDSLayoutVersionManager() {
+  }
+
+  /**
+   * Read only instance to HDDS Version Manager.
+   * @return version manager instance.
+   */
+  public static synchronized LayoutVersionManager getInstance() {
+    if (hddsLayoutVersionManager == null) {
+      throw new RuntimeException("HDDS Layout Version Manager not yet " +
+          "initialized.");
+    }
+    return hddsLayoutVersionManager;
+  }
+
+
+  /**
+   * Initialize HDDS version manager from scmstorage.
+   * @return version manager instance.
+   */
+  public static synchronized HDDSLayoutVersionManager initialize(
+      Storage hddsStorage)
+      throws IOException {
+    if (hddsLayoutVersionManager == null) {
+      hddsLayoutVersionManager = new HDDSLayoutVersionManager();
+      hddsLayoutVersionManager.init(hddsStorage);
+    }
+    return hddsLayoutVersionManager;
+  }
+
+  /**
+   * Initialize the HDDS Layout Features and current Layout Version.
+   * @param storage to read the current layout version.
+   * @throws IOException on error.
+   */
+  private void init(Storage storage) throws IOException {
+    init(storage.getLayoutVersion(), HDDSLayoutFeature.values());
+    if (metadataLayoutVersion > softwareLayoutVersion) {
+      throw new IOException(
+          String.format("Cannot initialize VersionManager. Metadata " +
+                  "layout version (%d) > software layout version (%d)",
+              metadataLayoutVersion, softwareLayoutVersion));
+    }
+  }
+
+  @VisibleForTesting
+  protected synchronized static void resetLayoutVersionManager() {
+    if (hddsLayoutVersionManager != null) {
+      hddsLayoutVersionManager.reset();
+      hddsLayoutVersionManager = null;
+    }
+  }
+}
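
Taken together, the intended lifecycle is initialize-once from storage, then read-only access elsewhere. A minimal usage sketch, assuming scmStorage is a placeholder for an already-loaded Storage instance:

    HDDSLayoutVersionManager versionManager =
        HDDSLayoutVersionManager.initialize(scmStorage);
    int mlv = versionManager.getMetadataLayoutVersion();
    int slv = versionManager.getSoftwareLayoutVersion();
    // mlv < slv means on-disk metadata still needs finalization to the new layout.
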
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSUpgradeAction.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSUpgradeAction.java
new file mode 100644
index 0000000..0808b0f
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSUpgradeAction.java
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.upgrade;
+
+import org.apache.hadoop.ozone.upgrade.LayoutFeature.UpgradeAction;
+
+/**
+ * Upgrade Action for SCM and DataNodes.
+ */
+public interface HDDSUpgradeAction<T> extends UpgradeAction<T> {
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/package-info.java
new file mode 100644
index 0000000..74b9638
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/package-info.java
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.upgrade;
+
+/**
+ * This package contains HDDS upgrade related classes used by SCM and DataNodes.
+ */
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/NewSCMFeatureUpgradeAction.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/NewSCMFeatureUpgradeAction.java
new file mode 100644
index 0000000..88afad1
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/NewSCMFeatureUpgradeAction.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.server.upgrade;
+
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.hdds.upgrade.HDDSUpgradeAction;
+
+/**
+ * Example SCM upgrade action, provided to illustrate the upgrade action pattern.
+ */
+public class NewSCMFeatureUpgradeAction implements
+    HDDSUpgradeAction<StorageContainerManager> {
+
+  @Override
+  public void executeAction(StorageContainerManager scm) {
+    // Placeholder: feature-specific finalization logic for the SCM goes here.
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeAction.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeAction.java
new file mode 100644
index 0000000..809c3dd
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeAction.java
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.server.upgrade;
+
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.hdds.upgrade.HDDSUpgradeAction;
+
+/**
+ * Upgrade Action for the StorageContainerManager, operating on an SCM instance.
+ */
+public interface SCMUpgradeAction<T> extends
+    HDDSUpgradeAction<StorageContainerManager> {
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/package-info.java
new file mode 100644
index 0000000..23b45cf
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/package-info.java
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.server.upgrade;
+
+/**
+ * This package contains SCM Upgrade related classes.
+ */


---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-commits-help@hadoop.apache.org


[hadoop-ozone] 06/08: HDDS-4252. Add the current layout versions to DN - SCM proto payload. (#1432)

Posted by av...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

avijayan pushed a commit to branch HDDS-3698-upgrade
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git

commit b96834f7ee18bb556b96ce1679a610d679e10f97
Author: prashantpogde <pr...@gmail.com>
AuthorDate: Mon Sep 28 11:28:22 2020 -0700

    HDDS-4252. Add the current layout versions to DN - SCM proto payload. (#1432)
---
 .../hdds/upgrade/HDDSLayoutVersionManager.java     |  21 +---
 .../java/org/apache/hadoop/ozone/OzoneConsts.java  |   2 +
 .../upgrade/AbstractLayoutVersionManager.java      |   9 +-
 .../upgrade/TestAbstractLayoutVersionManager.java  |  30 +++--
 .../common/statemachine/DatanodeStateMachine.java  |  19 +++-
 .../FinalizeNewLayoutVersionCommandHandler.java    | 121 +++++++++++++++++++++
 .../states/endpoint/HeartbeatEndpointTask.java     |  65 ++++++++++-
 .../states/endpoint/RegisterEndpointTask.java      |  57 +++++++++-
 .../upgrade/DataNodeLayoutVersionManager.java      | 119 ++++++++++++++++++++
 .../ozone/container/upgrade/package-info.java      |  21 ++++
 .../protocol/StorageContainerDatanodeProtocol.java |   6 +-
 .../commands/FinalizeNewLayoutVersionCommand.java  |  73 +++++++++++++
 ...inerDatanodeProtocolClientSideTranslatorPB.java |  11 +-
 ...inerDatanodeProtocolServerSideTranslatorPB.java |   8 +-
 .../hadoop/ozone/container/common/ScmTestMock.java |  11 +-
 .../states/endpoint/TestHeartbeatEndpointTask.java |  12 +-
 .../proto/ScmServerDatanodeHeartbeatProtocol.proto |   1 +
 .../hdds/scm/server/SCMDatanodeProtocolServer.java |   4 +-
 .../ozone/container/common/TestEndPoint.java       |  22 +++-
 .../om/upgrade/OMLayoutVersionManagerImpl.java     |  12 +-
 .../hadoop/ozone/recon/api/TestEndpoints.java      |  12 +-
 21 files changed, 575 insertions(+), 61 deletions(-)

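In short, the datanode now builds a LayoutVersionProto from its layout version manager and attaches it to both the registration and heartbeat requests. A condensed sketch of that payload, mirroring the hunks below (layoutVersionManager and requestBuilder are placeholders):

    LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
        .setMetadataLayoutVersion(layoutVersionManager.getMetadataLayoutVersion())
        .setSoftwareLayoutVersion(layoutVersionManager.getSoftwareLayoutVersion())
        .build();
    requestBuilder.setDataNodeLayoutVersion(layoutInfo);
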
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutVersionManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutVersionManager.java
index 3ed28b2..8c3ff3d 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutVersionManager.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutVersionManager.java
@@ -32,7 +32,8 @@ import com.google.common.annotations.VisibleForTesting;
  * Class to manage layout versions and features for Storage Container Manager
  * and DataNodes.
  */
-public final class HDDSLayoutVersionManager extends
+@SuppressWarnings("FinalClass")
+public class HDDSLayoutVersionManager extends
     AbstractLayoutVersionManager {
 
   private static HDDSLayoutVersionManager hddsLayoutVersionManager;
@@ -62,26 +63,12 @@ public final class HDDSLayoutVersionManager extends
       throws IOException {
     if (hddsLayoutVersionManager == null) {
       hddsLayoutVersionManager = new HDDSLayoutVersionManager();
-      hddsLayoutVersionManager.init(hddsStorage);
+      hddsLayoutVersionManager.init(hddsStorage.getLayoutVersion(),
+          HDDSLayoutFeature.values());
     }
     return hddsLayoutVersionManager;
   }
 
-  /**
-   * Initialize the HDDS Layout Features and current Layout Version.
-   * @param storage to read the current layout version.
-   * @throws IOException on error.
-   */
-  private void init(Storage storage) throws IOException {
-    init(storage.getLayoutVersion(), HDDSLayoutFeature.values());
-    if (metadataLayoutVersion > softwareLayoutVersion) {
-      throw new IOException(
-          String.format("Cannot initialize VersionManager. Metadata " +
-                  "layout version (%d) > software layout version (%d)",
-              metadataLayoutVersion, softwareLayoutVersion));
-    }
-  }
-
   @VisibleForTesting
   protected synchronized static void resetLayoutVersionManager() {
     if (hddsLayoutVersionManager != null) {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index a7aca16..a079925 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -37,6 +37,8 @@ public final class OzoneConsts {
   public static final String STORAGE_DIR = "scm";
   public static final String SCM_ID = "scmUuid";
 
+  public static final String DATANODE_STORAGE_CONFIG = "datanode.config";
+
   public static final String OZONE_SIMPLE_ROOT_USER = "root";
   public static final String OZONE_SIMPLE_HDFS_USER = "hdfs";
 
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/AbstractLayoutVersionManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/AbstractLayoutVersionManager.java
index 99f72c8..158900b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/AbstractLayoutVersionManager.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/AbstractLayoutVersionManager.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.ozone.upgrade;
 
+import java.io.IOException;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -40,12 +41,18 @@ public abstract class AbstractLayoutVersionManager implements
   protected Map<String, LayoutFeature> featureMap = new HashMap<>();
   protected volatile boolean isInitialized = false;
 
-  protected void init(int version, LayoutFeature[] lfs) {
+  protected void init(int version, LayoutFeature[] lfs) throws IOException {
     if (!isInitialized) {
       metadataLayoutVersion = version;
       initializeFeatures(lfs);
       softwareLayoutVersion = features.lastKey();
       isInitialized = true;
+      if (metadataLayoutVersion > softwareLayoutVersion) {
+        throw new IOException(
+            String.format("Cannot initialize VersionManager. Metadata " +
+                    "layout version (%d) > software layout version (%d)",
+                metadataLayoutVersion, softwareLayoutVersion));
+      }
     }
   }
 
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestAbstractLayoutVersionManager.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestAbstractLayoutVersionManager.java
index 44fa100..b7faf74 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestAbstractLayoutVersionManager.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestAbstractLayoutVersionManager.java
@@ -22,6 +22,8 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
+import java.io.IOException;
+
 import org.junit.Before;
 import org.junit.Test;
 
@@ -40,19 +42,29 @@ public class TestAbstractLayoutVersionManager {
 
   @Test
   public void testInit() {
-    versionManager.init(1,
-        getTestLayoutFeatures(2));
-    assertEquals(2, versionManager.features.size());
-    assertEquals(2, versionManager.featureMap.size());
-    assertEquals(1, versionManager.getMetadataLayoutVersion());
-    assertEquals(2, versionManager.getSoftwareLayoutVersion());
-    assertTrue(versionManager.needsFinalization());
+    try {
+      versionManager.init(1,
+          getTestLayoutFeatures(2));
+      assertEquals(2, versionManager.features.size());
+      assertEquals(2, versionManager.featureMap.size());
+      assertEquals(1, versionManager.getMetadataLayoutVersion());
+      assertEquals(2, versionManager.getSoftwareLayoutVersion());
+      assertTrue(versionManager.needsFinalization());
+    } catch (IOException e) {
+      // We don't expect it to throw IOException.
+      assertTrue(false);
+    }
   }
 
   @Test
   public void testNeedsFinalization() {
-    versionManager.init(2, getTestLayoutFeatures(2));
-    assertFalse(versionManager.needsFinalization());
+    try {
+      versionManager.init(2, getTestLayoutFeatures(2));
+      assertFalse(versionManager.needsFinalization());
+    } catch (IOException e) {
+      // We don't expect it to throw IOException.
+      assertTrue(false);
+    }
   }
 
   private LayoutFeature[] getTestLayoutFeatures(int num) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
index f07cc3a..f52a6b2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
@@ -27,6 +27,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
@@ -42,6 +43,7 @@ import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.Comm
 import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.CreatePipelineCommandHandler;
 import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.DeleteBlocksCommandHandler;
 import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.DeleteContainerCommandHandler;
+import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.FinalizeNewLayoutVersionCommandHandler;
 import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.ReplicateContainerCommandHandler;
 import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
@@ -49,6 +51,7 @@ import org.apache.hadoop.ozone.container.replication.ContainerReplicator;
 import org.apache.hadoop.ozone.container.replication.DownloadAndImportReplicator;
 import org.apache.hadoop.ozone.container.replication.ReplicationSupervisor;
 import org.apache.hadoop.ozone.container.replication.SimpleContainerDownloader;
+import org.apache.hadoop.ozone.container.upgrade.DataNodeLayoutVersionManager;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.apache.hadoop.util.JvmPauseMonitor;
 import org.apache.hadoop.util.Time;
@@ -82,6 +85,9 @@ public class DatanodeStateMachine implements Closeable {
   private JvmPauseMonitor jvmPauseMonitor;
   private CertificateClient dnCertClient;
   private final HddsDatanodeStopService hddsDatanodeStopService;
+
+  private DataNodeLayoutVersionManager dataNodeVersionManager;
+
   /**
    * Used to synchronize to the OzoneContainer object created in the
    * constructor in a non-thread-safe way - see HDDS-3116.
@@ -96,14 +102,17 @@ public class DatanodeStateMachine implements Closeable {
    *                     enabled
    */
   public DatanodeStateMachine(DatanodeDetails datanodeDetails,
-      ConfigurationSource conf, CertificateClient certClient,
-      HddsDatanodeStopService hddsDatanodeStopService) throws IOException {
+                              OzoneConfiguration conf,
+                              CertificateClient certClient,
+                              HddsDatanodeStopService hddsDatanodeStopService)
+      throws IOException {
     DatanodeConfiguration dnConf =
         conf.getObject(DatanodeConfiguration.class);
 
     this.hddsDatanodeStopService = hddsDatanodeStopService;
     this.conf = conf;
     this.datanodeDetails = datanodeDetails;
+    dataNodeVersionManager = DataNodeLayoutVersionManager.initialize(conf);
     executorService = Executors.newFixedThreadPool(
         getEndPointTaskThreadPoolSize(),
         new ThreadFactoryBuilder()
@@ -145,6 +154,7 @@ public class DatanodeStateMachine implements Closeable {
             dnConf.getContainerDeleteThreads()))
         .addHandler(new ClosePipelineCommandHandler())
         .addHandler(new CreatePipelineCommandHandler(conf))
+        .addHandler(new FinalizeNewLayoutVersionCommandHandler())
         .setConnectionManager(connectionManager)
         .setContainer(container)
         .setContext(context)
@@ -544,4 +554,9 @@ public class DatanodeStateMachine implements Closeable {
   public ReplicationSupervisor getSupervisor() {
     return supervisor;
   }
+
+  @VisibleForTesting
+  public DataNodeLayoutVersionManager getDataNodeVersionManager() {
+    return dataNodeVersionManager;
+  }
 }
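
One consequence of this hunk is that the state machine constructor now takes an OzoneConfiguration and eagerly initializes the DataNode layout version manager. A minimal construction sketch, with all arguments as placeholders:

    DatanodeStateMachine stateMachine = new DatanodeStateMachine(
        datanodeDetails, ozoneConfiguration, certClient, stopService);
    // The endpoint tasks read the layout versions from here when building
    // registration and heartbeat payloads.
    DataNodeLayoutVersionManager dnLayoutManager =
        stateMachine.getDataNodeVersionManager();
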
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/FinalizeNewLayoutVersionCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/FinalizeNewLayoutVersionCommandHandler.java
new file mode 100644
index 0000000..dc0fdfe
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/FinalizeNewLayoutVersionCommandHandler.java
@@ -0,0 +1,121 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
+
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos
+    .FinalizeNewLayoutVersionCommandProto;
+import org.apache.hadoop.ozone.container.common.statemachine
+    .SCMConnectionManager;
+import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
+import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
+import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
+import org.apache.hadoop.ozone.protocol.commands.FinalizeNewLayoutVersionCommand;
+import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
+import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * Handler for FinalizeNewLayoutVersion command received from SCM.
+ */
+public class FinalizeNewLayoutVersionCommandHandler implements CommandHandler {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(FinalizeNewLayoutVersionCommandHandler.class);
+
+  private AtomicLong invocationCount = new AtomicLong(0);
+  private long totalTime;
+
+  /**
+   * Constructs a FinalizeNewLayoutVersionCommandHandler.
+   */
+  public FinalizeNewLayoutVersionCommandHandler() {
+  }
+
+  /**
+   * Handles a given SCM command.
+   *
+   * @param command           - SCM Command
+   * @param ozoneContainer         - Ozone Container.
+   * @param context           - Current Context.
+   * @param connectionManager - The SCMs that we are talking to.
+   */
+  @Override
+  public void handle(SCMCommand command, OzoneContainer ozoneContainer,
+      StateContext context, SCMConnectionManager connectionManager) {
+    LOG.debug("Processing FinalizeNewLayoutVersionCommandHandler command.");
+    invocationCount.incrementAndGet();
+    final long startTime = Time.monotonicNow();
+    final DatanodeDetails datanodeDetails = context.getParent()
+        .getDatanodeDetails();
+    final FinalizeNewLayoutVersionCommandProto finalizeCommand =
+        ((FinalizeNewLayoutVersionCommand)command).getProto();
+    final ContainerController controller = ozoneContainer.getController();
+    final boolean finalizeUpgrade =
+        finalizeCommand.getFinalizeNewLayoutVersion();
+    try {
+      // TODO : finalization logic
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Finalize Upgrade called!");
+      }
+    } catch (Exception e) {
+      LOG.debug("Unexpected Error: {} ", e);
+    } finally {
+      long endTime = Time.monotonicNow();
+      totalTime += endTime - startTime;
+    }
+  }
+
+  /**
+   * Returns the command type that this command handler handles.
+   *
+   * @return Type
+   */
+  @Override
+  public SCMCommandProto.Type getCommandType() {
+    return SCMCommandProto.Type.finalizeNewLayoutVersionCommand;
+  }
+
+  /**
+   * Returns number of times this handler has been invoked.
+   *
+   * @return int
+   */
+  @Override
+  public int getInvocationCount() {
+    return (int)invocationCount.get();
+  }
+
+  /**
+   * Returns the average time this function takes to run.
+   *
+   * @return long
+   */
+  @Override
+  public long getAverageRunTime() {
+    if (invocationCount.get() > 0) {
+      return totalTime / invocationCount.get();
+    }
+    return 0;
+  }
+}
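
The handler currently only records timing around a TODO. A test-style sketch of driving it directly, where layoutInfo, ozoneContainer, stateContext and connectionManager are all placeholders:

    FinalizeNewLayoutVersionCommand command =
        new FinalizeNewLayoutVersionCommand(true, layoutInfo, 1L);
    FinalizeNewLayoutVersionCommandHandler handler =
        new FinalizeNewLayoutVersionCommandHandler();
    handler.handle(command, ozoneContainer, stateContext, connectionManager);
    assert handler.getInvocationCount() == 1;
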
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
index da2034d..63ccff6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
@@ -25,6 +25,8 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
+import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.PipelineActionsProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.PipelineAction;
@@ -45,11 +47,13 @@ import org.apache.hadoop.ozone.container.common.statemachine
 import org.apache.hadoop.ozone.container.common.statemachine
     .EndpointStateMachine.EndPointStates;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
+import org.apache.hadoop.ozone.container.upgrade.DataNodeLayoutVersionManager;
 import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
 import org.apache.hadoop.ozone.protocol.commands.ClosePipelineCommand;
 import org.apache.hadoop.ozone.protocol.commands.CreatePipelineCommand;
 import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
 import org.apache.hadoop.ozone.protocol.commands.DeleteContainerCommand;
+import org.apache.hadoop.ozone.protocol.commands.FinalizeNewLayoutVersionCommand;
 import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand;
 
 import org.slf4j.Logger;
@@ -83,14 +87,32 @@ public class HeartbeatEndpointTask
   private StateContext context;
   private int maxContainerActionsPerHB;
   private int maxPipelineActionsPerHB;
+  private DataNodeLayoutVersionManager layoutVersionManager;
 
   /**
    * Constructs a SCM heart beat.
    *
+   * @param rpcEndpoint rpc Endpoint
    * @param conf Config.
+   * @param context State context
    */
   public HeartbeatEndpointTask(EndpointStateMachine rpcEndpoint,
-      ConfigurationSource conf, StateContext context) {
+                               ConfigurationSource conf, StateContext context) {
+    this(rpcEndpoint, conf, context,
+        context.getParent().getDataNodeVersionManager());
+  }
+
+  /**
+   * Constructs an SCM heartbeat task.
+   *
+   * @param rpcEndpoint rpc Endpoint
+   * @param conf Config.
+   * @param context State context
+   * @param versionManager Layout version Manager
+   */
+  public HeartbeatEndpointTask(EndpointStateMachine rpcEndpoint,
+                               ConfigurationSource conf, StateContext context,
+                               DataNodeLayoutVersionManager versionManager) {
     this.rpcEndpoint = rpcEndpoint;
     this.conf = conf;
     this.context = context;
@@ -98,6 +120,12 @@ public class HeartbeatEndpointTask
         HDDS_CONTAINER_ACTION_MAX_LIMIT_DEFAULT);
     this.maxPipelineActionsPerHB = conf.getInt(HDDS_PIPELINE_ACTION_MAX_LIMIT,
         HDDS_PIPELINE_ACTION_MAX_LIMIT_DEFAULT);
+    if (versionManager != null) {
+      this.layoutVersionManager = versionManager;
+    } else {
+      this.layoutVersionManager =
+         context.getParent().getDataNodeVersionManager();
+    }
   }
 
   /**
@@ -132,8 +160,16 @@ public class HeartbeatEndpointTask
     try {
       Preconditions.checkState(this.datanodeDetailsProto != null);
 
+      LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
+          .setSoftwareLayoutVersion(
+              layoutVersionManager.getSoftwareLayoutVersion())
+          .setMetadataLayoutVersion(
+              layoutVersionManager.getMetadataLayoutVersion())
+          .build();
+
       requestBuilder = SCMHeartbeatRequestProto.newBuilder()
-          .setDatanodeDetails(datanodeDetailsProto);
+          .setDatanodeDetails(datanodeDetailsProto)
+          .setDataNodeLayoutVersion(layoutInfo);
       addReports(requestBuilder);
       addContainerActions(requestBuilder);
       addPipelineActions(requestBuilder);
@@ -331,6 +367,16 @@ public class HeartbeatEndpointTask
         }
         this.context.addCommand(closePipelineCommand);
         break;
+      case finalizeNewLayoutVersionCommand:
+        FinalizeNewLayoutVersionCommand finalizeNewLayoutVersionCommand =
+            FinalizeNewLayoutVersionCommand.getFromProtobuf(
+                commandResponseProto.getFinalizeNewLayoutVersionCommandProto());
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Received SCM finalize command {}",
+              finalizeNewLayoutVersionCommand.getId());
+        }
+        this.context.addCommand(finalizeNewLayoutVersionCommand);
+        break;
       default:
         throw new IllegalArgumentException("Unknown response : "
             + commandResponseProto.getCommandType().name());
@@ -346,6 +392,7 @@ public class HeartbeatEndpointTask
     private ConfigurationSource conf;
     private DatanodeDetails datanodeDetails;
     private StateContext context;
+    private DataNodeLayoutVersionManager versionManager;
 
     /**
      * Constructs the builder class.
@@ -365,6 +412,18 @@ public class HeartbeatEndpointTask
     }
 
     /**
+     * Sets the LayoutVersionManager.
+     *
+     * @param versionMgr - layout version manager
+     * @return Builder
+     */
+    public Builder setLayoutVersionManager(
+        DataNodeLayoutVersionManager versionMgr) {
+      this.versionManager = versionMgr;
+      return this;
+    }
+
+    /**
      * Sets the Config.
      *
      * @param config - config
@@ -416,7 +475,7 @@ public class HeartbeatEndpointTask
       }
 
       HeartbeatEndpointTask task = new HeartbeatEndpointTask(this
-          .endPointStateMachine, this.conf, this.context);
+          .endPointStateMachine, this.conf, this.context, this.versionManager);
       task.setDatanodeDetailsProto(datanodeDetails.getProtoBufMessage());
       return task;
     }
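
The builder change is exercised in TestHeartbeatEndpointTask further down; the usage pattern, with placeholder variables, looks like:

    HeartbeatEndpointTask task = HeartbeatEndpointTask.newBuilder()
        .setConfig(conf)
        .setDatanodeDetails(datanodeDetails)
        .setContext(context)
        .setLayoutVersionManager(layoutVersionManager)
        .setEndpointStateMachine(endpointStateMachine)
        .build();
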
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
index 60d2bb2..83c43b2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
@@ -31,8 +31,11 @@ import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
+import org.apache.hadoop.ozone.container.upgrade.DataNodeLayoutVersionManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -54,6 +57,24 @@ public final class RegisterEndpointTask implements
   private DatanodeDetails datanodeDetails;
   private final OzoneContainer datanodeContainerManager;
   private StateContext stateContext;
+  private DataNodeLayoutVersionManager layoutVersionManager;
+
+  /**
+   * Creates a register endpoint task.
+   *
+   * @param rpcEndPoint - endpoint
+   * @param conf - conf
+   * @param ozoneContainer - container
+   * @param context - State context
+   */
+  @VisibleForTesting
+  public RegisterEndpointTask(EndpointStateMachine rpcEndPoint,
+                              ConfigurationSource conf,
+                              OzoneContainer ozoneContainer,
+                              StateContext context) {
+    this(rpcEndPoint, conf, ozoneContainer, context,
+        context.getParent().getDataNodeVersionManager());
+  }
 
   /**
    * Creates a register endpoint task.
@@ -61,16 +82,23 @@ public final class RegisterEndpointTask implements
    * @param rpcEndPoint - endpoint
    * @param conf - conf
    * @param ozoneContainer - container
+   * @param context - State context
+   * @param versionManager - layout version Manager
    */
   @VisibleForTesting
   public RegisterEndpointTask(EndpointStateMachine rpcEndPoint,
       ConfigurationSource conf, OzoneContainer ozoneContainer,
-      StateContext context) {
+      StateContext context, DataNodeLayoutVersionManager versionManager) {
     this.rpcEndPoint = rpcEndPoint;
     this.conf = conf;
     this.datanodeContainerManager = ozoneContainer;
     this.stateContext = context;
-
+    if (versionManager != null) {
+      this.layoutVersionManager = versionManager;
+    } else {
+      this.layoutVersionManager =
+          context.getParent().getDataNodeVersionManager();
+    }
   }
 
   /**
@@ -112,6 +140,12 @@ public final class RegisterEndpointTask implements
 
       if (rpcEndPoint.getState()
           .equals(EndpointStateMachine.EndPointStates.REGISTER)) {
+        LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
+            .setMetadataLayoutVersion(
+                layoutVersionManager.getMetadataLayoutVersion())
+            .setSoftwareLayoutVersion(
+                layoutVersionManager.getSoftwareLayoutVersion())
+            .build();
         ContainerReportsProto containerReport =
             datanodeContainerManager.getController().getContainerReport();
         NodeReportProto nodeReport = datanodeContainerManager.getNodeReport();
@@ -120,7 +154,7 @@ public final class RegisterEndpointTask implements
         // TODO : Add responses to the command Queue.
         SCMRegisteredResponseProto response = rpcEndPoint.getEndPoint()
             .register(datanodeDetails.getExtendedProtoBufMessage(),
-                nodeReport, containerReport, pipelineReportsProto);
+                nodeReport, containerReport, pipelineReportsProto, layoutInfo);
         Preconditions.checkState(UUID.fromString(response.getDatanodeUUID())
                 .equals(datanodeDetails.getUuid()),
             "Unexpected datanode ID in the response.");
@@ -167,6 +201,7 @@ public final class RegisterEndpointTask implements
     private DatanodeDetails datanodeDetails;
     private OzoneContainer container;
     private StateContext context;
+    private DataNodeLayoutVersionManager versionManager;
 
     /**
      * Constructs the builder class.
@@ -197,6 +232,18 @@ public final class RegisterEndpointTask implements
     }
 
     /**
+     * Sets the LayoutVersionManager.
+     *
+     * @param versionMgr - layout version manager
+     * @return Builder.
+     */
+    public Builder setLayoutVersionManager(
+        DataNodeLayoutVersionManager versionMgr) {
+      this.versionManager = versionMgr;
+      return this;
+    }
+
+    /**
      * Sets the NodeID.
      *
      * @param dnDetails - NodeID proto
@@ -255,10 +302,10 @@ public final class RegisterEndpointTask implements
       }
 
       RegisterEndpointTask task = new RegisterEndpointTask(this
-          .endPointStateMachine, this.conf, this.container, this.context);
+          .endPointStateMachine, this.conf, this.container, this.context,
+          this.versionManager);
       task.setDatanodeDetails(datanodeDetails);
       return task;
     }
-
   }
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeLayoutVersionManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeLayoutVersionManager.java
new file mode 100644
index 0000000..c075938
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeLayoutVersionManager.java
@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.upgrade;
+
+
+import static org.apache.hadoop.ozone.container.common.volume.HddsVolume.HDDS_VOLUME_DIR;
+import static org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet.getDatanodeStorageDirs;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Properties;
+
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeatureCatalog.HDDSLayoutFeature;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
+import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile;
+import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
+import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
+import org.apache.hadoop.ozone.upgrade.AbstractLayoutVersionManager;
+import org.apache.hadoop.ozone.upgrade.LayoutVersionManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Class to manage layout versions and features on the DataNode, based on the
+ * VERSION files stored on its volumes.
+ */
+@SuppressWarnings("FinalClass")
+public class DataNodeLayoutVersionManager extends
+    AbstractLayoutVersionManager {
+  private static final Logger LOG = LoggerFactory.getLogger(
+      DataNodeLayoutVersionManager.class);
+  private static DataNodeLayoutVersionManager dataNodeLayoutVersionManager;
+
+  private DataNodeLayoutVersionManager() {
+  }
+
+  /**
+   * Returns the read-only DataNode Layout Version Manager instance.
+   * @return version manager instance.
+   */
+  public static synchronized LayoutVersionManager getInstance() {
+    if (dataNodeLayoutVersionManager == null) {
+      throw new RuntimeException("DataNode Layout Version Manager not yet " +
+          "initialized.");
+    }
+    return dataNodeLayoutVersionManager;
+  }
+
+  /**
+   * Initialize DataNode version manager from version file stored on the
+   * DataNode.
+   * @param conf - Ozone Configuration
+   * @return version manager instance.
+   */
+  public static synchronized DataNodeLayoutVersionManager initialize(
+      ConfigurationSource conf)
+      throws IOException {
+    if (dataNodeLayoutVersionManager == null) {
+      dataNodeLayoutVersionManager = new DataNodeLayoutVersionManager();
+      int layoutVersion = 0;
+      Collection<String> rawLocations = getDatanodeStorageDirs(conf);
+      for (String locationString : rawLocations) {
+        StorageLocation location = StorageLocation.parse(locationString);
+        File hddsRootDir = new File(location.getUri().getPath(),
+            HDDS_VOLUME_DIR);
+        // Read the version from VersionFile Stored on the data node.
+        File versionFile = HddsVolumeUtil.getVersionFile(hddsRootDir);
+        if (!versionFile.exists()) {
+          // Volume root is non-empty but the VERSION file does not exist.
+          LOG.warn("VERSION file does not exist in volume {},"
+                  + " current volume state: {}.",
+              hddsRootDir.getPath(), HddsVolume.VolumeState.INCONSISTENT);
+          continue;
+        } else {
+          LOG.debug("Reading version file {} from disk.", versionFile);
+        }
+        Properties props = DatanodeVersionFile.readFrom(versionFile);
+        if (props.isEmpty()) {
+          continue;
+        }
+        int storedVersion = HddsVolumeUtil.getLayOutVersion(props, versionFile);
+        if (storedVersion > layoutVersion) {
+          layoutVersion = storedVersion;
+        }
+      }
+      dataNodeLayoutVersionManager.init(layoutVersion,
+          HDDSLayoutFeature.values());
+    }
+    return dataNodeLayoutVersionManager;
+  }
+
+  @VisibleForTesting
+  protected synchronized static void resetLayoutVersionManager() {
+    if (dataNodeLayoutVersionManager != null) {
+      dataNodeLayoutVersionManager.reset();
+      dataNodeLayoutVersionManager = null;
+    }
+  }
+}
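
Datanode-side initialization derives the metadata layout version from the highest value found in the volumes' VERSION files. A minimal sketch, assuming conf is a placeholder for the datanode's OzoneConfiguration:

    DataNodeLayoutVersionManager dnVersionManager =
        DataNodeLayoutVersionManager.initialize(conf);
    // These two values are what the heartbeat/registration payload carries.
    int metadataLayoutVersion = dnVersionManager.getMetadataLayoutVersion();
    int softwareLayoutVersion = dnVersionManager.getSoftwareLayoutVersion();
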
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/package-info.java
new file mode 100644
index 0000000..275e0db
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/package-info.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.upgrade;
+/**
+ * Contains upgrade related classes.
+ */
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
index 64f2943..3af0386 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
@@ -21,6 +21,8 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ExtendedDatanodeDetailsP
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
+import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
@@ -76,12 +78,14 @@ public interface StorageContainerDatanodeProtocol {
    * @param extendedDatanodeDetailsProto - extended Datanode Details.
    * @param nodeReport - Node Report.
    * @param containerReportsRequestProto - Container Reports.
+   * @param layoutInfo - Layout Version Information.
    * @return SCM Command.
    */
   SCMRegisteredResponseProto register(
       ExtendedDatanodeDetailsProto extendedDatanodeDetailsProto,
       NodeReportProto nodeReport,
       ContainerReportsProto containerReportsRequestProto,
-      PipelineReportsProto pipelineReports) throws IOException;
+      PipelineReportsProto pipelineReports,
+      LayoutVersionProto layoutInfo) throws IOException;
 
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/FinalizeNewLayoutVersionCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/FinalizeNewLayoutVersionCommand.java
new file mode 100644
index 0000000..e373f6e
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/FinalizeNewLayoutVersionCommand.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.protocol.commands;
+
+import com.google.common.base.Preconditions;
+
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos
+    .LayoutVersionProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos
+    .FinalizeNewLayoutVersionCommandProto;
+
+/**
+ * Command that asks the DataNode to finalize the new layout version.
+ */
+public class FinalizeNewLayoutVersionCommand
+    extends SCMCommand<FinalizeNewLayoutVersionCommandProto> {
+
+  private boolean finalizeUpgrade = false;
+  private LayoutVersionProto layoutInfo;
+
+  public FinalizeNewLayoutVersionCommand(boolean finalizeNewLayoutVersion,
+                                         LayoutVersionProto layoutInfo,
+                                         long id) {
+    super(id);
+    finalizeUpgrade = finalizeNewLayoutVersion;
+    this.layoutInfo = layoutInfo;
+  }
+
+  /**
+   * Returns the type of this command.
+   *
+   * @return Type
+   */
+  @Override
+  public SCMCommandProto.Type getType() {
+    return SCMCommandProto.Type.finalizeNewLayoutVersionCommand;
+  }
+
+  @Override
+  public FinalizeNewLayoutVersionCommandProto getProto() {
+    return FinalizeNewLayoutVersionCommandProto.newBuilder()
+        .setFinalizeNewLayoutVersion(finalizeUpgrade)
+        .setCmdId(getId())
+        .setDataNodeLayoutVersion(layoutInfo)
+        .build();
+  }
+
+  public static FinalizeNewLayoutVersionCommand getFromProtobuf(
+      FinalizeNewLayoutVersionCommandProto finalizeProto) {
+    Preconditions.checkNotNull(finalizeProto);
+    return new FinalizeNewLayoutVersionCommand(
+        finalizeProto.getFinalizeNewLayoutVersion(),
+        finalizeProto.getDataNodeLayoutVersion(), finalizeProto.getCmdId());
+  }
+}
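
The command is symmetric across its protobuf encoding; a small round-trip sketch (layoutInfo is a placeholder LayoutVersionProto):

    FinalizeNewLayoutVersionCommand original =
        new FinalizeNewLayoutVersionCommand(true, layoutInfo, 42L);
    FinalizeNewLayoutVersionCommandProto proto = original.getProto();
    FinalizeNewLayoutVersionCommand decoded =
        FinalizeNewLayoutVersionCommand.getFromProtobuf(proto);
    // decoded carries the same finalize flag, layout version info, and command id.
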
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
index 4da8b27..3e9ed94 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
@@ -20,8 +20,8 @@ import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos
     .ExtendedDatanodeDetailsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 
@@ -157,6 +157,7 @@ public class StorageContainerDatanodeProtocolClientSideTranslatorPB
    * @param extendedDatanodeDetailsProto - extended Datanode Details
    * @param nodeReport - Node Report.
    * @param containerReportsRequestProto - Container Reports.
+   * @param layoutInfo - Layout Version Information.
    * @return SCM Command.
    */
   @Override
@@ -164,7 +165,8 @@ public class StorageContainerDatanodeProtocolClientSideTranslatorPB
       ExtendedDatanodeDetailsProto extendedDatanodeDetailsProto,
       NodeReportProto nodeReport,
       ContainerReportsProto containerReportsRequestProto,
-      PipelineReportsProto pipelineReportsProto)
+      PipelineReportsProto pipelineReportsProto,
+      LayoutVersionProto layoutInfo)
       throws IOException {
     SCMRegisterRequestProto.Builder req =
         SCMRegisterRequestProto.newBuilder();
@@ -172,6 +174,9 @@ public class StorageContainerDatanodeProtocolClientSideTranslatorPB
     req.setContainerReport(containerReportsRequestProto);
     req.setPipelineReports(pipelineReportsProto);
     req.setNodeReport(nodeReport);
+    if (layoutInfo != null) {
+      req.setDataNodeLayoutVersion(layoutInfo);
+    }
     return submitRequest(Type.Register,
         (builder) -> builder.setRegisterRequest(req))
         .getRegisterResponse();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
index 740842d..8a45914 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
@@ -21,6 +21,7 @@ import java.io.IOException;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMDatanodeRequest;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMDatanodeResponse;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisterRequestProto;
@@ -68,9 +69,12 @@ public class StorageContainerDatanodeProtocolServerSideTranslatorPB
         .getContainerReport();
     NodeReportProto dnNodeReport = request.getNodeReport();
     PipelineReportsProto pipelineReport = request.getPipelineReports();
+    LayoutVersionProto layoutInfo = null;
+    if (request.hasDataNodeLayoutVersion()) {
+      layoutInfo = request.getDataNodeLayoutVersion();
+    }
     return impl.register(request.getExtendedDatanodeDetails(), dnNodeReport,
-        containerRequestProto, pipelineReport);
-
+        containerRequestProto, pipelineReport, layoutInfo);
   }
 
   @Override
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
index 534f9ef..a938b22 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
@@ -21,6 +21,8 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos
     .ExtendedDatanodeDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
+import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.hdds.protocol.proto
         .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
@@ -219,10 +221,11 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol {
   @Override
   public StorageContainerDatanodeProtocolProtos
       .SCMRegisteredResponseProto register(
-          ExtendedDatanodeDetailsProto extendedDatanodeDetailsProto,
-          NodeReportProto nodeReport,
-          ContainerReportsProto containerReportsRequestProto,
-          PipelineReportsProto pipelineReportsProto)
+      ExtendedDatanodeDetailsProto extendedDatanodeDetailsProto,
+      NodeReportProto nodeReport,
+      ContainerReportsProto containerReportsRequestProto,
+      PipelineReportsProto pipelineReportsProto,
+      LayoutVersionProto layoutInfo)
       throws IOException {
     rpcCount.incrementAndGet();
     DatanodeDetailsProto datanodeDetailsProto =
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
index 9b238a1..29d5ce3 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachin
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine.DatanodeStates;
 import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
+import org.apache.hadoop.ozone.container.upgrade.DataNodeLayoutVersionManager;
 import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolClientSideTranslatorPB;
 
 import org.junit.Assert;
@@ -48,6 +49,8 @@ public class TestHeartbeatEndpointTask {
 
   private static final InetSocketAddress TEST_SCM_ENDPOINT =
       new InetSocketAddress("test-scm-1", 9861);
+  private static final int TEST_SOFTWARE_LAYOUT_VERSION = 0;
+  private static final int TEST_METADATA_LAYOUT_VERSION = 0;
 
   @Test
   public void testheartbeatWithoutReports() throws Exception {
@@ -277,10 +280,17 @@ public class TestHeartbeatEndpointTask {
     Mockito.when(endpointStateMachine.getEndPoint()).thenReturn(proxy);
     Mockito.when(endpointStateMachine.getAddress())
         .thenReturn(TEST_SCM_ENDPOINT);
+    DataNodeLayoutVersionManager layoutVersionManager =
+        Mockito.mock(DataNodeLayoutVersionManager.class);
+    Mockito.when(layoutVersionManager.getSoftwareLayoutVersion())
+        .thenReturn(TEST_SOFTWARE_LAYOUT_VERSION);
+    Mockito.when(layoutVersionManager.getMetadataLayoutVersion())
+        .thenReturn(TEST_METADATA_LAYOUT_VERSION);
     return HeartbeatEndpointTask.newBuilder()
         .setConfig(conf)
         .setDatanodeDetails(datanodeDetails)
         .setContext(context)
+        .setLayoutVersionManager(layoutVersionManager)
         .setEndpointStateMachine(endpointStateMachine)
         .build();
   }
@@ -292,4 +302,4 @@ public class TestHeartbeatEndpointTask {
         .setReason(ContainerAction.Reason.CONTAINER_FULL);
     return builder.build();
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
index 6d39a59..be9b7e4 100644
--- a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
+++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
@@ -410,6 +410,7 @@ message ClosePipelineCommandProto {
 message FinalizeNewLayoutVersionCommandProto {
   required bool finalizeNewLayoutVersion = 1 [default = false];
   required LayoutVersionProto dataNodeLayoutVersion = 2;
+  required int64 cmdId = 3;
 }
 
 /**
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
index a295341..d8c5164 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@ -206,8 +206,10 @@ public class SCMDatanodeProtocolServer implements
       HddsProtos.ExtendedDatanodeDetailsProto extendedDatanodeDetailsProto,
       NodeReportProto nodeReport,
       ContainerReportsProto containerReportsProto,
-          PipelineReportsProto pipelineReportsProto)
+      PipelineReportsProto pipelineReportsProto,
+      StorageContainerDatanodeProtocolProtos.LayoutVersionProto layoutInfo)
       throws IOException {
+    // TODO: DataNode-Upgrade: process layoutInfo during registration.
     DatanodeDetails datanodeDetails = DatanodeDetails
         .getFromProtoBuf(extendedDatanodeDetailsProto);
     boolean auditSuccess = true;
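
For context, a minimal sketch of how the new register() argument is expected to be
populated on the datanode side (assuming a DataNodeLayoutVersionManager instance named
versionManager, as mocked in the tests below; the SCM-side handling of this field is
still the TODO above):

    // Sketch only: build the layout payload the datanode reports at registration.
    // versionManager is assumed to expose the current layout versions.
    LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
        .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion())
        .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion())
        .build();
    // layoutInfo is then passed as the new last argument of register(...).
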
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index 8cad8b0..0b2720d 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -27,6 +27,8 @@ import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CloseContainerCommandProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeleteBlocksCommandProto;
@@ -53,6 +55,7 @@ import org.apache.hadoop.ozone.container.common.states.endpoint.VersionEndpointT
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
+import org.apache.hadoop.ozone.container.upgrade.DataNodeLayoutVersionManager;
 import org.apache.hadoop.ozone.protocol.commands.CommandStatus;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
@@ -83,6 +86,8 @@ public class TestEndPoint {
   private static ScmTestMock scmServerImpl;
   private static File testDir;
   private static OzoneConfiguration config;
+  private static final int TEST_SOFTWARE_LAYOUT_VERSION = 0;
+  private static final int TEST_METADATA_LAYOUT_VERSION = 0;
 
   @AfterClass
   public static void tearDown() throws Exception {
@@ -266,6 +271,10 @@ public class TestEndPoint {
   @Test
   public void testRegister() throws Exception {
     DatanodeDetails nodeToRegister = randomDatanodeDetails();
+    LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
+        .setMetadataLayoutVersion(TEST_METADATA_LAYOUT_VERSION)
+        .setSoftwareLayoutVersion(TEST_SOFTWARE_LAYOUT_VERSION)
+        .build();
     try (EndpointStateMachine rpcEndPoint = createEndpoint(
         SCMTestUtils.getConf(), serverAddress, 1000)) {
       SCMRegisteredResponseProto responseProto = rpcEndPoint.getEndPoint()
@@ -273,7 +282,7 @@ public class TestEndPoint {
                   .createNodeReport(
                       getStorageReports(nodeToRegister.getUuid())),
               TestUtils.getRandomContainerReports(10),
-                  TestUtils.getRandomPipelineReports());
+                  TestUtils.getRandomPipelineReports(), layoutInfo);
       Assert.assertNotNull(responseProto);
       Assert.assertEquals(nodeToRegister.getUuidString(),
           responseProto.getDatanodeUUID());
@@ -305,9 +314,15 @@ public class TestEndPoint {
     when(ozoneContainer.getController()).thenReturn(controller);
     when(ozoneContainer.getPipelineReport()).thenReturn(
             TestUtils.getRandomPipelineReports());
+    DataNodeLayoutVersionManager versionManager =
+        Mockito.mock(DataNodeLayoutVersionManager.class);
+    when(versionManager.getMetadataLayoutVersion())
+        .thenReturn(TEST_METADATA_LAYOUT_VERSION);
+    when(versionManager.getSoftwareLayoutVersion())
+        .thenReturn(TEST_SOFTWARE_LAYOUT_VERSION);
     RegisterEndpointTask endpointTask =
         new RegisterEndpointTask(rpcEndPoint, conf, ozoneContainer,
-            mock(StateContext.class));
+            mock(StateContext.class), versionManager);
     if (!clearDatanodeDetails) {
       DatanodeDetails datanodeDetails = randomDatanodeDetails();
       endpointTask.setDatanodeDetails(datanodeDetails);
@@ -475,7 +490,8 @@ public class TestEndPoint {
               stateMachine);
 
       HeartbeatEndpointTask endpointTask =
-          new HeartbeatEndpointTask(rpcEndPoint, conf, stateContext);
+          new HeartbeatEndpointTask(rpcEndPoint, conf, stateContext,
+              stateMachine.getDataNodeVersionManager());
       endpointTask.setDatanodeDetailsProto(datanodeDetailsProto);
       endpointTask.call();
       Assert.assertNotNull(endpointTask.getDatanodeDetailsProto());
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManagerImpl.java
index 70a8d6b..3533dc8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManagerImpl.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.om.upgrade;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_SUPPORTED_OPERATION;
 import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.INITIAL_VERSION;
 
+import java.io.IOException;
 import java.lang.reflect.Method;
 import java.lang.reflect.Modifier;
 import java.util.Set;
@@ -90,13 +91,10 @@ public final class OMLayoutVersionManagerImpl
    * @throws OMException on error.
    */
   private void init(Storage storage) throws OMException {
-    init(storage.getLayoutVersion(), OMLayoutFeature.values());
-
-    if (metadataLayoutVersion > softwareLayoutVersion) {
-      throw new OMException(
-          String.format("Cannot initialize VersionManager. Metadata " +
-                  "layout version (%d) > software layout version (%d)",
-              metadataLayoutVersion, softwareLayoutVersion),
+    try {
+      init(storage.getLayoutVersion(), OMLayoutFeature.values());
+    } catch (IOException e) {
+      throw new OMException(e.getMessage(),
           NOT_SUPPORTED_OPERATION);
     }
     registerOzoneManagerRequests();
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
index b99f30c..51f13d6 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
@@ -28,6 +28,8 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.PipelineID;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
@@ -140,6 +142,8 @@ public class TestEndpoints extends AbstractReconSqlDBTest {
   private final String prometheusTestResponseFile =
       "prometheus-test-response.txt";
   private ReconUtils reconUtilsMock;
+  private static final int TEST_SOFTWARE_LAYOUT_VERSION = 0;
+  private static final int TEST_METADATA_LAYOUT_VERSION = 0;
 
   @Rule
   public TemporaryFolder temporaryFolder = new TemporaryFolder();
@@ -326,15 +330,19 @@ public class TestEndpoints extends AbstractReconSqlDBTest {
         NodeReportProto.newBuilder()
             .addStorageReport(storageReportProto3)
             .addStorageReport(storageReportProto4).build();
+    LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
+        .setMetadataLayoutVersion(TEST_METADATA_LAYOUT_VERSION)
+        .setSoftwareLayoutVersion(TEST_SOFTWARE_LAYOUT_VERSION)
+        .build();
 
     try {
       reconScm.getDatanodeProtocolServer()
           .register(extendedDatanodeDetailsProto, nodeReportProto,
-              containerReportsProto, pipelineReportsProto);
+              containerReportsProto, pipelineReportsProto, layoutInfo);
       reconScm.getDatanodeProtocolServer()
           .register(extendedDatanodeDetailsProto2, nodeReportProto2,
               ContainerReportsProto.newBuilder().build(),
-              PipelineReportsProto.newBuilder().build());
+              PipelineReportsProto.newBuilder().build(), layoutInfo);
       // Process all events in the event queue
       reconScm.getEventQueue().processAll(1000);
     } catch (Exception ex) {


---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-commits-help@hadoop.apache.org


[hadoop-ozone] 03/08: HDDS-4141. Implement Finalize command in Ozone Manager client. (#1400)

Posted by av...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

avijayan pushed a commit to branch HDDS-3698-upgrade
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git

commit 9b55f696625c8bf2e0c667d9cc78dfa5dbc391cf
Author: Istvan Fajth <pi...@cloudera.com>
AuthorDate: Wed Sep 9 21:02:24 2020 +0200

    HDDS-4141. Implement Finalize command in Ozone Manager client. (#1400)
---
 .../main/java/org/apache/hadoop/ozone/OmUtils.java |   2 +
 .../ozone/om/protocol/OzoneManagerProtocol.java    |  64 ++++++
 ...OzoneManagerProtocolClientSideTranslatorPB.java |  44 ++++
 .../src/main/proto/OmClientProtocol.proto          |  36 ++++
 hadoop-ozone/ozone-manager/pom.xml                 |   5 +
 .../hadoop/ozone/om/OmMetadataManagerImpl.java     |  14 ++
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |  69 +++++++
 .../ozone/om/ratis/OzoneManagerDoubleBuffer.java   |  13 +-
 .../om/ratis/utils/OzoneManagerRatisUtils.java     |   6 +
 .../upgrade/OMFinalizeUpgradeProgressRequest.java  |  87 ++++++++
 .../request/upgrade/OMFinalizeUpgradeRequest.java  |  80 ++++++++
 .../ozone/om/request/upgrade/package-info.java     |  23 +++
 .../hadoop/ozone/om/response/CleanupTableInfo.java |  10 +-
 .../upgrade/OMFinalizeUpgradeProgressResponse.java |  45 +++++
 .../upgrade/OMFinalizeUpgradeResponse.java         |  43 ++++
 .../ozone/om/response/upgrade/package-info.java    |  23 +++
 .../hadoop/ozone/om/TestOmMetadataManager.java     |  10 +
 .../ozone/om/response/TestCleanupTableInfo.java    |  60 ++++--
 .../ozone/admin/om/FinalizeUpgradeSubCommand.java  | 221 +++++++++++++++++++++
 .../org/apache/hadoop/ozone/admin/om/OMAdmin.java  |  39 +++-
 20 files changed, 873 insertions(+), 21 deletions(-)

diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
index 2a34580..67bd2a0 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
@@ -271,6 +271,8 @@ public final class OmUtils {
     case AddAcl:
     case PurgeKeys:
     case RecoverTrash:
+    case FinalizeUpgradeProgress:
+    case FinalizeUpgrade:
       return false;
     default:
       LOG.error("CmdType {} is not categorized as readOnly or not.", cmdType);
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
index 79cc926..8c0d686 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
 import org.apache.hadoop.ozone.om.helpers.ServiceInfoEx;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UpgradeFinalizationStatus;
 import org.apache.hadoop.ozone.security.OzoneDelegationTokenSelector;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
 import org.apache.hadoop.security.KerberosInfo;
@@ -312,6 +313,69 @@ public interface OzoneManagerProtocol
 
   ServiceInfoEx getServiceInfo() throws IOException;
 
+  /**
+   * Initiate metadata upgrade finalization.
+   * When called, this method initiates finalization of the Ozone Manager
+   * metadata during an upgrade. The returned status is
+   * - ALREADY_FINALIZED with an empty message list when the software layout
+   *    version and the metadata layout version are equal
+   * - STARTING_FINALIZATION with an empty message list when finalization
+   *    has been started successfully
+   * - If finalization is already in progress, the method throws an
+   *    {@link OMException} with the result code INVALID_REQUEST
+   *
+   * The leader Ozone Manager initiates finalization of the followers via
+   * the Raft protocol in the other Ozone Managers, and reports progress to
+   * the client via the
+   * {@link #queryUpgradeFinalizationProgress(String, boolean)} call.
+   *
+   * Follower Ozone Managers reject this request and direct the client to
+   * the leader.
+   *
+   * @param upgradeClientID String identifier of the upgrade finalizer client
+   * @return the finalization status.
+   * @throws IOException
+   *            when finalization fails, or this Ozone Manager is not the
+   *            leader.
+   * @throws OMException
+   *            when finalization is already in progress.
+   */
+  UpgradeFinalizationStatus finalizeUpgrade(
+      String upgradeClientID
+  ) throws IOException;
+
+  /**
+   * Queries the current status of finalization.
+   * When called, this method returns the status messages emitted by the
+   * finalization progress since the last query, if any. The returned
+   * status is
+   * - FINALIZATION_IN_PROGRESS, with the messages since the last query, if
+   *    finalization is still running
+   * - FINALIZATION_DONE, with the messages since the last query, if
+   *    finalization has ended but the messages were not yet emitted to
+   *    the client
+   * - ALREADY_FINALIZED with an empty message list otherwise
+   * - If finalization is not in progress, but the software layout version
+   *    and the metadata layout version differ, the method throws an
+   *    {@link OMException} with the result code INVALID_REQUEST
+   * - If, during finalization, a client with a different ID than the one
+   *    that initiated finalization calls the method, an
+   *    {@link OMException} with the result code INVALID_REQUEST is thrown,
+   *    unless the request is forced by the new client, in which case the
+   *    new client takes over monitoring and the old client should exit.
+   *
+   * @param upgradeClientID String identifier of the upgrade finalizer client
+   * @param takeover set to force takeover of output monitoring
+   * @return the finalization status and status messages.
+   * @throws IOException
+   *            if there was a problem during the query
+   * @throws OMException
+   *            if finalization is needed but not yet started
+   */
+  UpgradeFinalizationStatus queryUpgradeFinalizationProgress(
+      String upgradeClientID, boolean takeover
+  ) throws IOException;
+
   /*
    * S3 Specific functionality that is supported by Ozone Manager.
    */
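
To illustrate the intended use of the two interface methods above, here is a minimal
client-side sketch; it is not part of the patch. 'client' is assumed to be an
OzoneManagerProtocol proxy, the polling interval is a placeholder, and exception
handling is omitted. The FinalizeUpgradeSubCommand added later in this commit
implements the real flow.

    // Sketch: initiate finalization, then poll until it completes.
    String upgradeClientID = "Upgrade-Client-" + UUID.randomUUID();
    UpgradeFinalizationStatus status = client.finalizeUpgrade(upgradeClientID);
    while (status.getStatus()
               != UpgradeFinalizationStatus.Status.FINALIZATION_DONE
        && status.getStatus()
               != UpgradeFinalizationStatus.Status.ALREADY_FINALIZED) {
      status = client.queryUpgradeFinalizationProgress(upgradeClientID, false);
      // 'messages' is a repeated string field in UpgradeFinalizationStatus.
      status.getMessagesList().forEach(System.out::println);
      Thread.sleep(500);   // placeholder polling interval
    }
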
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
index 6fea681..919c622 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
@@ -78,6 +78,10 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteK
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeysRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.FinalizeUpgradeProgressRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.FinalizeUpgradeProgressResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.FinalizeUpgradeRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.FinalizeUpgradeResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetAclRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetAclResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetDelegationTokenResponseProto;
@@ -135,6 +139,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetAclR
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetBucketPropertyRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetVolumePropertyRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UpgradeFinalizationStatus;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo;
 import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
 import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
@@ -1073,6 +1078,45 @@ public final class OzoneManagerProtocolClientSideTranslatorPB
         resp.getCaCertificate());
   }
 
+  @Override
+  public UpgradeFinalizationStatus finalizeUpgrade(
+      String upgradeClientID
+  ) throws IOException {
+    FinalizeUpgradeRequest req = FinalizeUpgradeRequest.newBuilder()
+        .setUpgradeClientId(upgradeClientID)
+        .build();
+
+    OMRequest omRequest = createOMRequest(Type.FinalizeUpgrade)
+        .setFinalizeUpgradeRequest(req)
+        .build();
+
+    FinalizeUpgradeResponse response =
+        handleError(submitRequest(omRequest)).getFinalizeUpgradeResponse();
+
+    return response.getStatus();
+  }
+
+  @Override
+  public UpgradeFinalizationStatus queryUpgradeFinalizationProgress(
+      String upgradeClientID, boolean takeover
+  ) throws IOException {
+    FinalizeUpgradeProgressRequest req = FinalizeUpgradeProgressRequest
+        .newBuilder()
+        .setUpgradeClientId(upgradeClientID)
+        .setTakeover(takeover)
+        .build();
+
+    OMRequest omRequest = createOMRequest(Type.FinalizeUpgradeProgress)
+        .setFinalizeUpgradeProgressRequest(req)
+        .build();
+
+    FinalizeUpgradeProgressResponse response =
+        handleError(submitRequest(omRequest))
+            .getFinalizeUpgradeProgressResponse();
+
+    return response.getStatus();
+  }
+
   /**
    * Get a valid Delegation Token.
    *
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index e30f775..d1e2971 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -71,6 +71,8 @@ enum Type {
 
   ServiceList = 51;
   DBUpdates = 53;
+  FinalizeUpgrade = 54;
+  FinalizeUpgradeProgress = 55;
 
   GetDelegationToken = 61;
   RenewDelegationToken = 62;
@@ -138,6 +140,8 @@ message OMRequest {
 
   optional ServiceListRequest               serviceListRequest             = 51;
   optional DBUpdatesRequest                  dbUpdatesRequest              = 53;
+  optional FinalizeUpgradeRequest           finalizeUpgradeRequest         = 54;
+  optional FinalizeUpgradeProgressRequest   finalizeUpgradeProgressRequest = 55;
 
   optional hadoop.common.GetDelegationTokenRequestProto getDelegationTokenRequest = 61;
   optional hadoop.common.RenewDelegationTokenRequestProto renewDelegationTokenRequest= 62;
@@ -211,6 +215,8 @@ message OMResponse {
 
   optional ServiceListResponse               ServiceListResponse           = 51;
   optional DBUpdatesResponse                 dbUpdatesResponse             = 52;
+  optional FinalizeUpgradeResponse           finalizeUpgradeResponse       = 54;
+  optional FinalizeUpgradeProgressResponse finalizeUpgradeProgressResponse = 55;
 
   optional GetDelegationTokenResponseProto getDelegationTokenResponse = 61;
   optional RenewDelegationTokenResponseProto renewDelegationTokenResponse = 62;
@@ -1014,6 +1020,36 @@ message DBUpdatesResponse {
     repeated bytes data = 2;
 }
 
+
+message UpgradeFinalizationStatus {
+  enum Status {
+    ALREADY_FINALIZED = 1;
+    STARTING_FINALIZATION = 2;
+    FINALIZATION_IN_PROGRESS = 3;
+    FINALIZATION_DONE = 4;
+    FINALIZATION_REQUIRED = 5;
+  }
+  required Status status = 1;
+  repeated string messages = 2;
+}
+
+message FinalizeUpgradeRequest {
+  required string upgradeClientId = 1;
+}
+
+message FinalizeUpgradeResponse {
+    required UpgradeFinalizationStatus status = 1;
+}
+
+message FinalizeUpgradeProgressRequest {
+    required string upgradeClientId = 1;
+    optional bool takeover = 2;
+}
+
+message FinalizeUpgradeProgressResponse {
+    required UpgradeFinalizationStatus status = 1;
+}
+
 message ServicePort {
     enum Type {
         RPC = 1;
diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml
index 7891666..4c9a901 100644
--- a/hadoop-ozone/ozone-manager/pom.xml
+++ b/hadoop-ozone/ozone-manager/pom.xml
@@ -121,6 +121,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>jmockit</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.hamcrest</groupId>
+      <artifactId>hamcrest-all</artifactId>
+      <scope>test</scope>
+    </dependency>
 
     <dependency>
       <groupId>org.reflections</groupId>
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index da7e985..56a9629 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -143,6 +143,20 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
   public static final String TRANSACTION_INFO_TABLE =
       "transactionInfoTable";
 
+  public static final String[] ALL_TABLES = new String[] {
+      USER_TABLE,
+      VOLUME_TABLE,
+      BUCKET_TABLE,
+      KEY_TABLE,
+      DELETED_TABLE,
+      OPEN_KEY_TABLE,
+      MULTIPARTINFO_TABLE,
+      S3_SECRET_TABLE,
+      DELEGATION_TOKEN_TABLE,
+      PREFIX_TABLE,
+      TRANSACTION_INFO_TABLE
+  };
+
   private DBStore store;
 
   private final OzoneManagerLock lock;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 5af09ee..1809827 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -41,6 +41,7 @@ import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
+import java.util.Random;
 import java.util.Set;
 import java.util.Timer;
 import java.util.TimerTask;
@@ -149,6 +150,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRoleI
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServicePort;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UserVolumeInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UpgradeFinalizationStatus;
 import org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB;
 import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager;
 import org.apache.hadoop.ozone.security.OzoneDelegationTokenSecretManager;
@@ -2600,6 +2602,73 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
     return new ServiceInfoEx(getServiceList(), caCertPem);
   }
 
+  private final List<String> finalizationMsgs = new ArrayList<>();
+  private UpgradeFinalizationStatus.Status finalizationStatus =
+      UpgradeFinalizationStatus.Status.FINALIZATION_REQUIRED;
+
+  @Override
+  public UpgradeFinalizationStatus finalizeUpgrade(String upgradeClientID)
+      throws IOException {
+    if (!finalizationStatus
+        .equals(UpgradeFinalizationStatus.Status.FINALIZATION_REQUIRED)){
+      throw new OMException("Finalization is not needed.", INVALID_REQUEST);
+    }
+    finalizationStatus = UpgradeFinalizationStatus.Status.STARTING_FINALIZATION;
+    UpgradeFinalizationStatus status = UpgradeFinalizationStatus.newBuilder()
+        .setStatus(finalizationStatus)
+        .build();
+    LOG.info("FinalizeUpgrade initiated by client: {}.", upgradeClientID);
+    if (isLeader()) {
+      finalizationMsgs.add("Finalization started.");
+      finalizationStatus =
+          UpgradeFinalizationStatus.Status.FINALIZATION_IN_PROGRESS;
+
+      new Thread(() -> {
+        LOG.info("Finalization thread started.");
+        int i = 0;
+        Random random = new Random(0xafaf);
+        while (i < 50) {
+          int rand = random.nextInt(Math.min(10, 50 - i)) + 1;
+          synchronized (finalizationMsgs) {
+            LOG.info("Emitting {} messages", rand);
+            for (int j = 0; j < rand; j++) {
+              LOG.info("Upgrade MSG: {} - added.", "Message " + i + ".");
+              finalizationMsgs.add("Message " + i + ".");
+              i++;
+            }
+          }
+          try {
+            int sleep = random.nextInt(1200);
+            LOG.info("Sleeping {}ms before emit messages again.", sleep);
+            Thread.sleep(sleep);
+          } catch (InterruptedException e) {
+            LOG.info("Finalization thread interrupted.", e);
+            return;
+          }
+        }
+        LOG.info("Finalization done.");
+        finalizationStatus = UpgradeFinalizationStatus.Status.FINALIZATION_DONE;
+      }, "Finalization-Thread").start();
+    }
+    return status;
+  }
+
+  @Override
+  public UpgradeFinalizationStatus queryUpgradeFinalizationProgress(
+      String upgradeClientID, boolean takeover
+  ) throws IOException {
+    UpgradeFinalizationStatus.Builder builder =
+        UpgradeFinalizationStatus.newBuilder();
+    builder.setStatus(finalizationStatus);
+    List<String> msgs = new ArrayList<>();
+    synchronized (finalizationMsgs) {
+      msgs.addAll(finalizationMsgs);
+      finalizationMsgs.clear();
+    }
+    builder.addAllMessages(msgs);
+    return builder.build();
+  }
+
   @Override
   /**
    * {@inheritDoc}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
index f1c144e..68d359e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.om.ratis;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
@@ -37,6 +38,8 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.function.SupplierWithIOException;
 import org.apache.hadoop.hdds.tracing.TracingUtil;
+import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition;
+import org.apache.hadoop.ozone.om.codec.OMDBDefinition;
 import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.apache.hadoop.util.Time;
@@ -404,7 +407,15 @@ public final class OzoneManagerDoubleBuffer {
     CleanupTableInfo cleanupTableInfo =
         responseClass.getAnnotation(CleanupTableInfo.class);
     if (cleanupTableInfo != null) {
-      String[] cleanupTables = cleanupTableInfo.cleanupTables();
+      String[] cleanupTables;
+      if (cleanupTableInfo.cleanupAll()){
+        cleanupTables = Arrays
+            .stream(new OMDBDefinition().getColumnFamilies())
+            .map(DBColumnFamilyDefinition::getTableName)
+            .toArray(String[]::new);
+      } else {
+        cleanupTables = cleanupTableInfo.cleanupTables();
+      }
       for (String table : cleanupTables) {
         cleanupEpochs.computeIfAbsent(table, list -> new ArrayList<>())
             .add(entry.getTrxLogIndex());
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index 681c0da..f43dfba 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -57,6 +57,8 @@ import org.apache.hadoop.ozone.om.request.s3.security.S3GetSecretRequest;
 import org.apache.hadoop.ozone.om.request.security.OMCancelDelegationTokenRequest;
 import org.apache.hadoop.ozone.om.request.security.OMGetDelegationTokenRequest;
 import org.apache.hadoop.ozone.om.request.security.OMRenewDelegationTokenRequest;
+import org.apache.hadoop.ozone.om.request.upgrade.OMFinalizeUpgradeProgressRequest;
+import org.apache.hadoop.ozone.om.request.upgrade.OMFinalizeUpgradeRequest;
 import org.apache.hadoop.ozone.om.request.volume.OMVolumeCreateRequest;
 import org.apache.hadoop.ozone.om.request.volume.OMVolumeDeleteRequest;
 import org.apache.hadoop.ozone.om.request.volume.OMVolumeSetOwnerRequest;
@@ -160,6 +162,10 @@ public final class OzoneManagerRatisUtils {
       return new S3GetSecretRequest(omRequest);
     case RecoverTrash:
       return new OMTrashRecoverRequest(omRequest);
+    case FinalizeUpgrade:
+      return new OMFinalizeUpgradeRequest(omRequest);
+    case FinalizeUpgradeProgress:
+      return new OMFinalizeUpgradeProgressRequest(omRequest);
     default:
       throw new IllegalStateException("Unrecognized write command " +
           "type request" + cmdType);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMFinalizeUpgradeProgressRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMFinalizeUpgradeProgressRequest.java
new file mode 100644
index 0000000..3cb9210
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMFinalizeUpgradeProgressRequest.java
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.upgrade;
+
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.OMClientRequest;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.upgrade.OMFinalizeUpgradeProgressResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.FinalizeUpgradeProgressRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.FinalizeUpgradeProgressResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UpgradeFinalizationStatus;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+
+/**
+ * Handles finalizeUpgradeProgress request that serves to query the status
+ * of the async finalization progress.
+ */
+public class OMFinalizeUpgradeProgressRequest extends OMClientRequest {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OMFinalizeUpgradeProgressRequest.class);
+
+  public OMFinalizeUpgradeProgressRequest(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override public OMClientResponse validateAndUpdateCache(
+      OzoneManager ozoneManager, long transactionLogIndex,
+      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
+
+    LOG.info("Finalization progress check's validateAndUpdateCache"
+        + "called and started.");
+    LOG.trace("Request: {}", getOmRequest());
+    OzoneManagerProtocolProtos.OMResponse.Builder responseBuilder =
+        OmResponseUtil.getOMResponseBuilder(getOmRequest());
+    responseBuilder
+        .setCmdType(OzoneManagerProtocolProtos.Type.FinalizeUpgradeProgress);
+    OMClientResponse response = null;
+
+    try {
+      FinalizeUpgradeProgressRequest finalizeUpgradeProgressRequest =
+          getOmRequest().getFinalizeUpgradeProgressRequest();
+      String upgradeClientID =
+          finalizeUpgradeProgressRequest.getUpgradeClientId();
+      boolean takeover = finalizeUpgradeProgressRequest.getTakeover();
+
+      UpgradeFinalizationStatus status =
+          ozoneManager
+              .queryUpgradeFinalizationProgress(upgradeClientID, takeover);
+
+      FinalizeUpgradeProgressResponse omResponse =
+          FinalizeUpgradeProgressResponse.newBuilder()
+              .setStatus(status)
+              .build();
+
+      responseBuilder.setFinalizeUpgradeProgressResponse(omResponse);
+      response = new OMFinalizeUpgradeProgressResponse(responseBuilder.build());
+      LOG.trace("Returning response: {}", response);
+    } catch (IOException e) {
+      response = new OMFinalizeUpgradeProgressResponse(
+          createErrorOMResponse(responseBuilder, e));
+    }
+
+    return response;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMFinalizeUpgradeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMFinalizeUpgradeRequest.java
new file mode 100644
index 0000000..772eae7
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMFinalizeUpgradeRequest.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.upgrade;
+
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.OMClientRequest;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.upgrade.OMFinalizeUpgradeResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.FinalizeUpgradeRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.FinalizeUpgradeResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UpgradeFinalizationStatus;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+
+/**
+ * Handles finalizeUpgrade request.
+ */
+public class OMFinalizeUpgradeRequest extends OMClientRequest {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OMFinalizeUpgradeRequest.class);
+
+  public OMFinalizeUpgradeRequest(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override
+  public OMClientResponse validateAndUpdateCache(
+      OzoneManager ozoneManager, long transactionLogIndex,
+      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
+    LOG.info("Finalization's validateAndUpdateCache called and started.");
+    LOG.trace("Request: {}", getOmRequest());
+    OMResponse.Builder responseBuilder =
+        OmResponseUtil.getOMResponseBuilder(getOmRequest());
+    responseBuilder.setCmdType(OzoneManagerProtocolProtos.Type.FinalizeUpgrade);
+    OMClientResponse response = null;
+
+    try {
+      FinalizeUpgradeRequest request =
+          getOmRequest().getFinalizeUpgradeRequest();
+
+      String upgradeClientID = request.getUpgradeClientId();
+
+      UpgradeFinalizationStatus status =
+          ozoneManager.finalizeUpgrade(upgradeClientID);
+
+      FinalizeUpgradeResponse omResponse =
+          FinalizeUpgradeResponse.newBuilder().setStatus(status).build();
+      responseBuilder.setFinalizeUpgradeResponse(omResponse);
+      response = new OMFinalizeUpgradeResponse(responseBuilder.build());
+      LOG.trace("Returning response: {}", response);
+    } catch (IOException e) {
+      response = new OMFinalizeUpgradeResponse(
+          createErrorOMResponse(responseBuilder, e));
+    }
+
+    return response;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/package-info.java
new file mode 100644
index 0000000..d785d90
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/**
+ * Package contains classes related to upgrade finalization requests.
+ */
+package org.apache.hadoop.ozone.om.request.upgrade;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/CleanupTableInfo.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/CleanupTableInfo.java
index e456423..39416ac 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/CleanupTableInfo.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/CleanupTableInfo.java
@@ -40,5 +40,13 @@ public @interface CleanupTableInfo {
    * during cleanup table cache.
    * @return list of table names.
    */
-  String[] cleanupTables();
+  String[] cleanupTables() default {};
+
+  /**
+   * If all tables are affected, as at upgrade finalization, one can specify
+   * cleanupAll=true instead of listing all the tables. In this case the
+   * cleanupTables property has to be left as an empty array (the default).
+   * @return whether to cleanup all tables.
+   */
+  boolean cleanupAll() default false;
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/upgrade/OMFinalizeUpgradeProgressResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/upgrade/OMFinalizeUpgradeProgressResponse.java
new file mode 100644
index 0000000..f07e275
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/upgrade/OMFinalizeUpgradeProgressResponse.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.upgrade;
+
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+
+import java.io.IOException;
+
+/**
+ * Response for finalizeUpgradeProgress request.
+ */
+// Note: this will not be a write request; a table is added to the annotation
+// only to satisfy the tests that validate this annotation.
+@CleanupTableInfo(cleanupTables = { OmMetadataManagerImpl.USER_TABLE })
+public class OMFinalizeUpgradeProgressResponse extends OMClientResponse {
+  public OMFinalizeUpgradeProgressResponse(
+      OzoneManagerProtocolProtos.OMResponse omResponse) {
+    super(omResponse);
+  }
+
+  @Override protected void addToDBBatch(OMMetadataManager omMetadataManager,
+      BatchOperation batchOperation) throws IOException {
+
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/upgrade/OMFinalizeUpgradeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/upgrade/OMFinalizeUpgradeResponse.java
new file mode 100644
index 0000000..04e6a24
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/upgrade/OMFinalizeUpgradeResponse.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.upgrade;
+
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+
+import java.io.IOException;
+
+/**
+ * Response for finalizeUpgrade request.
+ */
+@CleanupTableInfo(cleanupAll = true)
+public class OMFinalizeUpgradeResponse extends OMClientResponse {
+  public OMFinalizeUpgradeResponse(
+      OzoneManagerProtocolProtos.OMResponse omResponse) {
+    super(omResponse);
+  }
+
+  @Override
+  protected void addToDBBatch(OMMetadataManager omMetadataManager,
+      BatchOperation batchOperation) throws IOException {
+
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/upgrade/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/upgrade/package-info.java
new file mode 100644
index 0000000..a2e7415
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/upgrade/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/**
+ * Package contains classes related to upgrade finalization responses.
+ */
+package org.apache.hadoop.ozone.om.response.upgrade;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java
index 7c2d258..71193c9 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java
@@ -44,6 +44,8 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRE
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT;
 import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS;
+import static org.hamcrest.Matchers.containsInAnyOrder;
+import static org.junit.Assert.assertThat;
 
 /**
  * Tests OzoneManager MetadataManager.
@@ -612,4 +614,12 @@ public class TestOmMetadataManager {
     }
   }
 
+  @Test
+  public void testAllTablesAreProperInOMMetadataManagerImpl() {
+    String[] tablesByDefinition = OmMetadataManagerImpl.ALL_TABLES;
+
+    Set<String> tablesInManager = omMetadataManager.listTableNames();
+
+    assertThat(tablesInManager, containsInAnyOrder(tablesByDefinition));
+  }
 }
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestCleanupTableInfo.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestCleanupTableInfo.java
index f66e3a3..f813000 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestCleanupTableInfo.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestCleanupTableInfo.java
@@ -22,14 +22,18 @@ import org.apache.hadoop.hdds.server.ServerUtils;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
 import org.reflections.Reflections;
 
 import java.io.File;
+import java.util.Arrays;
 import java.util.Set;
 
+import static org.junit.Assert.assertTrue;
+
 /**
  * This tests check whether {@link OMClientResponse} have defined
  * {@link CleanupTableInfo} annotation.
@@ -39,31 +43,53 @@ public class TestCleanupTableInfo {
   @Rule
   public TemporaryFolder folder = new TemporaryFolder();
 
-  @Test
-  public void checkAnnotationAndTableName() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
+  private OzoneConfiguration conf = new OzoneConfiguration();
+
+  @Before
+  public void setupMetaManager() throws Exception {
     File newFolder = folder.newFolder();
     if (!newFolder.exists()) {
       Assert.assertTrue(newFolder.mkdirs());
     }
     ServerUtils.setOzoneMetaDirPath(conf, newFolder.toString());
-    OMMetadataManager omMetadataManager = new OmMetadataManagerImpl(conf);
+  }
 
-    Set<String> tables = omMetadataManager.listTableNames();
-    Reflections reflections = new Reflections(
-        "org.apache.hadoop.ozone.om.response");
-    Set<Class<? extends OMClientResponse>> subTypes =
-        reflections.getSubTypesOf(OMClientResponse.class);
-    subTypes.forEach(aClass -> {
-      Assert.assertTrue(aClass + "does not have annotation of" +
-              " CleanupTableInfo",
+  @Test
+  public void checkAllWriteResponseHasCleanupTableAnnotation() {
+    getResponseClasses().forEach(aClass -> {
+      Assert.assertTrue(
+          aClass + "does not have annotation of CleanupTableInfo",
           aClass.isAnnotationPresent(CleanupTableInfo.class));
-      String[] cleanupTables =
-          aClass.getAnnotation(CleanupTableInfo.class).cleanupTables();
-      Assert.assertTrue(cleanupTables.length >=1);
-      for (String tableName : cleanupTables) {
-        Assert.assertTrue(tables.contains(tableName));
+    });
+  }
+
+  @Test
+  public void checkWriteResponseIsAnnotatedWithKnownTableNames()
+      throws Exception {
+    OMMetadataManager omMetadataManager = new OmMetadataManagerImpl(conf);
+    Set<String> tables = omMetadataManager.listTableNames();
+
+    getResponseClasses().forEach(aClass -> {
+
+      CleanupTableInfo annotation =
+          aClass.getAnnotation(CleanupTableInfo.class);
+      String[] cleanupTables = annotation.cleanupTables();
+      boolean cleanupAll = annotation.cleanupAll();
+
+      if (cleanupTables.length >= 1) {
+        assertTrue(
+            Arrays.stream(cleanupTables).allMatch(tables::contains)
+        );
+      } else {
+        assertTrue(cleanupAll);
       }
+
     });
   }
+
+  private Set<Class<? extends OMClientResponse>> getResponseClasses() {
+    Reflections reflections =
+        new Reflections("org.apache.hadoop.ozone.om.response");
+    return reflections.getSubTypesOf(OMClientResponse.class);
+  }
 }
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/FinalizeUpgradeSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/FinalizeUpgradeSubCommand.java
new file mode 100644
index 0000000..b35c621
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/FinalizeUpgradeSubCommand.java
@@ -0,0 +1,221 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.admin.om;
+
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UpgradeFinalizationStatus;
+import picocli.CommandLine;
+
+import java.io.IOException;
+import java.util.UUID;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CancellationException;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_REQUEST;
+
+/**
+ * Handler of ozone admin om finalizeUpgrade command.
+ */
+@CommandLine.Command(
+    name = "finalizeupgrade",
+    description = "Finalizes Ozone Manager's metadata changes and enables new "
+        + "features after a software upgrade.\n"
+        + "It is possible to specify the service ID for an HA environment, "
+        + "or the Ozone manager host in a non-HA environment, if none provided "
+        + "the default from configuration is being used if not ambiguous.",
+    mixinStandardHelpOptions = true,
+    versionProvider = HddsVersionProvider.class
+)
+public class FinalizeUpgradeSubCommand implements Callable<Void> {
+
+  @CommandLine.ParentCommand
+  private OMAdmin parent;
+
+  @CommandLine.Option(
+      names = {"-id", "--service-id"},
+      description = "Ozone Manager Service ID"
+  )
+  private String omServiceId;
+
+  @CommandLine.Option(
+      names = {"-host", "--service-host"},
+      description = "Ozone Manager Host"
+  )
+  private String omHost;
+
+  @CommandLine.Option(
+      names = {"--takeover"},
+      description = "Forces takeover of monitoring from an other client, if "
+          + "finalization has already been started and did not finished yet."
+  )
+  private boolean force;
+
+  @Override
+  public Void call() throws Exception {
+    boolean forceHA = false;
+    OzoneManagerProtocol client =
+        parent.createOmClient(omServiceId, omHost, forceHA);
+    String upgradeClientID = "Upgrade-Client-" + UUID.randomUUID().toString();
+    try {
+      UpgradeFinalizationStatus status =
+          client.finalizeUpgrade(upgradeClientID);
+      if (isFinalized(status)){
+        System.out.println("Upgrade has already been finalized.");
+        emitExitMsg();
+        return null;
+      } else if (!isStarting(status)){
+        System.err.println("Invalid response from Ozone Manager.");
+        System.err.println(
+            "Current finalization status is: " + status.getStatus()
+        );
+        throw new IOException("Exiting...");
+      }
+    } catch (OMException e) {
+      handleInvalidRequestAfterInitiatingFinalization(e);
+    }
+    monitorAndWaitFinalization(client, upgradeClientID);
+    return null;
+  }
+
+  private void monitorAndWaitFinalization(OzoneManagerProtocol client,
+      String upgradeClientID) throws ExecutionException {
+    ExecutorService exec = Executors.newSingleThreadExecutor();
+    Future<?> monitor =
+        exec.submit(new UpgradeMonitor(client, upgradeClientID, force));
+    try {
+      monitor.get();
+      emitFinishedMsg();
+    } catch (CancellationException|InterruptedException e) {
+      emitCancellationMsg();
+    } catch (ExecutionException e) {
+      emitGeneralErrorMsg();
+      throw e;
+    } finally {
+      exec.shutdown();
+    }
+  }
+
+  private void handleInvalidRequestAfterInitiatingFinalization(
+      OMException e) throws IOException {
+    if (e.getResult().equals(INVALID_REQUEST)) {
+      if (force) {
+        return;
+      }
+      System.err.println("Finalization is already in progress, it is not"
+          + "possible to initiate it again.");
+      e.printStackTrace(System.err);
+      System.err.println("If you want to track progress from a new client"
+          + "for any reason, use --takeover, and the status update will be"
+          + "received by the new client. Note that with forcing to monitor"
+          + "progress from a new client, the old one initiated the upgrade"
+          + "will not be able to monitor the progress further and exit.");
+      throw new IOException("Exiting...");
+    } else {
+      throw e;
+    }
+  }
+
+  private static class UpgradeMonitor implements Callable<Void> {
+
+    private OzoneManagerProtocol client;
+    private String upgradeClientID;
+    private boolean force;
+
+    UpgradeMonitor(
+        OzoneManagerProtocol client,
+        String upgradeClientID,
+        boolean force
+    ) {
+      this.client = client;
+      this.upgradeClientID = upgradeClientID;
+      this.force = force;
+    }
+
+    @Override
+    public Void call() throws Exception {
+      boolean finished = false;
+      while (!finished) {
+        Thread.sleep(500);
+        // Do not check for exceptions here; if one happens during monitoring
+        // we should report it and exit.
+        UpgradeFinalizationStatus status =
+            client.queryUpgradeFinalizationProgress(upgradeClientID, force);
+        // This can happen when a takeover was requested after the fact,
+        // when there is nothing left to take over.
+        if (isFinalized(status)) {
+          System.out.println("Finalization already finished.");
+          emitExitMsg();
+          return null;
+        }
+        if (isInprogress(status) || isDone(status)) {
+          status.getMessagesList().stream().forEachOrdered(System.out::println);
+        }
+        if (isDone(status)) {
+          emitExitMsg();
+          finished = true;
+        }
+      }
+      return null;
+    }
+
+  }
+  private static void emitExitMsg() {
+    System.out.println("Exiting...");
+  }
+
+  private static boolean isFinalized(UpgradeFinalizationStatus status) {
+    return status.getStatus()
+        .equals(UpgradeFinalizationStatus.Status.ALREADY_FINALIZED);
+  }
+
+  private static boolean isDone(UpgradeFinalizationStatus status) {
+    return status.getStatus()
+        .equals(UpgradeFinalizationStatus.Status.FINALIZATION_DONE);
+  }
+
+  private static boolean isInprogress(UpgradeFinalizationStatus status) {
+    return status.getStatus()
+        .equals(UpgradeFinalizationStatus.Status.FINALIZATION_IN_PROGRESS);
+  }
+
+  private static boolean isStarting(UpgradeFinalizationStatus status) {
+    return status.getStatus()
+        .equals(UpgradeFinalizationStatus.Status.STARTING_FINALIZATION);
+  }
+
+  private static void emitGeneralErrorMsg() {
+    System.err.println("Finalization was not successful.");
+  }
+
+  private static void emitFinishedMsg() {
+    System.out.println("Finalization of Ozone Manager's metadata upgrade "
+        + "finished.");
+  }
+
+  private static void emitCancellationMsg() {
+    System.err.println("Finalization command was cancelled. Note that, this"
+        + "will not cancel finalization in Ozone Manager. Progress can be"
+        + "monitored in the Ozone Manager's log.");
+  }
+}
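
For illustration, assuming the standard "ozone admin om" entry point referenced
in the handler's javadoc, typical invocations of the new subcommand would look
roughly like the following (the host name and service ID are placeholders):

    # Non-HA cluster, addressing a single Ozone Manager host:
    ozone admin om finalizeupgrade --service-host=om-host.example.com

    # HA cluster, addressing a configured OM service ID:
    ozone admin om finalizeupgrade --service-id=omservice

    # Take over progress monitoring from the client that started finalization:
    ozone admin om finalizeupgrade --service-id=omservice --takeover
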
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java
index f9321ab..317c464 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTrans
 import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
 import org.apache.hadoop.security.UserGroupInformation;
 
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY;
 import org.apache.ratis.protocol.ClientId;
 import org.kohsuke.MetaInfServices;
@@ -41,6 +42,8 @@ import picocli.CommandLine;
 import picocli.CommandLine.Model.CommandSpec;
 import picocli.CommandLine.Spec;
 
+import java.util.Collection;
+
 /**
  * Subcommand for admin operations related to OM.
  */
@@ -50,6 +53,7 @@ import picocli.CommandLine.Spec;
     mixinStandardHelpOptions = true,
     versionProvider = HddsVersionProvider.class,
     subcommands = {
+        FinalizeUpgradeSubCommand.class,
         GetServiceRolesSubcommand.class
     })
 @MetaInfServices(SubcommandWithParent.class)
@@ -86,13 +90,28 @@ public class OMAdmin extends GenericCli implements SubcommandWithParent {
   }
 
   public OzoneManagerProtocolClientSideTranslatorPB createOmClient(
-      String omServiceID) throws Exception {
+      String omServiceID
+  ) throws Exception {
+    return createOmClient(omServiceID, null, true);
+  }
+
+  public OzoneManagerProtocolClientSideTranslatorPB createOmClient(
+      String omServiceID,
+      String omHost,
+      boolean forceHA
+  ) throws Exception {
     OzoneConfiguration conf = parent.getOzoneConf();
+    if (omHost != null && !omHost.isEmpty()) {
+      omServiceID = null;
+      conf.set(OZONE_OM_ADDRESS_KEY, omHost);
+    } else if (omServiceID == null || omServiceID.isEmpty()) {
+      omServiceID = getTheOnlyConfiguredOmServiceIdOrThrow();
+    }
     UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
     RPC.setProtocolEngine(conf, OzoneManagerProtocolPB.class,
         ProtobufRpcEngine.class);
     String clientId = ClientId.randomId().toString();
-    if (OmUtils.isOmHAServiceId(conf, omServiceID)) {
+    if (!forceHA || OmUtils.isOmHAServiceId(conf, omServiceID)) {
       OmTransport omTransport = new Hadoop3OmTransportFactory()
           .createOmTransport(conf, ugi, omServiceID);
       return new OzoneManagerProtocolClientSideTranslatorPB(omTransport,
@@ -106,6 +125,22 @@ public class OMAdmin extends GenericCli implements SubcommandWithParent {
     }
   }
 
+  private String getTheOnlyConfiguredOmServiceIdOrThrow() {
+    if (getConfiguredServiceIds().size() != 1) {
+      throw new IllegalArgumentException("There is no Ozone Manager service ID"
+          + "specified, but there are either zero, or more than one service "
+          + "configured. Please specify the service ID to be finalized.");
+    }
+    return getConfiguredServiceIds().iterator().next();
+  }
+
+  private Collection<String> getConfiguredServiceIds() {
+    OzoneConfiguration conf = parent.getOzoneConf();
+    Collection<String> omServiceIds =
+        conf.getTrimmedStringCollection(OZONE_OM_SERVICE_IDS_KEY);
+    return omServiceIds;
+  }
+
   @Override
   public Class<?> getParentType() {
     return OzoneAdmin.class;




[hadoop-ozone] 08/08: HDDS-4253. Add LayoutVersion request/response for DN registration. (#1457)

Posted by av...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

avijayan pushed a commit to branch HDDS-3698-upgrade
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git

commit 8fe8a1fa78bf1acecfd1b8f26a6eef04d6ed6a26
Author: prashantpogde <pr...@gmail.com>
AuthorDate: Tue Oct 6 12:37:52 2020 -0700

    HDDS-4253. Add LayoutVersion request/response for DN registration. (#1457)
---
 .../hdds/upgrade/HDDSLayoutFeatureCatalog.java     |  4 +-
 .../java/org/apache/hadoop/ozone/OzoneConsts.java  |  2 -
 .../states/endpoint/RegisterEndpointTask.java      |  4 ++
 .../protocol/StorageContainerNodeProtocol.java     |  8 +++-
 .../apache/hadoop/hdds/scm/node/NodeManager.java   |  5 +++
 .../hadoop/hdds/scm/node/SCMNodeManager.java       | 35 +++++++++++++--
 .../hdds/scm/server/SCMDatanodeProtocolServer.java |  8 ++--
 .../hdds/scm/server/StorageContainerManager.java   | 14 +++++-
 .../java/org/apache/hadoop/hdds/scm/TestUtils.java |  2 +-
 .../hadoop/hdds/scm/container/MockNodeManager.java | 10 +++--
 .../TestIncrementalContainerReportHandler.java     | 37 ++++++++++------
 .../hdds/scm/node/TestContainerPlacement.java      | 11 ++++-
 .../hadoop/hdds/scm/node/TestDeadNodeHandler.java  | 18 ++++----
 .../hdds/scm/node/TestNodeReportHandler.java       | 17 +++++++-
 .../hadoop/hdds/scm/node/TestSCMNodeManager.java   | 51 +++++++++++++++++++---
 .../hadoop/hdds/scm/node/TestStatisticsUpdate.java |  4 +-
 .../scm/server/TestSCMBlockProtocolServer.java     |  2 +-
 .../testutils/ReplicationNodeManagerMock.java      |  6 ++-
 .../hadoop/ozone/scm/node/TestSCMNodeMetrics.java  | 14 +++++-
 .../hadoop/ozone/recon/scm/ReconNodeManager.java   |  9 ++--
 .../scm/ReconStorageContainerManagerFacade.java    |  7 ++-
 .../hadoop/ozone/recon/api/TestEndpoints.java      |  2 +-
 .../scm/AbstractReconContainerManagerTest.java     | 12 ++++-
 ...TestReconIncrementalContainerReportHandler.java | 15 ++++++-
 .../ozone/recon/scm/TestReconNodeManager.java      | 11 +++--
 .../ozone/recon/scm/TestReconPipelineManager.java  | 26 ++++++++++-
 26 files changed, 265 insertions(+), 69 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeatureCatalog.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeatureCatalog.java
index 9793f5d..830b699 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeatureCatalog.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeatureCatalog.java
@@ -31,8 +31,8 @@ public class HDDSLayoutFeatureCatalog {
    * List of HDDS Features.
    */
   public enum HDDSLayoutFeature implements LayoutFeature {
-    INITIAL_VERSION(0, "Initial Layout Version");
-
+    INITIAL_VERSION(0, "Initial Layout Version"),
+    FIRST_UPGRADE_VERSION(1, "First Layout Version After Upgrade");
 
     private int layoutVersion;
     private String description;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index a079925..a7aca16 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -37,8 +37,6 @@ public final class OzoneConsts {
   public static final String STORAGE_DIR = "scm";
   public static final String SCM_ID = "scmUuid";
 
-  public static final String DATANODE_STORAGE_CONFIG = "datanode.config";
-
   public static final String OZONE_SIMPLE_ROOT_USER = "root";
   public static final String OZONE_SIMPLE_HDFS_USER = "hdfs";
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
index 83c43b2..9f2476e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
@@ -16,6 +16,8 @@
  */
 package org.apache.hadoop.ozone.container.common.states.endpoint;
 
+import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto.ErrorCode.success;
+
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.commons.lang3.StringUtils;
@@ -160,6 +162,8 @@ public final class RegisterEndpointTask implements
             "Unexpected datanode ID in the response.");
         Preconditions.checkState(!StringUtils.isBlank(response.getClusterID()),
             "Invalid cluster ID in the response.");
+        Preconditions.checkState(response.getErrorCode() == success,
+            "DataNode has higher Software Layout Version than SCM.");
         if (response.hasHostname() && response.hasIpAddress()) {
           datanodeDetails.setHostName(response.getHostname());
           datanodeDetails.setIpAddress(response.getIpAddress());
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java
index cb55880..3375773 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java
@@ -20,7 +20,9 @@ package org.apache.hadoop.ozone.protocol;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto
-        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
+    .StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto
@@ -54,11 +56,13 @@ public interface StorageContainerNodeProtocol {
    * @param datanodeDetails DatanodeDetails
    * @param nodeReport NodeReportProto
    * @param pipelineReport PipelineReportsProto
+   * @param layoutVersionInfo LayoutVersionProto
    * @return  SCMRegisteredResponseProto
    */
   RegisteredCommand register(DatanodeDetails datanodeDetails,
                              NodeReportProto nodeReport,
-                             PipelineReportsProto pipelineReport);
+                             PipelineReportsProto pipelineReport,
+                             LayoutVersionProto layoutVersionInfo);
 
   /**
    * Send heartbeat to indicate the datanode is alive and doing well.
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
index 4af2357..7853181 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
 import org.apache.hadoop.hdds.server.events.EventHandler;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
 import org.apache.hadoop.ozone.protocol.StorageContainerNodeProtocol;
 import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
@@ -219,4 +220,8 @@ public interface NodeManager extends StorageContainerNodeProtocol,
   int pipelineLimit(DatanodeDetails dn);
 
   int minPipelineLimit(List<DatanodeDetails> dn);
+
+  default HDDSLayoutVersionManager getLayoutVersionManager(){
+    return null;
+  }
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index 328f271..e602592 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -37,6 +37,8 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto.ErrorCode;
@@ -54,6 +56,7 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.net.CachedDNSToSwitchMapping;
@@ -106,17 +109,21 @@ public class SCMNodeManager implements NodeManager {
       new ConcurrentHashMap<>();
   private final int numPipelinesPerMetadataVolume;
   private final int heavyNodeCriteria;
+  private final HDDSLayoutVersionManager scmLayoutVersionManager;
 
   /**
    * Constructs SCM machine Manager.
    */
   public SCMNodeManager(OzoneConfiguration conf,
-      SCMStorageConfig scmStorageConfig, EventPublisher eventPublisher,
-      NetworkTopology networkTopology) {
+                        SCMStorageConfig scmStorageConfig,
+                        EventPublisher eventPublisher,
+                        NetworkTopology networkTopology,
+                        HDDSLayoutVersionManager layoutVersionManager) {
     this.nodeStateManager = new NodeStateManager(conf, eventPublisher);
     this.version = VersionInfo.getLatestVersion();
     this.commandQueue = new CommandQueue();
     this.scmStorageConfig = scmStorageConfig;
+    this.scmLayoutVersionManager = layoutVersionManager;
     LOG.info("Entering startup safe mode.");
     registerMXBean();
     this.metrics = SCMNodeMetrics.create(this);
@@ -248,8 +255,19 @@ public class SCMNodeManager implements NodeManager {
   @Override
   public RegisteredCommand register(
       DatanodeDetails datanodeDetails, NodeReportProto nodeReport,
-      PipelineReportsProto pipelineReportsProto) {
-
+      PipelineReportsProto pipelineReportsProto,
+      LayoutVersionProto layoutInfo) {
+
+    if (layoutInfo != null) {
+      if (layoutInfo.getSoftwareLayoutVersion() >
+          scmLayoutVersionManager.getSoftwareLayoutVersion()) {
+        return RegisteredCommand.newBuilder()
+            .setErrorCode(ErrorCode.errorNodeNotPermitted)
+            .setDatanode(datanodeDetails)
+            .setClusterID(this.scmStorageConfig.getClusterID())
+            .build();
+      }
+    }
     if (!isNodeRegistered(datanodeDetails)) {
       InetAddress dnAddress = Server.getRemoteIp();
       if (dnAddress != null) {
@@ -789,4 +807,13 @@ public class SCMNodeManager implements NodeManager {
   long getSkippedHealthChecks() {
     return nodeStateManager.getSkippedHealthChecks();
   }
+
+  /**
+   * @return  HDDSLayoutVersionManager
+   */
+  @VisibleForTesting
+  @Override
+  public HDDSLayoutVersionManager getLayoutVersionManager() {
+    return scmLayoutVersionManager;
+  }
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
index d8c5164..eeee1fb 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@ -33,6 +33,8 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
@@ -207,9 +209,8 @@ public class SCMDatanodeProtocolServer implements
       NodeReportProto nodeReport,
       ContainerReportsProto containerReportsProto,
       PipelineReportsProto pipelineReportsProto,
-      StorageContainerDatanodeProtocolProtos.LayoutVersionProto layoutInfo)
+      LayoutVersionProto layoutInfo)
       throws IOException {
-    //TODO : DataNode-Upgrade: layoutinfo related processing.
     DatanodeDetails datanodeDetails = DatanodeDetails
         .getFromProtoBuf(extendedDatanodeDetailsProto);
     boolean auditSuccess = true;
@@ -218,7 +219,8 @@ public class SCMDatanodeProtocolServer implements
 
     // TODO : Return the list of Nodes that forms the SCM HA.
     RegisteredCommand registeredCommand = scm.getScmNodeManager()
-        .register(datanodeDetails, nodeReport, pipelineReportsProto);
+        .register(datanodeDetails, nodeReport, pipelineReportsProto,
+            layoutInfo);
     if (registeredCommand.getError()
         == SCMRegisteredResponseProto.ErrorCode.success) {
       eventPublisher.fireEvent(CONTAINER_REPORT,
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 3cf12e7..91e8cb9 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -93,6 +93,7 @@ import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
 import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
 import org.apache.hadoop.hdds.utils.HddsServerUtil;
 import org.apache.hadoop.hdds.utils.HddsVersionInfo;
 import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
@@ -203,6 +204,8 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
   private NetworkTopology clusterMap;
   private PipelineChoosePolicy pipelineChoosePolicy;
 
+  private HDDSLayoutVersionManager scmLayoutVersionManager;
+
   /**
    * Creates a new StorageContainerManager. Configuration will be
    * updated with information on the actual listening addresses used
@@ -250,6 +253,9 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
           "failure.", ResultCodes.SCM_NOT_INITIALIZED);
     }
 
+    scmLayoutVersionManager =
+        HDDSLayoutVersionManager.initialize(scmStorageConfig);
+
     /**
      * Important : This initialization sequence is assumed by some of our tests.
      * The testSecureOzoneCluster assumes that security checks have to be
@@ -397,8 +403,8 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
     if(configurator.getScmNodeManager() != null) {
       scmNodeManager = configurator.getScmNodeManager();
     } else {
-      scmNodeManager = new SCMNodeManager(
-          conf, scmStorageConfig, eventQueue, clusterMap);
+      scmNodeManager = new SCMNodeManager(conf, scmStorageConfig, eventQueue,
+          clusterMap, scmLayoutVersionManager);
     }
 
     placementMetrics = SCMContainerPlacementMetrics.create();
@@ -1149,4 +1155,8 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
   public String getClusterId() {
     return getScmStorageConfig().getClusterID();
   }
+
+  public HDDSLayoutVersionManager getLayoutVersionManager() {
+    return scmLayoutVersionManager;
+  }
 }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
index 42640f3..144a04a 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
@@ -116,7 +116,7 @@ public final class TestUtils {
       SCMNodeManager nodeManager) {
     return getDatanodeDetails(
         nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(), null,
-                getRandomPipelineReports()));
+                getRandomPipelineReports(), null));
   }
 
   /**
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
index 7aca0f3..42369f9 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
@@ -19,6 +19,8 @@ package org.apache.hadoop.hdds.scm.container;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
+import org.apache.hadoop.hdds.protocol.proto
         .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.apache.hadoop.hdds.scm.net.NetConstants;
 import org.apache.hadoop.hdds.scm.net.NetworkTopology;
@@ -112,14 +114,14 @@ public class MockNodeManager implements NodeManager {
     if (!nodes.isEmpty()) {
       for (int x = 0; x < nodes.size(); x++) {
         DatanodeDetails node = nodes.get(x);
-        register(node, null, null);
+        register(node, null, null, null);
         populateNodeMetric(node, x);
       }
     }
     if (initializeFakeNodes) {
       for (int x = 0; x < nodeCount; x++) {
         DatanodeDetails dd = MockDatanodeDetails.randomDatanodeDetails();
-        register(dd, null, null);
+        register(dd, null, null, null);
         populateNodeMetric(dd, x);
       }
     }
@@ -447,7 +449,9 @@ public class MockNodeManager implements NodeManager {
    */
   @Override
   public RegisteredCommand register(DatanodeDetails datanodeDetails,
-      NodeReportProto nodeReport, PipelineReportsProto pipelineReportsProto) {
+                                    NodeReportProto nodeReport,
+                                    PipelineReportsProto pipelineReportsProto,
+                                    LayoutVersionProto layoutInfo) {
     try {
       node2ContainerMap.insertNewDatanode(datanodeDetails.getUuid(),
           Collections.emptySet());
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
index 1af2f73..5d9246a 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
 import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Assert;
@@ -61,6 +62,9 @@ public class TestIncrementalContainerReportHandler {
   private ContainerManager containerManager;
   private ContainerStateManager containerStateManager;
   private EventPublisher publisher;
+  private static final Integer SOFTWARE_LAYOUT_VERSION = 1;
+  private static final Integer METADATA_LAYOUT_VERSION = 1;
+  private HDDSLayoutVersionManager versionManager;
 
   @Before
   public void setup() throws IOException {
@@ -73,8 +77,15 @@ public class TestIncrementalContainerReportHandler {
     NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
     EventQueue eventQueue = new EventQueue();
     SCMStorageConfig storageConfig = new SCMStorageConfig(conf);
+    this.versionManager =
+        Mockito.mock(HDDSLayoutVersionManager.class);
+    Mockito.when(versionManager.getMetadataLayoutVersion())
+        .thenReturn(METADATA_LAYOUT_VERSION);
+    Mockito.when(versionManager.getSoftwareLayoutVersion())
+        .thenReturn(SOFTWARE_LAYOUT_VERSION);
     this.nodeManager =
-        new SCMNodeManager(conf, storageConfig, eventQueue, clusterMap);
+        new SCMNodeManager(conf, storageConfig, eventQueue, clusterMap,
+            versionManager);
 
     this.containerStateManager = new ContainerStateManager(conf);
     this.publisher = Mockito.mock(EventPublisher.class);
@@ -123,9 +134,9 @@ public class TestIncrementalContainerReportHandler {
     final DatanodeDetails datanodeOne = randomDatanodeDetails();
     final DatanodeDetails datanodeTwo = randomDatanodeDetails();
     final DatanodeDetails datanodeThree = randomDatanodeDetails();
-    nodeManager.register(datanodeOne, null, null);
-    nodeManager.register(datanodeTwo, null, null);
-    nodeManager.register(datanodeThree, null, null);
+    nodeManager.register(datanodeOne, null, null, null);
+    nodeManager.register(datanodeTwo, null, null, null);
+    nodeManager.register(datanodeThree, null, null, null);
     final Set<ContainerReplica> containerReplicas = getReplicas(
         container.containerID(),
         ContainerReplicaProto.State.CLOSING,
@@ -160,9 +171,9 @@ public class TestIncrementalContainerReportHandler {
     final DatanodeDetails datanodeOne = randomDatanodeDetails();
     final DatanodeDetails datanodeTwo = randomDatanodeDetails();
     final DatanodeDetails datanodeThree = randomDatanodeDetails();
-    nodeManager.register(datanodeOne, null, null);
-    nodeManager.register(datanodeTwo, null, null);
-    nodeManager.register(datanodeThree, null, null);
+    nodeManager.register(datanodeOne, null, null, null);
+    nodeManager.register(datanodeTwo, null, null, null);
+    nodeManager.register(datanodeThree, null, null, null);
     final Set<ContainerReplica> containerReplicas = getReplicas(
         container.containerID(),
         ContainerReplicaProto.State.CLOSING,
@@ -198,9 +209,9 @@ public class TestIncrementalContainerReportHandler {
     final DatanodeDetails datanodeOne = randomDatanodeDetails();
     final DatanodeDetails datanodeTwo = randomDatanodeDetails();
     final DatanodeDetails datanodeThree = randomDatanodeDetails();
-    nodeManager.register(datanodeOne, null, null);
-    nodeManager.register(datanodeTwo, null, null);
-    nodeManager.register(datanodeThree, null, null);
+    nodeManager.register(datanodeOne, null, null, null);
+    nodeManager.register(datanodeTwo, null, null, null);
+    nodeManager.register(datanodeThree, null, null, null);
     final Set<ContainerReplica> containerReplicas = getReplicas(
         container.containerID(),
         ContainerReplicaProto.State.CLOSING,
@@ -239,9 +250,9 @@ public class TestIncrementalContainerReportHandler {
     final DatanodeDetails datanodeOne = randomDatanodeDetails();
     final DatanodeDetails datanodeTwo = randomDatanodeDetails();
     final DatanodeDetails datanodeThree = randomDatanodeDetails();
-    nodeManager.register(datanodeOne, null, null);
-    nodeManager.register(datanodeTwo, null, null);
-    nodeManager.register(datanodeThree, null, null);
+    nodeManager.register(datanodeOne, null, null, null);
+    nodeManager.register(datanodeTwo, null, null, null);
+    nodeManager.register(datanodeThree, null, null, null);
     final Set<ContainerReplica> containerReplicas = getReplicas(
         container.containerID(),
         ContainerReplicaProto.State.CLOSED,
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
index 797709e..a7f6466 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
 import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
 import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
 import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
 import org.apache.hadoop.test.PathUtils;
@@ -61,6 +62,8 @@ import org.mockito.Mockito;
  * Test for different container placement policy.
  */
 public class TestContainerPlacement {
+  private static final int SOFTWARE_LAYOUT_VERSION = 1;
+  private static final int METADATA_LAYOUT_VERSION = 1;
 
   @Rule
   public ExpectedException thrown = ExpectedException.none();
@@ -105,8 +108,14 @@ public class TestContainerPlacement {
     SCMStorageConfig storageConfig = Mockito.mock(SCMStorageConfig.class);
     Mockito.when(storageConfig.getClusterID()).thenReturn("cluster1");
 
+    HDDSLayoutVersionManager versionManager =
+        Mockito.mock(HDDSLayoutVersionManager.class);
+    Mockito.when(versionManager.getMetadataLayoutVersion())
+        .thenReturn(METADATA_LAYOUT_VERSION);
+    Mockito.when(versionManager.getSoftwareLayoutVersion())
+        .thenReturn(SOFTWARE_LAYOUT_VERSION);
     SCMNodeManager nodeManager = new SCMNodeManager(config,
-        storageConfig, eventQueue, null);
+        storageConfig, eventQueue, null, versionManager);
     return nodeManager;
   }
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
index f05be76..3e725ce 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
@@ -140,25 +140,25 @@ public class TestDeadNodeHandler {
     // test case happy.
 
     nodeManager.register(datanode1,
-        TestUtils.createNodeReport(storageOne), null);
+        TestUtils.createNodeReport(storageOne), null, null);
     nodeManager.register(datanode2,
-        TestUtils.createNodeReport(storageOne), null);
+        TestUtils.createNodeReport(storageOne), null, null);
     nodeManager.register(datanode3,
-        TestUtils.createNodeReport(storageOne), null);
+        TestUtils.createNodeReport(storageOne), null, null);
 
     nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(),
-        TestUtils.createNodeReport(storageOne), null);
+        TestUtils.createNodeReport(storageOne), null, null);
     nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(),
-        TestUtils.createNodeReport(storageOne), null);
+        TestUtils.createNodeReport(storageOne), null, null);
     nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(),
-        TestUtils.createNodeReport(storageOne), null);
+        TestUtils.createNodeReport(storageOne), null, null);
 
     nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(),
-        TestUtils.createNodeReport(storageOne), null);
+        TestUtils.createNodeReport(storageOne), null, null);
     nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(),
-        TestUtils.createNodeReport(storageOne), null);
+        TestUtils.createNodeReport(storageOne), null, null);
     nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(),
-        TestUtils.createNodeReport(storageOne), null);
+        TestUtils.createNodeReport(storageOne), null, null);
 
     LambdaTestUtils.await(120000, 1000,
         () -> {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
index 69b031c..2710225 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
 import org.apache.hadoop.hdds.server.events.Event;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Before;
@@ -48,6 +49,9 @@ public class TestNodeReportHandler implements EventPublisher {
   private static final Logger LOG = LoggerFactory
       .getLogger(TestNodeReportHandler.class);
   private NodeReportHandler nodeReportHandler;
+  private HDDSLayoutVersionManager versionManager;
+  private static final Integer SOFTWARE_LAYOUT_VERSION = 1;
+  private static final Integer METADATA_LAYOUT_VERSION = 1;
   private SCMNodeManager nodeManager;
   private String storagePath = GenericTestUtils.getRandomizedTempPath()
       .concat("/" + UUID.randomUUID().toString());
@@ -58,8 +62,16 @@ public class TestNodeReportHandler implements EventPublisher {
     SCMStorageConfig storageConfig = Mockito.mock(SCMStorageConfig.class);
     Mockito.when(storageConfig.getClusterID()).thenReturn("cluster1");
     NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
+
+    this.versionManager =
+        Mockito.mock(HDDSLayoutVersionManager.class);
+    Mockito.when(versionManager.getMetadataLayoutVersion())
+        .thenReturn(METADATA_LAYOUT_VERSION);
+    Mockito.when(versionManager.getSoftwareLayoutVersion())
+        .thenReturn(SOFTWARE_LAYOUT_VERSION);
     nodeManager =
-        new SCMNodeManager(conf, storageConfig, new EventQueue(), clusterMap);
+        new SCMNodeManager(conf, storageConfig, new EventQueue(), clusterMap,
+            versionManager);
     nodeReportHandler = new NodeReportHandler(nodeManager);
   }
 
@@ -72,7 +84,8 @@ public class TestNodeReportHandler implements EventPublisher {
     SCMNodeMetric nodeMetric = nodeManager.getNodeStat(dn);
     Assert.assertNull(nodeMetric);
 
-    nodeManager.register(dn, getNodeReport(dn, storageOne).getReport(), null);
+    nodeManager.register(dn, getNodeReport(dn, storageOne).getReport(), null,
+        null);
     nodeMetric = nodeManager.getNodeStat(dn);
 
     Assert.assertTrue(nodeMetric.get().getCapacity().get() == 100);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
index 3f3c4ae..3222a9a 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
@@ -35,6 +35,8 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.NodeReportFromDatanode;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
@@ -49,6 +51,7 @@ import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
 import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
+import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -64,10 +67,13 @@ import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanode
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE;
+import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto.ErrorCode.errorNodeNotPermitted;
+import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto.ErrorCode.success;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+import static org.apache.hadoop.hdds.scm.TestUtils.getRandomPipelineReports;
 import static org.apache.hadoop.hdds.scm.events.SCMEvents.DATANODE_COMMAND;
 import org.junit.After;
 import org.junit.Assert;
@@ -171,6 +177,38 @@ public class TestSCMNodeManager {
   }
 
   /**
+   * Tests that the node manager handles layout versions correctly on register.
+   *
+   * @throws IOException
+   * @throws InterruptedException
+   * @throws AuthenticationException
+   */
+  @Test
+  public void testScmLayoutOnRegister()
+      throws IOException, InterruptedException, AuthenticationException {
+
+    try (SCMNodeManager nodeManager = createNodeManager(getConf())) {
+      Integer nodeManagerSoftwareLayoutVersion =
+          nodeManager.getLayoutVersionManager().getSoftwareLayoutVersion();
+      LayoutVersionProto layoutInfoSuccess = LayoutVersionProto.newBuilder()
+          .setMetadataLayoutVersion(1)
+          .setSoftwareLayoutVersion(nodeManagerSoftwareLayoutVersion).build();
+      LayoutVersionProto layoutInfoFailure = LayoutVersionProto.newBuilder()
+          .setMetadataLayoutVersion(1)
+          .setSoftwareLayoutVersion(nodeManagerSoftwareLayoutVersion + 1)
+          .build();
+      RegisteredCommand rcmd = nodeManager.register(
+          MockDatanodeDetails.randomDatanodeDetails(), null,
+          getRandomPipelineReports(), layoutInfoSuccess);
+      assertTrue(rcmd.getError() == success);
+      rcmd = nodeManager.register(
+          MockDatanodeDetails.randomDatanodeDetails(), null,
+          getRandomPipelineReports(), layoutInfoFailure);
+      assertTrue(rcmd.getError() == errorNodeNotPermitted);
+    }
+  }
+
+  /**
    * asserts that if we send no heartbeats node manager stays in safemode.
    *
    * @throws IOException
@@ -859,7 +897,8 @@ public class TestSCMNodeManager {
         String storagePath = testDir.getAbsolutePath() + "/" + dnId;
         StorageReportProto report = TestUtils
             .createStorageReport(dnId, storagePath, capacity, used, free, null);
-        nodeManager.register(dn, TestUtils.createNodeReport(report), null);
+        nodeManager.register(dn, TestUtils.createNodeReport(report), null,
+            null);
         nodeManager.processHeartbeat(dn);
       }
       //TODO: wait for EventQueue to be processed
@@ -910,7 +949,7 @@ public class TestSCMNodeManager {
                         used, free, null, failed));
         failed = !failed;
       }
-      nodeManager.register(dn, TestUtils.createNodeReport(reports), null);
+      nodeManager.register(dn, TestUtils.createNodeReport(reports), null, null);
       nodeManager.processHeartbeat(dn);
       //TODO: wait for EventQueue to be processed
       eventQueue.processAll(8000L);
@@ -1081,7 +1120,7 @@ public class TestSCMNodeManager {
 
       nodemanager
           .register(datanodeDetails, TestUtils.createNodeReport(report),
-                  TestUtils.getRandomPipelineReports());
+                  getRandomPipelineReports(), null);
       eq.fireEvent(DATANODE_COMMAND,
           new CommandForDatanode<>(datanodeDetails.getUuid(),
               new CloseContainerCommand(1L,
@@ -1169,7 +1208,7 @@ public class TestSCMNodeManager {
       for (int i = 0; i < nodeCount; i++) {
         DatanodeDetails node = createDatanodeDetails(
             UUID.randomUUID().toString(), hostNames[i], ipAddress[i], null);
-        nodeManager.register(node, null, null);
+        nodeManager.register(node, null, null, null);
         nodes[i] = node;
       }
 
@@ -1213,7 +1252,7 @@ public class TestSCMNodeManager {
       for (int i = 0; i < nodeCount; i++) {
         DatanodeDetails node = createDatanodeDetails(
             UUID.randomUUID().toString(), hostNames[i], ipAddress[i], null);
-        nodeManager.register(node, null, null);
+        nodeManager.register(node, null, null, null);
         nodes[i] = node;
       }
 
@@ -1263,7 +1302,7 @@ public class TestSCMNodeManager {
       for (int i = 0; i < nodeCount; i++) {
         DatanodeDetails node = createDatanodeDetails(
             UUID.randomUUID().toString(), hostNames[i], ipAddress[i], null);
-        nodeManager.register(node, null, null);
+        nodeManager.register(node, null, null, null);
       }
       // test get node
       Assert.assertEquals(0, nodeManager.getNodesByAddress(null).size());
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java
index a6b0339..e07edc4 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java
@@ -94,9 +94,9 @@ public class TestStatisticsUpdate {
         datanode2.getUuid(), storagePath2, 200, 20, 180, null);
 
     nodeManager.register(datanode1,
-        TestUtils.createNodeReport(storageOne), null);
+        TestUtils.createNodeReport(storageOne), null, null);
     nodeManager.register(datanode2,
-        TestUtils.createNodeReport(storageTwo), null);
+        TestUtils.createNodeReport(storageTwo), null, null);
 
     NodeReportProto nodeReportProto1 = TestUtils.createNodeReport(storageOne);
     NodeReportProto nodeReportProto2 = TestUtils.createNodeReport(storageTwo);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
index 349e705..945d890 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
@@ -64,7 +64,7 @@ public class TestSCMBlockProtocolServer {
     // add nodes to scm node manager
     nodeManager = scm.getScmNodeManager();
     for (int i = 0; i < nodeCount; i++) {
-      nodeManager.register(randomDatanodeDetails(), null, null);
+      nodeManager.register(randomDatanodeDetails(), null, null, null);
 
     }
     server = scm.getBlockProtocolServer();
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
index 6d088fe..6d66a4b 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
@@ -17,6 +17,9 @@
 package org.apache.hadoop.ozone.container.testutils;
 
 import com.google.common.base.Preconditions;
+
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
 import org.apache.hadoop.hdds.protocol.proto
         .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
@@ -264,7 +267,8 @@ public class ReplicationNodeManagerMock implements NodeManager {
   @Override
   public RegisteredCommand register(DatanodeDetails dd,
                                     NodeReportProto nodeReport,
-                                    PipelineReportsProto pipelineReportsProto) {
+                                    PipelineReportsProto pipelineReportsProto,
+                                    LayoutVersionProto layoutInfo) {
     return null;
   }
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java
index 7576e8b..d20c55b 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
 import org.apache.hadoop.hdds.scm.node.SCMNodeMetrics;
 import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
 import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 
 import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
@@ -45,12 +46,15 @@ import org.junit.Assert;
 import static org.junit.Assert.assertEquals;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 /**
  * Test cases to verify the metrics exposed by SCMNodeManager.
  */
 public class TestSCMNodeMetrics {
 
+  private static final Integer METADATA_LAYOUT_VERSION = 1;
+  private static final Integer SOFTWARE_LAYOUT_VERSION = 1;
   private static SCMNodeManager nodeManager;
 
   private static DatanodeDetails registeredDatanode;
@@ -62,8 +66,14 @@ public class TestSCMNodeMetrics {
     EventQueue publisher = new EventQueue();
     SCMStorageConfig config =
         new SCMStorageConfig(NodeType.DATANODE, new File("/tmp"), "storage");
+    HDDSLayoutVersionManager versionManager =
+        Mockito.mock(HDDSLayoutVersionManager.class);
+    Mockito.when(versionManager.getMetadataLayoutVersion())
+        .thenReturn(METADATA_LAYOUT_VERSION);
+    Mockito.when(versionManager.getSoftwareLayoutVersion())
+        .thenReturn(SOFTWARE_LAYOUT_VERSION);
     nodeManager = new SCMNodeManager(source, config, publisher,
-        new NetworkTopologyImpl(source));
+        new NetworkTopologyImpl(source), versionManager);
 
     registeredDatanode = DatanodeDetails.newBuilder()
         .setHostName("localhost")
@@ -72,7 +82,7 @@ public class TestSCMNodeMetrics {
         .build();
 
     nodeManager.register(registeredDatanode, createNodeReport(),
-        PipelineReportsProto.newBuilder().build());
+        PipelineReportsProto.newBuilder().build(), null);
 
   }
 
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java
index d7a6104..7283f5e 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdds.scm.net.NetworkTopology;
 import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
 import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
@@ -65,8 +66,10 @@ public class ReconNodeManager extends SCMNodeManager {
                           SCMStorageConfig scmStorageConfig,
                           EventPublisher eventPublisher,
                           NetworkTopology networkTopology,
-                          Table<UUID, DatanodeDetails> nodeDB) {
-    super(conf, scmStorageConfig, eventPublisher, networkTopology);
+                          Table<UUID, DatanodeDetails> nodeDB,
+                          HDDSLayoutVersionManager scmLayoutVersionManager) {
+    super(conf, scmStorageConfig, eventPublisher, networkTopology,
+        scmLayoutVersionManager);
     this.nodeDB = nodeDB;
     loadExistingNodes();
   }
@@ -78,7 +81,7 @@ public class ReconNodeManager extends SCMNodeManager {
           iterator = nodeDB.iterator();
       while (iterator.hasNext()) {
         DatanodeDetails datanodeDetails = iterator.next().getValue();
-        register(datanodeDetails, null, null);
+        register(datanodeDetails, null, null, null);
         nodeCount++;
       }
       LOG.info("Loaded {} nodes from node DB.", nodeCount);
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
index 3a0342e..15ac8e6 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hdds.scm.safemode.SafeModeManager;
 import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
 import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
 import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
 import org.apache.hadoop.io.IOUtils;
@@ -87,6 +88,7 @@ public class ReconStorageContainerManagerFacade
   private Set<ReconScmTask> reconScmTasks = new HashSet<>();
   private SCMContainerPlacementMetrics placementMetrics;
   private PlacementPolicy containerPlacementPolicy;
+  private HDDSLayoutVersionManager scmLayoutVersionManager;
 
   @Inject
   public ReconStorageContainerManagerFacade(OzoneConfiguration conf,
@@ -102,9 +104,12 @@ public class ReconStorageContainerManagerFacade
     dbStore = DBStoreBuilder
         .createDBStore(ozoneConfiguration, new ReconSCMDBDefinition());
 
+    this.scmLayoutVersionManager = HDDSLayoutVersionManager
+        .initialize(this.scmStorageConfig);
     this.nodeManager =
         new ReconNodeManager(conf, scmStorageConfig, eventQueue, clusterMap,
-            ReconSCMDBDefinition.NODES.getTable(dbStore));
+            ReconSCMDBDefinition.NODES.getTable(dbStore),
+            this.scmLayoutVersionManager);
     placementMetrics = SCMContainerPlacementMetrics.create();
     this.containerPlacementPolicy =
         ContainerPlacementPolicyFactory.getPolicy(conf, nodeManager,
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
index 51f13d6..acca61d 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
@@ -342,7 +342,7 @@ public class TestEndpoints extends AbstractReconSqlDBTest {
       reconScm.getDatanodeProtocolServer()
           .register(extendedDatanodeDetailsProto2, nodeReportProto2,
               ContainerReportsProto.newBuilder().build(),
-              PipelineReportsProto.newBuilder().build(), layoutInfo);
+              PipelineReportsProto.newBuilder().build(), null);
       // Process all events in the event queue
       reconScm.getEventQueue().processAll(1000);
     } catch (Exception ex) {
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java
index 783f42c..365ab5f 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
 import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
 import org.apache.hadoop.hdds.utils.db.Table;
@@ -65,6 +66,9 @@ public class AbstractReconContainerManagerTest {
   private ReconPipelineManager pipelineManager;
   private ReconContainerManager containerManager;
   private DBStore store;
+  private HDDSLayoutVersionManager layoutVersionManager;
+  public static final int SOFTWARE_LAYOUT_VERSION = 1;
+  public static final int METADATA_LAYOUT_VERSION = 1;
 
   @Before
   public void setUp() throws Exception {
@@ -76,8 +80,14 @@ public class AbstractReconContainerManagerTest {
     scmStorageConfig = new ReconStorageConfig(conf);
     NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
     EventQueue eventQueue = new EventQueue();
+    layoutVersionManager = mock(HDDSLayoutVersionManager.class);
+    when(layoutVersionManager.getSoftwareLayoutVersion())
+        .thenReturn(SOFTWARE_LAYOUT_VERSION);
+    when(layoutVersionManager.getMetadataLayoutVersion())
+        .thenReturn(METADATA_LAYOUT_VERSION);
     NodeManager nodeManager =
-        new SCMNodeManager(conf, scmStorageConfig, eventQueue, clusterMap);
+        new SCMNodeManager(conf, scmStorageConfig, eventQueue, clusterMap,
+            layoutVersionManager);
     pipelineManager = new ReconPipelineManager(conf, nodeManager,
         ReconSCMDBDefinition.PIPELINES.getTable(store), eventQueue);
     containerManager = new ReconContainerManager(
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
index 1b42f21..d0eacc6 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
@@ -50,14 +50,17 @@ import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.Incremen
 import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 /**
  * Test Recon ICR handler.
  */
 public class TestReconIncrementalContainerReportHandler
     extends AbstractReconContainerManagerTest {
+  private HDDSLayoutVersionManager versionManager;
 
   @Test
   public void testProcessICR() throws IOException, NodeNotFoundException {
@@ -81,9 +84,17 @@ public class TestReconIncrementalContainerReportHandler
     NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
     EventQueue eventQueue = new EventQueue();
     SCMStorageConfig storageConfig = new SCMStorageConfig(conf);
+    this.versionManager =
+        Mockito.mock(HDDSLayoutVersionManager.class);
+    Mockito.when(versionManager.getMetadataLayoutVersion())
+        .thenReturn(METADATA_LAYOUT_VERSION);
+    Mockito.when(versionManager.getSoftwareLayoutVersion())
+        .thenReturn(SOFTWARE_LAYOUT_VERSION);
+
     NodeManager nodeManager =
-        new SCMNodeManager(conf, storageConfig, eventQueue, clusterMap);
-    nodeManager.register(datanodeDetails, null, null);
+        new SCMNodeManager(conf, storageConfig, eventQueue, clusterMap,
+            versionManager);
+    nodeManager.register(datanodeDetails, null, null, null);
 
     ReconContainerManager containerManager = getContainerManager();
     ReconIncrementalContainerReportHandler reconIcr =
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java
index c934cae..c0c973b 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.net.NetworkTopology;
 import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
 import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
 import org.apache.hadoop.hdds.utils.db.Table;
@@ -52,6 +53,8 @@ public class TestReconNodeManager {
 
   private OzoneConfiguration conf;
   private DBStore store;
+  private ReconStorageConfig reconStorageConfig;
+  private HDDSLayoutVersionManager versionManager;
 
   @Before
   public void setUp() throws Exception {
@@ -59,6 +62,8 @@ public class TestReconNodeManager {
     conf.set(OZONE_METADATA_DIRS,
         temporaryFolder.newFolder().getAbsolutePath());
     conf.set(OZONE_SCM_NAMES, "localhost");
+    reconStorageConfig = new ReconStorageConfig(conf);
+    versionManager = HDDSLayoutVersionManager.initialize(reconStorageConfig);
     store = DBStoreBuilder.createDBStore(conf, new ReconSCMDBDefinition());
   }
 
@@ -75,7 +80,7 @@ public class TestReconNodeManager {
     Table<UUID, DatanodeDetails> nodeTable =
         ReconSCMDBDefinition.NODES.getTable(store);
     ReconNodeManager reconNodeManager = new ReconNodeManager(conf,
-        scmStorageConfig, eventQueue, clusterMap, nodeTable);
+        scmStorageConfig, eventQueue, clusterMap, nodeTable, versionManager);
     ReconNewNodeHandler reconNewNodeHandler =
         new ReconNewNodeHandler(reconNodeManager);
     assertTrue(reconNodeManager.getAllNodes().isEmpty());
@@ -84,7 +89,7 @@ public class TestReconNodeManager {
     String uuidString = datanodeDetails.getUuidString();
 
     // Register a random datanode.
-    reconNodeManager.register(datanodeDetails, null, null);
+    reconNodeManager.register(datanodeDetails, null, null, null);
     reconNewNodeHandler.onMessage(reconNodeManager.getNodeByUuid(uuidString),
         null);
 
@@ -95,7 +100,7 @@ public class TestReconNodeManager {
     eventQueue.close();
     reconNodeManager.close();
     reconNodeManager = new ReconNodeManager(conf, scmStorageConfig, eventQueue,
-        clusterMap, nodeTable);
+        clusterMap, nodeTable, versionManager);
 
     // Verify that the node information was persisted and loaded back.
     assertEquals(1, reconNodeManager.getAllNodes().size());
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java
index b190810..a670717 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider;
 import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
 import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
 import org.apache.hadoop.ozone.recon.scm.ReconPipelineFactory.ReconPipelineProvider;
@@ -52,6 +53,8 @@ import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
+import org.mockito.Mockito;
+
 import static org.mockito.Mockito.mock;
 
 /**
@@ -59,12 +62,16 @@ import static org.mockito.Mockito.mock;
  */
 public class TestReconPipelineManager {
 
+  private static final Integer SOFTWARE_LAYOUT_VERSION = 1;
+  private static final Integer METADATA_LAYOUT_VERSION = 1;
+
   @Rule
   public TemporaryFolder temporaryFolder = new TemporaryFolder();
 
   private OzoneConfiguration conf;
   private SCMStorageConfig scmStorageConfig;
   private DBStore store;
+  private HDDSLayoutVersionManager versionManager;
 
   @Before
   public void setup() throws IOException {
@@ -109,8 +116,16 @@ public class TestReconPipelineManager {
 
     NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
     EventQueue eventQueue = new EventQueue();
+
+    this.versionManager =
+        Mockito.mock(HDDSLayoutVersionManager.class);
+    Mockito.when(versionManager.getMetadataLayoutVersion())
+        .thenReturn(METADATA_LAYOUT_VERSION);
+    Mockito.when(versionManager.getSoftwareLayoutVersion())
+        .thenReturn(SOFTWARE_LAYOUT_VERSION);
     NodeManager nodeManager =
-        new SCMNodeManager(conf, scmStorageConfig, eventQueue, clusterMap);
+        new SCMNodeManager(conf, scmStorageConfig, eventQueue, clusterMap,
+            versionManager);
 
     try (ReconPipelineManager reconPipelineManager =
         new ReconPipelineManager(conf, nodeManager,
@@ -145,8 +160,15 @@ public class TestReconPipelineManager {
     Pipeline pipeline = getRandomPipeline();
     NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
     EventQueue eventQueue = new EventQueue();
+    this.versionManager =
+        Mockito.mock(HDDSLayoutVersionManager.class);
+    Mockito.when(versionManager.getMetadataLayoutVersion())
+        .thenReturn(METADATA_LAYOUT_VERSION);
+    Mockito.when(versionManager.getSoftwareLayoutVersion())
+        .thenReturn(SOFTWARE_LAYOUT_VERSION);
     NodeManager nodeManager =
-        new SCMNodeManager(conf, scmStorageConfig, eventQueue, clusterMap);
+        new SCMNodeManager(conf, scmStorageConfig, eventQueue, clusterMap,
+            versionManager);
 
     ReconPipelineManager reconPipelineManager =
         new ReconPipelineManager(conf, nodeManager,




[hadoop-ozone] 04/08: HDDS-4174. Add current HDDS layout version to Datanode heartbeat/registration (#1421)

Posted by av...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

avijayan pushed a commit to branch HDDS-3698-upgrade
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git

commit 95b8712bbe92d72b5c0a972bd68c6ee7edb6652f
Author: prashantpogde <pr...@gmail.com>
AuthorDate: Mon Sep 14 18:40:05 2020 -0700

    HDDS-4174. Add current HDDS layout version to Datanode heartbeat/registration (#1421)
---
 .../proto/ScmServerDatanodeHeartbeatProtocol.proto | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
index 1dc4bcd..6d39a59 100644
--- a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
+++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
@@ -74,6 +74,15 @@ enum Status {
 }
 
 /**
+ * DataNode will advertise its current layout version through
+ * heartbeat as well as registration mechanism.
+ */
+message LayoutVersionProto {
+   required uint32 metadataLayoutVersion = 1;
+   required uint32 softwareLayoutVersion = 2;
+}
+
+/**
  * Request for version info of the software stack on the server.
  */
 message SCMVersionRequestProto {}
@@ -92,6 +101,7 @@ message SCMRegisterRequestProto {
   required NodeReportProto nodeReport = 2;
   required ContainerReportsProto containerReport = 3;
   required PipelineReportsProto pipelineReports = 4;
+  optional LayoutVersionProto dataNodeLayoutVersion = 5;
 }
 
 /**
@@ -126,6 +136,7 @@ message SCMHeartbeatRequestProto {
   optional ContainerActionsProto containerActions = 6;
   optional PipelineActionsProto pipelineActions = 7;
   optional PipelineReportsProto pipelineReports = 8;
+  optional LayoutVersionProto dataNodeLayoutVersion = 9;
 }
 
 /*
@@ -293,6 +304,7 @@ message SCMCommandProto {
     replicateContainerCommand = 5;
     createPipelineCommand = 6;
     closePipelineCommand = 7;
+    finalizeNewLayoutVersionCommand = 8;
   }
   // TODO: once we start using protoc 3.x, refactor this message using "oneof"
   required Type commandType = 1;
@@ -303,6 +315,8 @@ message SCMCommandProto {
   optional ReplicateContainerCommandProto replicateContainerCommandProto = 6;
   optional CreatePipelineCommandProto createPipelineCommandProto = 7;
   optional ClosePipelineCommandProto closePipelineCommandProto = 8;
+  optional FinalizeNewLayoutVersionCommandProto
+  finalizeNewLayoutVersionCommandProto = 9;
 }
 
 /**
@@ -391,6 +405,14 @@ message ClosePipelineCommandProto {
 }
 
 /**
+ * This command asks the DataNode to finalize a new layout version.
+ */
+message FinalizeNewLayoutVersionCommandProto {
+  required bool finalizeNewLayoutVersion = 1 [default = false];
+  required LayoutVersionProto dataNodeLayoutVersion = 2;
+}
+
+/**
  * Protocol used from a datanode to StorageContainerManager.
  *
  * Please see the request and response messages for details of the RPC calls.
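
The new field is optional in both SCMRegisterRequestProto and SCMHeartbeatRequestProto, so datanodes that do not yet send it stay wire-compatible. As a rough illustration only (the builder names below follow standard protobuf-java conventions for the messages defined above; the surrounding datanode code and any outer class or package names are assumptions, not part of this commit), a caller could populate the field like this:

    // Build the layout version payload advertised by the datanode.
    LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
        .setMetadataLayoutVersion(1)   // MLV persisted in the DN's metadata
        .setSoftwareLayoutVersion(2)   // SLV of the running software
        .build();

    // Attach it to a heartbeat; the remaining (required) heartbeat fields
    // would be populated by the existing datanode state machine.
    SCMHeartbeatRequestProto.Builder heartbeat =
        SCMHeartbeatRequestProto.newBuilder()
            .setDataNodeLayoutVersion(layoutInfo);   // new optional field 9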




[hadoop-ozone] 01/08: HDDS-3829. Introduce Layout Feature interface in Ozone. (#1322)

Posted by av...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

avijayan pushed a commit to branch HDDS-3698-upgrade
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git

commit 3e21d750297f46a37ca75c885daa6a379a4c14cd
Author: avijayanhwx <14...@users.noreply.github.com>
AuthorDate: Tue Aug 25 17:26:42 2020 -0700

    HDDS-3829. Introduce Layout Feature interface in Ozone. (#1322)
---
 .../upgrade/AbstractLayoutVersionManager.java      | 110 +++++++++++++++++++++
 .../apache/hadoop/ozone/upgrade/LayoutFeature.java |  33 ++++++-
 .../hadoop/ozone/upgrade/LayoutVersionManager.java |  64 ++++++++++++
 .../apache/hadoop/ozone/upgrade}/package-info.java |  12 ++-
 .../upgrade/TestAbstractLayoutVersionManager.java  |  85 ++++++++++++++++
 hadoop-ozone/ozone-manager/pom.xml                 |  31 ++++++
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |   4 +
 .../om/response/s3/security/package-info.java      |   2 +-
 .../NewOmFeatureUpgradeAction.java}                |  20 +++-
 .../OMLayoutFeatureAPI.java}                       |  23 ++++-
 .../ozone/om/upgrade/OMLayoutFeatureAspect.java    |  54 ++++++++++
 .../ozone/om/upgrade/OMLayoutFeatureCatalog.java   |  93 +++++++++++++++++
 .../ozone/om/upgrade/OMLayoutVersionManager.java   |  92 +++++++++++++++++
 .../OmUpgradeAction.java}                          |  16 ++-
 .../s3/security => upgrade}/package-info.java      |   7 +-
 .../request/volume/TestOMVolumeDeleteRequest.java  |   2 +-
 .../om/upgrade/TestOMLayoutFeatureAspect.java      |  71 +++++++++++++
 .../ozone/om/upgrade/TestOMVersionManager.java     |  66 +++++++++++++
 18 files changed, 755 insertions(+), 30 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/AbstractLayoutVersionManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/AbstractLayoutVersionManager.java
new file mode 100644
index 0000000..99f72c8
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/AbstractLayoutVersionManager.java
@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.upgrade;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Optional;
+import java.util.TreeMap;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Layout Version Manager containing generic method implementations.
+ */
+@SuppressWarnings("visibilitymodifier")
+public abstract class AbstractLayoutVersionManager implements
+    LayoutVersionManager {
+
+  protected int metadataLayoutVersion; // MLV.
+  protected int softwareLayoutVersion; // SLV.
+  protected TreeMap<Integer, LayoutFeature> features = new TreeMap<>();
+  protected Map<String, LayoutFeature> featureMap = new HashMap<>();
+  protected volatile boolean isInitialized = false;
+
+  protected void init(int version, LayoutFeature[] lfs) {
+    if (!isInitialized) {
+      metadataLayoutVersion = version;
+      initializeFeatures(lfs);
+      softwareLayoutVersion = features.lastKey();
+      isInitialized = true;
+    }
+  }
+
+  protected void initializeFeatures(LayoutFeature[] lfs) {
+    Arrays.stream(lfs).forEach(f -> {
+      Preconditions.checkArgument(!featureMap.containsKey(f.name()));
+      Preconditions.checkArgument(!features.containsKey(f.layoutVersion()));
+      features.put(f.layoutVersion(), f);
+      featureMap.put(f.name(), f);
+    });
+  }
+
+  public int getMetadataLayoutVersion() {
+    return metadataLayoutVersion;
+  }
+
+  public int getSoftwareLayoutVersion() {
+    return softwareLayoutVersion;
+  }
+
+  public boolean needsFinalization() {
+    return metadataLayoutVersion < softwareLayoutVersion;
+  }
+
+  public boolean isAllowed(LayoutFeature layoutFeature) {
+    return layoutFeature.layoutVersion() <= metadataLayoutVersion;
+  }
+
+  public boolean isAllowed(String featureName) {
+    return featureMap.containsKey(featureName) &&
+        isAllowed(featureMap.get(featureName));
+  }
+
+  public LayoutFeature getFeature(String name) {
+    return featureMap.get(name);
+  }
+
+  public void doFinalize(Object param) {
+    if (needsFinalization()){
+      Iterator<Map.Entry<Integer, LayoutFeature>> iterator = features
+          .tailMap(metadataLayoutVersion + 1).entrySet().iterator();
+      while (iterator.hasNext()) {
+        Map.Entry<Integer, LayoutFeature> f = iterator.next();
+        Optional<? extends LayoutFeature.UpgradeAction> upgradeAction =
+            f.getValue().onFinalizeAction();
+        upgradeAction.ifPresent(action -> action.executeAction(param));
+        // ToDo : Handle shutdown while iterating case (resume from last
+        //  feature).
+        metadataLayoutVersion = f.getKey();
+      }
+      // ToDo : Persist new MLV.
+    }
+  }
+
+  protected void reset() {
+    metadataLayoutVersion = 0;
+    softwareLayoutVersion = 0;
+    featureMap.clear();
+    features.clear();
+    isInitialized = false;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutFeature.java
similarity index 55%
copy from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java
copy to hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutFeature.java
index d9024d1..05e944e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutFeature.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -16,7 +16,30 @@
  * limitations under the License.
  */
 
+package org.apache.hadoop.ozone.upgrade;
+
+import java.util.Optional;
+
 /**
- * Package contains classes related to S3 security responses.
+ * Generic Layout feature interface for Ozone.
  */
-package org.apache.hadoop.ozone.om.request.s3.security;
+public interface LayoutFeature {
+  String name();
+
+  int layoutVersion();
+
+  String description();
+
+  default Optional<? extends UpgradeAction> onFinalizeAction() {
+    return Optional.empty();
+  }
+
+  /**
+   * Generic UpgradeAction interface. An operation that is run on specific
+   * upgrade states like post finalize, pre-downgrade etc.
+   * @param <T>
+   */
+  interface UpgradeAction<T> {
+    void executeAction(T arg);
+  }
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionManager.java
new file mode 100644
index 0000000..432bd52
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionManager.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.upgrade;
+
+/**
+ * Read Only interface to an Ozone component's Version Manager.
+ */
+public interface LayoutVersionManager {
+
+  /**
+   * Get the Current Metadata Layout Version.
+   * @return MLV
+   */
+  int getMetadataLayoutVersion();
+
+  /**
+   * Get the Current Software Layout Version.
+   * @return SLV
+   */
+  int getSoftwareLayoutVersion();
+
+  /**
+   * Does it need finalization?
+   * @return true/false
+   */
+  boolean needsFinalization();
+
+  /**
+   * Is allowed feature?
+   * @param layoutFeature feature object
+   * @return true/false.
+   */
+  boolean isAllowed(LayoutFeature layoutFeature);
+
+  /**
+   * Is allowed feature?
+   * @param featureName feature name
+   * @return true/false.
+   */
+  boolean isAllowed(String featureName);
+
+  /**
+   * Get Feature given feature name.
+   * @param name Feature name.
+   * @return LayoutFeature instance.
+   */
+  LayoutFeature getFeature(String name);
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/package-info.java
similarity index 80%
copy from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java
copy to hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/package-info.java
index d9024d1..cf992d8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/package-info.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -17,6 +17,8 @@
  */
 
 /**
- * Package contains classes related to S3 security responses.
+ * This package contains classes for the Ozone upgrade and layout version
+ * management.
  */
-package org.apache.hadoop.ozone.om.request.s3.security;
+package org.apache.hadoop.ozone.upgrade;
+
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestAbstractLayoutVersionManager.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestAbstractLayoutVersionManager.java
new file mode 100644
index 0000000..44fa100
--- /dev/null
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestAbstractLayoutVersionManager.java
@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.upgrade;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test generic layout management init and APIs.
+ */
+public class TestAbstractLayoutVersionManager {
+
+  private AbstractLayoutVersionManager versionManager =
+      new MockVersionManager();
+
+  @Before
+  public void setUp() {
+    versionManager.reset();
+  }
+
+  @Test
+  public void testInit() {
+    versionManager.init(1,
+        getTestLayoutFeatures(2));
+    assertEquals(2, versionManager.features.size());
+    assertEquals(2, versionManager.featureMap.size());
+    assertEquals(1, versionManager.getMetadataLayoutVersion());
+    assertEquals(2, versionManager.getSoftwareLayoutVersion());
+    assertTrue(versionManager.needsFinalization());
+  }
+
+  @Test
+  public void testNeedsFinalization() {
+    versionManager.init(2, getTestLayoutFeatures(2));
+    assertFalse(versionManager.needsFinalization());
+  }
+
+  private LayoutFeature[] getTestLayoutFeatures(int num) {
+    LayoutFeature[] lfs = new LayoutFeature[num];
+    int k = 0;
+    for (int i = 1; i <= num; i++) {
+      int finalI = i;
+      lfs[k++] = new LayoutFeature() {
+        @Override
+        public String name() {
+          return "LF-" + finalI;
+        }
+
+        @Override
+        public int layoutVersion() {
+          return finalI;
+        }
+
+        @Override
+        public String description() {
+          return null;
+        }
+      };
+    }
+    return lfs;
+  }
+
+  static class MockVersionManager extends AbstractLayoutVersionManager {
+  }
+}
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml
index 0d239d0..7891666 100644
--- a/hadoop-ozone/ozone-manager/pom.xml
+++ b/hadoop-ozone/ozone-manager/pom.xml
@@ -31,6 +31,18 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
   <dependencies>
 
     <dependency>
+      <groupId>org.aspectj</groupId>
+      <artifactId>aspectjrt</artifactId>
+      <version>1.8.9</version>
+    </dependency>
+
+    <dependency>
+      <groupId>org.aspectj</groupId>
+      <artifactId>aspectjweaver</artifactId>
+      <version>1.8.9</version>
+    </dependency>
+
+    <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-ozone-common</artifactId>
     </dependency>
@@ -151,6 +163,25 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
           </execution>
         </executions>
       </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>aspectj-maven-plugin</artifactId>
+        <version>1.10</version>
+        <configuration>
+          <source>1.8</source>
+          <target>1.8</target>
+        </configuration>
+        <executions>
+          <execution>
+            <goals>
+              <goal>compile</goal>
+            </goals>
+            <configuration>
+              <complianceLevel>1.8</complianceLevel>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
     </plugins>
     <testResources>
       <testResource>
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 212a8e1..5af09ee 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -141,6 +141,7 @@ import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
 import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.snapshot.OzoneManagerSnapshotProvider;
+import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DBUpdatesRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
@@ -1141,6 +1142,9 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
     metadataManager.start(configuration);
     startSecretManagerIfNecessary();
 
+    OMLayoutVersionManager omVersionManager =
+        OMLayoutVersionManager.initialize(omStorage);
+
     if (certClient != null) {
       caCertPem = CertificateCodec.getPEMEncodedString(
           certClient.getCACertificate());
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java
index d9024d1..eb86413 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java
@@ -19,4 +19,4 @@
 /**
  * Package contains classes related to S3 security responses.
  */
-package org.apache.hadoop.ozone.om.request.s3.security;
+package org.apache.hadoop.ozone.om.response.s3.security;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/NewOmFeatureUpgradeAction.java
similarity index 66%
copy from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java
copy to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/NewOmFeatureUpgradeAction.java
index d9024d1..bb4eb7c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/NewOmFeatureUpgradeAction.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -16,7 +16,17 @@
  * limitations under the License.
  */
 
+package org.apache.hadoop.ozone.om.upgrade;
+
+import org.apache.hadoop.ozone.om.OzoneManager;
+
 /**
- * Package contains classes related to S3 security responses.
+ * Stub OM Action class to help with understanding. Will be removed.
  */
-package org.apache.hadoop.ozone.om.request.s3.security;
+public class NewOmFeatureUpgradeAction implements OmUpgradeAction {
+
+  @Override
+  public void executeAction(OzoneManager ozoneManager) {
+    // Do blah....
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureAPI.java
similarity index 58%
copy from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java
copy to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureAPI.java
index d9024d1..2da8b38 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureAPI.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -16,7 +16,20 @@
  * limitations under the License.
  */
 
+package org.apache.hadoop.ozone.om.upgrade;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeatureCatalog.OMLayoutFeature;
+
 /**
- * Package contains classes related to S3 security responses.
+ * Annotation to specify if an API is backed by a Layout Feature.
  */
-package org.apache.hadoop.ozone.om.request.s3.security;
+@Target(ElementType.METHOD)
+@Retention(RetentionPolicy.RUNTIME)
+public @interface OMLayoutFeatureAPI {
+  OMLayoutFeature value();
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureAspect.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureAspect.java
new file mode 100644
index 0000000..a92e3b4
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureAspect.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.upgrade;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_SUPPORTED_OPERATION;
+
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.upgrade.LayoutFeature;
+import org.apache.hadoop.ozone.upgrade.LayoutVersionManager;
+import org.aspectj.lang.JoinPoint;
+import org.aspectj.lang.annotation.Aspect;
+import org.aspectj.lang.annotation.Before;
+import org.aspectj.lang.reflect.MethodSignature;
+
+/**
+ * 'Aspect' for OM Layout Feature API. All methods annotated with the
+ * specific annotation will have pre-processing done here to check layout
+ * version compatibility.
+ */
+@Aspect
+public class OMLayoutFeatureAspect {
+
+  @Before("@annotation(OMLayoutFeatureAPI) && execution(* *(..))")
+  public void checkLayoutFeature(JoinPoint joinPoint) throws Throwable {
+    String featureName = ((MethodSignature) joinPoint.getSignature())
+        .getMethod().getAnnotation(OMLayoutFeatureAPI.class).value().name();
+    LayoutVersionManager lvm = OMLayoutVersionManager.getInstance();
+    if (!lvm.isAllowed(featureName)) {
+      LayoutFeature layoutFeature = lvm.getFeature(featureName);
+      throw new OMException(String.format("Operation %s cannot be invoked " +
+          "before finalization. Current layout version = %d, feature's layout" +
+              " version = %d",
+          featureName,
+          lvm.getMetadataLayoutVersion(),
+          layoutFeature.layoutVersion()), NOT_SUPPORTED_OPERATION);
+    }
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureCatalog.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureCatalog.java
new file mode 100644
index 0000000..c5ed27d
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureCatalog.java
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.upgrade;
+
+import java.util.Optional;
+
+import org.apache.hadoop.ozone.upgrade.LayoutFeature;
+
+/**
+ * Catalog of Ozone Manager features.
+ */
+public class OMLayoutFeatureCatalog {
+
+  /**
+   * List of OM Features.
+   */
+  public enum OMLayoutFeature implements LayoutFeature {
+    INITIAL_VERSION(0, "Initial Layout Version"),
+    CREATE_EC(1, ""),
+    NEW_FEATURE(2, "new feature", new NewOmFeatureUpgradeAction());
+
+
+    private int layoutVersion;
+    private String description;
+    private Optional<OmUpgradeAction> omUpgradeAction = Optional.empty();
+
+    OMLayoutFeature(final int layoutVersion, String description) {
+      this.layoutVersion = layoutVersion;
+      this.description = description;
+    }
+
+    OMLayoutFeature(final int layoutVersion, String description,
+                    OmUpgradeAction upgradeAction) {
+      this.layoutVersion = layoutVersion;
+      this.description = description;
+      omUpgradeAction = Optional.of(upgradeAction);
+    }
+
+    @Override
+    public int layoutVersion() {
+      return layoutVersion;
+    }
+
+    @Override
+    public String description() {
+      return description;
+    }
+
+    @Override
+    public Optional<OmUpgradeAction> onFinalizeAction() {
+      return omUpgradeAction;
+    }
+  }
+
+  /**
+   * This is an example of an "API" that uses a new Layout feature (EC) that is
+   * not yet supported by the current layout version. The following can be
+   * "guarded" by just adding the following annotation, thereby keeping the
+   * method logic and upgrade logic separate.
+   */
+  @OMLayoutFeatureAPI(OMLayoutFeature.CREATE_EC)
+  public String ecMethod() {
+    // Blah Blah EC Blah....
+    return "ec";
+  }
+
+  /**
+   * This is an example of an "API" that uses a Layout feature (EC) that is
+   * supported by the current layout version.
+   */
+  @OMLayoutFeatureAPI(OMLayoutFeature.INITIAL_VERSION)
+  public String basicMethod() {
+    // Blah Blah Basic Blah....
+    return "basic";
+  }
+}
+
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManager.java
new file mode 100644
index 0000000..2f959a9
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManager.java
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.upgrade;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_SUPPORTED_OPERATION;
+
+import org.apache.hadoop.ozone.common.Storage;
+import org.apache.hadoop.ozone.om.OMStorage;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeatureCatalog.OMLayoutFeature;
+import org.apache.hadoop.ozone.upgrade.AbstractLayoutVersionManager;
+import org.apache.hadoop.ozone.upgrade.LayoutVersionManager;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Class to manage layout versions and features for Ozone Manager.
+ */
+public final class OMLayoutVersionManager extends AbstractLayoutVersionManager {
+
+  private static OMLayoutVersionManager omVersionManager;
+
+  private OMLayoutVersionManager() {
+  }
+
+  /**
+   * Read only instance to OM Version Manager.
+   * @return version manager instance.
+   */
+  public static synchronized LayoutVersionManager getInstance() {
+    if (omVersionManager == null) {
+      throw new RuntimeException("OM Layout Version Manager not yet " +
+          "initialized.");
+    }
+    return omVersionManager;
+  }
+
+
+  /**
+   * Initialize OM version manager from storage.
+   * @return version manager instance.
+   */
+  public static synchronized OMLayoutVersionManager initialize(
+      OMStorage omStorage)
+      throws OMException {
+    if (omVersionManager == null) {
+      omVersionManager = new OMLayoutVersionManager();
+      omVersionManager.init(omStorage);
+    }
+    return omVersionManager;
+  }
+
+  /**
+   * Initialize the OM Layout Features and current Layout Version.
+   * @param storage to read the current layout version.
+   * @throws OMException on error.
+   */
+  private void init(Storage storage) throws OMException {
+    init(storage.getLayoutVersion(), OMLayoutFeature.values());
+    if (metadataLayoutVersion > softwareLayoutVersion) {
+      throw new OMException(
+          String.format("Cannot initialize VersionManager. Metadata " +
+                  "layout version (%d) > software layout version (%d)",
+              metadataLayoutVersion, softwareLayoutVersion),
+          NOT_SUPPORTED_OPERATION);
+    }
+  }
+
+  @VisibleForTesting
+  protected synchronized static void resetLayoutVersionManager() {
+    if (omVersionManager != null) {
+      omVersionManager.reset();
+      omVersionManager = null;
+    }
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OmUpgradeAction.java
similarity index 67%
copy from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java
copy to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OmUpgradeAction.java
index d9024d1..da9a063 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OmUpgradeAction.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -16,7 +16,13 @@
  * limitations under the License.
  */
 
+package org.apache.hadoop.ozone.om.upgrade;
+
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.upgrade.LayoutFeature.UpgradeAction;
+
 /**
- * Package contains classes related to S3 security responses.
+ * Upgrade Action for OzoneManager which takes in an 'OM' instance.
  */
-package org.apache.hadoop.ozone.om.request.s3.security;
+public interface OmUpgradeAction extends UpgradeAction<OzoneManager> {
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/package-info.java
similarity index 86%
copy from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java
copy to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/package-info.java
index d9024d1..d663049 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/package-info.java
@@ -16,7 +16,8 @@
  * limitations under the License.
  */
 
+package org.apache.hadoop.ozone.om.upgrade;
+
 /**
- * Package contains classes related to S3 security responses.
- */
-package org.apache.hadoop.ozone.om.request.s3.security;
+ * This package contains OM upgrade-related classes.
+ */
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java
index 49f28d3..73a2888 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.om.request.volume;
 
 import java.util.UUID;
 
-import org.junit.Assert;;
+import org.junit.Assert;
 import org.junit.Test;
 
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMLayoutFeatureAspect.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMLayoutFeatureAspect.java
new file mode 100644
index 0000000..b68c7c2
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMLayoutFeatureAspect.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.upgrade;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_SUPPORTED_OPERATION;
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.om.OMStorage;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+/**
+ * Class to test annotation based interceptor that checks whether layout
+ * feature API is allowed.
+ */
+public class TestOMLayoutFeatureAspect {
+
+  @Rule
+  public TemporaryFolder temporaryFolder = new TemporaryFolder();
+
+  private OzoneConfiguration configuration = new OzoneConfiguration();
+
+  @Before
+  public void setUp() throws IOException {
+    configuration.set("ozone.metadata.dirs",
+        temporaryFolder.newFolder().getAbsolutePath());
+  }
+
+  /**
+   * This unit test invokes the above 2 layout feature APIs. The first one
+   * should fail, and the second one should pass.
+   * @throws Exception
+   */
+  @Test
+  public void testCheckLayoutFeature() throws Exception {
+    OMLayoutVersionManager.initialize(new OMStorage(configuration));
+    OMLayoutFeatureCatalog testObj = new OMLayoutFeatureCatalog();
+    try {
+      testObj.ecMethod();
+      Assert.fail();
+    } catch (Exception ex) {
+      OMException omEx = (OMException) ex;
+      assertEquals(NOT_SUPPORTED_OPERATION, omEx.getResult());
+    }
+    String s = testObj.basicMethod();
+    assertEquals("basic", s);
+  }
+}
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMVersionManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMVersionManager.java
new file mode 100644
index 0000000..cfcfe24
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMVersionManager.java
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.upgrade;
+
+import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeatureCatalog.OMLayoutFeature.CREATE_EC;
+import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeatureCatalog.OMLayoutFeature.INITIAL_VERSION;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+
+import org.apache.hadoop.ozone.om.OMStorage;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeatureCatalog.OMLayoutFeature;
+import org.apache.hadoop.ozone.upgrade.LayoutFeature;
+import org.junit.Test;
+
+/**
+ * Test OM layout version management.
+ */
+public class TestOMVersionManager {
+
+  @Test
+  public void testOMLayoutVersionManager() throws IOException {
+    OMStorage omStorage = mock(OMStorage.class);
+    when(omStorage.getLayoutVersion()).thenReturn(0);
+    OMLayoutVersionManager omVersionManager =
+        OMLayoutVersionManager.initialize(omStorage);
+    assertTrue(omVersionManager.isAllowed(INITIAL_VERSION));
+    assertFalse(omVersionManager.isAllowed(CREATE_EC));
+    assertEquals(0, omVersionManager.getMetadataLayoutVersion());
+    assertTrue(omVersionManager.needsFinalization());
+    omVersionManager.doFinalize(mock(OzoneManager.class));
+    assertFalse(omVersionManager.needsFinalization());
+    assertEquals(2, omVersionManager.getMetadataLayoutVersion());
+  }
+
+  @Test
+  public void testOMLayoutFeatureCatalog() {
+    OMLayoutFeature[] values = OMLayoutFeature.values();
+    int currVersion = Integer.MIN_VALUE;
+    for (LayoutFeature lf : values) {
+      assertTrue(currVersion <= lf.layoutVersion());
+      currVersion = lf.layoutVersion();
+    }
+  }
+}
\ No newline at end of file




[hadoop-ozone] 07/08: HDDS-4227. Implement a 'Prepare For Upgrade' step in OM that applies all committed Ratis transactions. (#1430)

Posted by av...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

avijayan pushed a commit to branch HDDS-3698-upgrade
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git

commit 08d8d36c66f5c10289f56e41d2be481c39b406ae
Author: avijayanhwx <14...@users.noreply.github.com>
AuthorDate: Tue Sep 29 09:41:43 2020 -0700

    HDDS-4227. Implement a 'Prepare For Upgrade' step in OM that applies all committed Ratis transactions. (#1430)
---
 hadoop-hdds/common/pom.xml                         |  8 ++
 .../hadoop/hdds/ratis/RatisUpgradeUtils.java       | 96 +++++++++++++++++++++
 .../hadoop/hdds/ratis/TestRatisUpgradeUtils.java   | 97 ++++++++++++++++++++++
 .../org/apache/hadoop/ozone/om/OMConfigKeys.java   |  4 +
 .../apache/hadoop/ozone/om/OMStarterInterface.java |  2 +
 .../org/apache/hadoop/ozone/om/OzoneManager.java   | 64 ++++++++++++--
 .../hadoop/ozone/om/OzoneManagerStarter.java       | 38 +++++++++
 .../ozone/om/ratis/OzoneManagerDoubleBuffer.java   |  2 +-
 .../ozone/om/ratis/OzoneManagerRatisServer.java    |  4 +
 .../hadoop/ozone/om/TestOzoneManagerStarter.java   | 11 +++
 10 files changed, 319 insertions(+), 7 deletions(-)

diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml
index 8525976..4a17336 100644
--- a/hadoop-hdds/common/pom.xml
+++ b/hadoop-hdds/common/pom.xml
@@ -193,6 +193,14 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>hamcrest-all</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>com.codahale.metrics</groupId>
+      <artifactId>metrics-core</artifactId>
+      <version>3.0.2</version>
+      <scope>test</scope>
+      <!-- Needed for mocking RaftServerImpl -->
+    </dependency>
+
   </dependencies>
 
   <build>
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisUpgradeUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisUpgradeUtils.java
new file mode 100644
index 0000000..796668d
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisUpgradeUtils.java
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.ratis;
+
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.ratis.protocol.RaftGroup;
+import org.apache.ratis.server.impl.RaftServerImpl;
+import org.apache.ratis.server.impl.RaftServerProxy;
+import org.apache.ratis.statemachine.StateMachine;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Ratis utility functions.
+ */
+public final class RatisUpgradeUtils {
+
+  private RatisUpgradeUtils() {
+  }
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(RatisUpgradeUtils.class);
+
+  /**
+   * Flush all committed transactions in a given Raft Server for a given group.
+   * @param stateMachine state machine to use
+   * @param raftGroup raft group
+   * @param server Raft server proxy instance.
+   * @param maxTimeToWaitSeconds Max time to wait before declaring failure.
+   * @throws InterruptedException when interrupted
+   * @throws IOException on error while waiting
+   */
+  public static void waitForAllTxnsApplied(
+      StateMachine stateMachine,
+      RaftGroup raftGroup,
+      RaftServerProxy server,
+      long maxTimeToWaitSeconds,
+      long timeBetweenRetryInSeconds)
+      throws InterruptedException, IOException {
+
+    long intervalTime = TimeUnit.SECONDS.toMillis(timeBetweenRetryInSeconds);
+    long endTime = System.currentTimeMillis() +
+        TimeUnit.SECONDS.toMillis(maxTimeToWaitSeconds);
+    boolean success = false;
+    while (System.currentTimeMillis() < endTime) {
+      success = checkIfAllTransactionsApplied(stateMachine, server, raftGroup);
+      if (success) {
+        break;
+      }
+      Thread.sleep(intervalTime);
+    }
+
+    if (!success) {
+      throw new IOException(String.format("After waiting for %d seconds, " +
+          "State Machine has not applied  all the transactions.",
+          maxTimeToWaitSeconds));
+    }
+
+    long snapshotIndex = stateMachine.takeSnapshot();
+    if (snapshotIndex != stateMachine.getLastAppliedTermIndex().getIndex()) {
+      throw new IOException("Index from Snapshot does not match last applied " +
+          "Index");
+    }
+  }
+
+  private static boolean checkIfAllTransactionsApplied(
+      StateMachine stateMachine,
+      RaftServerProxy serverProxy,
+      RaftGroup raftGroup) throws IOException {
+    LOG.info("Checking for pending transactions to be applied.");
+    RaftServerImpl impl = serverProxy.getImpl(raftGroup.getGroupId());
+    long lastCommittedIndex = impl.getState().getLog().getLastCommittedIndex();
+    long appliedIndex = stateMachine.getLastAppliedTermIndex().getIndex();
+    LOG.info("lastCommittedIndex = {}, appliedIndex = {}",
+        lastCommittedIndex, appliedIndex);
+    return (lastCommittedIndex == appliedIndex);
+  }
+
+}
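
For illustration only (not part of this patch): a minimal sketch of how a Ratis-backed service could drive the utility above. The class and field names are placeholders, and the 300/5 second values simply mirror the OM defaults introduced elsewhere in this commit.

    import java.io.IOException;

    import org.apache.hadoop.hdds.ratis.RatisUpgradeUtils;
    import org.apache.ratis.protocol.RaftGroup;
    import org.apache.ratis.server.impl.RaftServerProxy;
    import org.apache.ratis.statemachine.StateMachine;

    /** Hypothetical caller; the fields stand in for the live Ratis objects. */
    class UpgradePreparer {
      private final StateMachine stateMachine;   // state machine of this service
      private final RaftGroup raftGroup;         // group this service belongs to
      private final RaftServerProxy serverProxy; // server hosting the group

      UpgradePreparer(StateMachine sm, RaftGroup group, RaftServerProxy proxy) {
        this.stateMachine = sm;
        this.raftGroup = group;
        this.serverProxy = proxy;
      }

      /** Returns only after every committed entry is applied and snapshotted. */
      void flushAndSnapshot() throws IOException, InterruptedException {
        RatisUpgradeUtils.waitForAllTxnsApplied(
            stateMachine, raftGroup, serverProxy,
            300,  // give up after 5 minutes
            5);   // re-check every 5 seconds
      }
    }
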
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestRatisUpgradeUtils.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestRatisUpgradeUtils.java
new file mode 100644
index 0000000..078bbb5
--- /dev/null
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestRatisUpgradeUtils.java
@@ -0,0 +1,97 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.ratis;
+
+import static org.apache.hadoop.hdds.ratis.RatisUpgradeUtils.waitForAllTxnsApplied;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.apache.ratis.protocol.RaftGroup;
+import org.apache.ratis.server.impl.RaftServerImpl;
+import org.apache.ratis.server.impl.RaftServerProxy;
+import org.apache.ratis.server.impl.ServerState;
+import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.ratis.server.raftlog.RaftLog;
+import org.apache.ratis.statemachine.StateMachine;
+import org.junit.Test;
+
+/**
+ * Testing util methods in RatisUpgradeUtils.
+ */
+public class TestRatisUpgradeUtils {
+
+  @Test
+  public void testWaitForAllTxnsApplied() throws IOException,
+      InterruptedException {
+
+    StateMachine stateMachine = mock(StateMachine.class);
+    RaftGroup raftGroup = RaftGroup.emptyGroup();
+    RaftServerProxy raftServerProxy = mock(RaftServerProxy.class);
+    RaftServerImpl raftServer = mock(RaftServerImpl.class);
+    ServerState serverState = mock(ServerState.class);
+    RaftLog raftLog = mock(RaftLog.class);
+
+    when(raftServerProxy.getImpl(
+        raftGroup.getGroupId())).thenReturn(raftServer);
+    when(raftServer.getState()).thenReturn(serverState);
+    when(serverState.getLog()).thenReturn(raftLog);
+    when(raftLog.getLastCommittedIndex()).thenReturn(1L);
+
+    TermIndex termIndex = mock(TermIndex.class);
+    when(termIndex.getIndex()).thenReturn(0L).thenReturn(0L).thenReturn(1L);
+    when(stateMachine.getLastAppliedTermIndex()).thenReturn(termIndex);
+    when(stateMachine.takeSnapshot()).thenReturn(1L);
+
+    waitForAllTxnsApplied(stateMachine, raftGroup, raftServerProxy, 10, 2);
+    verify(stateMachine,
+        times(4)).getLastAppliedTermIndex(); // 3 checks + 1 after snapshot
+  }
+
+  @Test
+  public void testWaitForAllTxnsAppliedTimeOut() throws Exception {
+
+    StateMachine stateMachine = mock(StateMachine.class);
+    RaftGroup raftGroup = RaftGroup.emptyGroup();
+    RaftServerProxy raftServerProxy = mock(RaftServerProxy.class);
+    RaftServerImpl raftServer = mock(RaftServerImpl.class);
+    ServerState serverState = mock(ServerState.class);
+    RaftLog raftLog = mock(RaftLog.class);
+
+    when(raftServerProxy.getImpl(
+        raftGroup.getGroupId())).thenReturn(raftServer);
+    when(raftServer.getState()).thenReturn(serverState);
+    when(serverState.getLog()).thenReturn(raftLog);
+    when(raftLog.getLastCommittedIndex()).thenReturn(1L);
+
+    TermIndex termIndex = mock(TermIndex.class);
+    when(termIndex.getIndex()).thenReturn(0L);
+    when(stateMachine.getLastAppliedTermIndex()).thenReturn(termIndex);
+    when(stateMachine.takeSnapshot()).thenReturn(1L);
+
+    LambdaTestUtils.intercept(IOException.class, "State Machine has not " +
+        "applied  all the transactions", () ->
+        waitForAllTxnsApplied(stateMachine, raftGroup, raftServerProxy,
+            10, 2));
+  }
+
+}
\ No newline at end of file
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index f16679a..fba6dcc 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -246,4 +246,8 @@ public final class OMConfigKeys {
       "ozone.om.enable.filesystem.paths";
   public static final boolean OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT =
       false;
+
+  public static final long OZONE_OM_MAX_TIME_TO_WAIT_FLUSH_TXNS =
+      TimeUnit.MINUTES.toSeconds(5);
+  public static final long OZONE_OM_FLUSH_TXNS_RETRY_INTERVAL_SECONDS = 5L;
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStarterInterface.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStarterInterface.java
index f632ad1..14252a7 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStarterInterface.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStarterInterface.java
@@ -30,4 +30,6 @@ public interface OMStarterInterface {
       AuthenticationException;
   boolean init(OzoneConfiguration conf) throws IOException,
       AuthenticationException;
+  boolean prepareForUpgrade(OzoneConfiguration conf) throws IOException,
+      AuthenticationException;
 }
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index b7a6b4f..3a2555a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -193,6 +193,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED_DEFAULT;
 import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForBlockClients;
 import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
+import static org.apache.hadoop.hdds.ratis.RatisUpgradeUtils.waitForAllTxnsApplied;
 import static org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest.getEncodedString;
 import static org.apache.hadoop.hdds.server.ServerUtils.getRemoteUserName;
 import static org.apache.hadoop.hdds.server.ServerUtils.updateRPCListenAddress;
@@ -215,10 +216,12 @@ import static org.apache.hadoop.ozone.OzoneConsts.RPC_PORT;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_FLUSH_TXNS_RETRY_INTERVAL_SECONDS;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HANDLER_COUNT_DEFAULT;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_MAX_TIME_TO_WAIT_FLUSH_TXNS;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_METRICS_SAVE_INTERVAL;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_METRICS_SAVE_INTERVAL_DEFAULT;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_USER_MAX_VOLUME;
@@ -234,6 +237,7 @@ import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.
 
 import org.apache.hadoop.util.Time;
 import org.apache.ratis.proto.RaftProtos.RaftPeerRole;
+import org.apache.ratis.server.impl.RaftServerProxy;
 import org.apache.ratis.server.protocol.TermIndex;
 import org.apache.ratis.util.ExitUtils;
 import org.apache.ratis.util.FileUtils;
@@ -327,20 +331,22 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
   private final boolean useRatisForReplication;
 
   private boolean isNativeAuthorizerEnabled;
+  private boolean prepareForUpgrade;
 
   private ExitManager exitManager;
 
   private enum State {
     INITIALIZED,
     RUNNING,
+    PREPARING_FOR_UPGRADE,
     STOPPED
   }
 
   // Used in MiniOzoneCluster testing
   private State omState;
 
-  private OzoneManager(OzoneConfiguration conf) throws IOException,
-      AuthenticationException {
+  private OzoneManager(OzoneConfiguration conf, boolean forUpgrade)
+      throws IOException, AuthenticationException {
     super(OzoneVersionInfo.OZONE_VERSION_INFO);
     Preconditions.checkNotNull(conf);
     configuration = conf;
@@ -486,6 +492,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
     };
     ShutdownHookManager.get().addShutdownHook(shutdownHook,
         SHUTDOWN_HOOK_PRIORITY);
+    this.prepareForUpgrade = forUpgrade;
     omState = State.INITIALIZED;
   }
 
@@ -919,7 +926,12 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
    */
   public static OzoneManager createOm(OzoneConfiguration conf)
       throws IOException, AuthenticationException {
-    return new OzoneManager(conf);
+    return new OzoneManager(conf, false);
+  }
+
+  public static OzoneManager createOmUpgradeMode(OzoneConfiguration conf)
+      throws IOException, AuthenticationException {
+    return new OzoneManager(conf, true);
   }
 
   /**
@@ -995,6 +1007,39 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
     }
   }
 
+  public boolean applyAllPendingTransactions()
+      throws InterruptedException, IOException {
+
+    if (!isRatisEnabled) {
+      LOG.info("Ratis not enabled. Nothing to do.");
+      return true;
+    }
+
+    waitForAllTxnsApplied(omRatisServer.getOmStateMachine(),
+        omRatisServer.getRaftGroup(),
+        (RaftServerProxy) omRatisServer.getServer(),
+        OZONE_OM_MAX_TIME_TO_WAIT_FLUSH_TXNS,
+        OZONE_OM_FLUSH_TXNS_RETRY_INTERVAL_SECONDS);
+
+    long appliedIndexFromRatis =
+        omRatisServer.getOmStateMachine().getLastAppliedTermIndex().getIndex();
+    OMTransactionInfo omTransactionInfo =
+        OMTransactionInfo.readTransactionInfo(metadataManager);
+    long index = omTransactionInfo.getTermIndex().getIndex();
+    if (index != appliedIndexFromRatis) {
+      throw new IllegalStateException(
+          String.format("Cannot prepare OM for Upgrade " +
+          "since transaction info table index %d does not match ratis %s",
+              index, appliedIndexFromRatis));
+    }
+
+    LOG.info("OM has been prepared for upgrade. All transactions " +
+        "upto {} have been flushed to the state machine, " +
+        "and a snapshot has been taken.",
+        omRatisServer.getOmStateMachine().getLastAppliedTermIndex().getIndex());
+    return true;
+  }
+
   /**
    * Initializes secure OzoneManager.
    */
@@ -1180,15 +1225,22 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
       // Allow OM to start as Http Server failure is not fatal.
       LOG.error("OM HttpServer failed to start.", ex);
     }
-    omRpcServer.start();
-    isOmRpcServerRunning = true;
 
+    if (!prepareForUpgrade) {
+      omRpcServer.start();
+      isOmRpcServerRunning = true;
+    }
     registerMXBean();
 
     startJVMPauseMonitor();
     setStartTime();
-    omState = State.RUNNING;
 
+    if (!prepareForUpgrade) {
+      omState = State.RUNNING;
+    } else {
+      omState = State.PREPARING_FOR_UPGRADE;
+      LOG.info("Started OM services in upgrade mode.");
+    }
   }
 
   /**
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java
index 6dc4aea..936980b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java
@@ -98,6 +98,28 @@ public class OzoneManagerStarter extends GenericCli {
     }
   }
 
+
+  /**
+   * This function implements a sub-command to allow the OM to be
+   * "prepared for upgrade".
+   */
+  @CommandLine.Command(name = "--prepareForUpgrade",
+      aliases = {"--prepareForDowngrade", "--flushTransactions"},
+      customSynopsis = "ozone om [global options] --prepareForUpgrade",
+      hidden = false,
+      description = "Prepare the OM for upgrade/downgrade. (Flush Raft log " +
+          "transactions.)",
+      mixinStandardHelpOptions = true,
+      versionProvider = HddsVersionProvider.class)
+  public void prepareOmForUpgrade() throws Exception {
+    commonInit();
+    boolean result = receiver.prepareForUpgrade(conf);
+    if (!result) {
+      throw new Exception("Prepare OM For Upgrade failed.");
+    }
+    System.exit(0);
+  }
+
   /**
    * This function should be called by each command to ensure the configuration
    * is set and print the startup banner message.
@@ -130,6 +152,22 @@ public class OzoneManagerStarter extends GenericCli {
         AuthenticationException {
       return OzoneManager.omInit(conf);
     }
+
+    public boolean prepareForUpgrade(OzoneConfiguration conf)
+        throws IOException, AuthenticationException {
+      try (OzoneManager om = OzoneManager.createOmUpgradeMode(conf)) {
+        om.start();
+        boolean success = false;
+        try {
+          LOG.info("Preparing OM for upgrade.");
+          success = om.applyAllPendingTransactions();
+        } catch (InterruptedException e) {
+          LOG.error("Error preparing OM for upgrade.", e);
+          Thread.currentThread().interrupt();
+        }
+        return success;
+      }
+    }
   }
 
 }
\ No newline at end of file
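
Usage note (illustrative, based on the synopsis declared above): an operator would trigger the flush with 'ozone om --prepareForUpgrade' (or the '--prepareForDowngrade' / '--flushTransactions' aliases). The starter then builds an OM in upgrade mode, blocks in applyAllPendingTransactions() until the Ratis log is fully applied and snapshotted, and surfaces an error if the flush does not complete in time.
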
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
index 68d359e..dcf7984 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
@@ -319,7 +319,7 @@ public final class OzoneManagerDoubleBuffer {
 
           if (LOG.isDebugEnabled()) {
             LOG.debug("Sync Iteration {} flushed transactions in this " +
-                    "iteration{}", flushIterations.get(),
+                    "iteration {}", flushIterations.get(),
                 flushedTransactionsSize);
           }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java
index 4b8c11a..c5868f1 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java
@@ -698,4 +698,8 @@ public final class OzoneManagerRatisServer {
   public TermIndex getLastAppliedTermIndex() {
     return omStateMachine.getLastAppliedTermIndex();
   }
+
+  public RaftServer getServer() {
+    return server;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerStarter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerStarter.java
index 8028169..c137991 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerStarter.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerStarter.java
@@ -133,6 +133,8 @@ public class TestOzoneManagerStarter {
     private boolean initStatus = true;
     private boolean throwOnStart = false;
     private boolean throwOnInit = false;
+    private boolean prepareUpgradeCalled = false;
+    private boolean throwOnPrepareUpgrade = false;
 
     public void start(OzoneConfiguration conf) throws IOException,
         AuthenticationException {
@@ -150,5 +152,14 @@ public class TestOzoneManagerStarter {
       }
       return initStatus;
     }
+
+    public boolean prepareForUpgrade(OzoneConfiguration conf)
+        throws IOException, AuthenticationException {
+      prepareUpgradeCalled = true;
+      if (throwOnPrepareUpgrade) {
+        throw new IOException("Simulated Exception");
+      }
+      return true;
+    }
   }
 }
\ No newline at end of file




[hadoop-ozone] 05/08: HDDS-4143. Implement a factory for OM Requests that returns an instance based on layout version. (#1405)

Posted by av...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

avijayan pushed a commit to branch HDDS-3698-upgrade
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git

commit 6fa5fa1cd09abdba88c996e4adadcc35bca966e4
Author: avijayanhwx <14...@users.noreply.github.com>
AuthorDate: Tue Sep 15 10:40:24 2020 -0700

    HDDS-4143. Implement a factory for OM Requests that returns an instance based on layout version. (#1405)
---
 .../upgrade/LayoutVersionInstanceFactory.java      | 247 +++++++++++++++++++++
 .../hadoop/ozone/upgrade/VersionFactoryKey.java    |  70 ++++++
 .../upgrade/TestLayoutVersionInstanceFactory.java  | 191 ++++++++++++++++
 .../src/main/proto/OmClientProtocol.proto          |   5 +
 hadoop-ozone/ozone-manager/pom.xml                 |   1 -
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |  11 +-
 .../om/ratis/utils/OzoneManagerRatisUtils.java     | 186 ++++++----------
 .../hadoop/ozone/om/request/OMClientRequest.java   |   8 +-
 .../om/request/bucket/OMBucketCreateRequest.java   |   4 +
 .../om/request/bucket/OMBucketDeleteRequest.java   |   5 +
 .../request/bucket/OMBucketSetPropertyRequest.java |   5 +
 .../request/bucket/acl/OMBucketAddAclRequest.java  |   6 +
 .../bucket/acl/OMBucketRemoveAclRequest.java       |   7 +
 .../request/bucket/acl/OMBucketSetAclRequest.java  |   7 +
 .../om/request/file/OMDirectoryCreateRequest.java  |   5 +
 .../ozone/om/request/file/OMFileCreateRequest.java |  10 +-
 .../om/request/key/OMAllocateBlockRequest.java     |   5 +
 .../ozone/om/request/key/OMECKeyCreateRequest.java |  54 +++++
 .../ozone/om/request/key/OMKeyCommitRequest.java   |   5 +
 .../ozone/om/request/key/OMKeyCreateRequest.java   |   4 +
 .../ozone/om/request/key/OMKeyDeleteRequest.java   |   5 +
 .../ozone/om/request/key/OMKeyPurgeRequest.java    |   6 +
 .../ozone/om/request/key/OMKeyRenameRequest.java   |   5 +
 .../ozone/om/request/key/OMKeysDeleteRequest.java  |   5 +
 .../ozone/om/request/key/OMKeysRenameRequest.java  |   5 +
 .../om/request/key/OMTrashRecoverRequest.java      |   4 +
 .../om/request/key/acl/OMKeyAddAclRequest.java     |   7 +
 .../om/request/key/acl/OMKeyRemoveAclRequest.java  |   7 +
 .../om/request/key/acl/OMKeySetAclRequest.java     |   7 +
 .../key/acl/prefix/OMPrefixAddAclRequest.java      |   7 +
 .../key/acl/prefix/OMPrefixRemoveAclRequest.java   |   7 +
 .../key/acl/prefix/OMPrefixSetAclRequest.java      |   7 +
 .../S3InitiateMultipartUploadRequest.java          |   5 +
 .../multipart/S3MultipartUploadAbortRequest.java   |   5 +
 .../S3MultipartUploadCommitPartRequest.java        |   4 +
 .../S3MultipartUploadCompleteRequest.java          |   6 +
 .../om/request/s3/security/S3GetSecretRequest.java |   5 +
 .../security/OMCancelDelegationTokenRequest.java   |   7 +-
 .../security/OMGetDelegationTokenRequest.java      |   7 +-
 .../security/OMRenewDelegationTokenRequest.java    |   6 +
 .../upgrade/OMFinalizeUpgradeProgressRequest.java  |   6 +
 .../request/upgrade/OMFinalizeUpgradeRequest.java  |   9 +-
 .../om/request/volume/OMVolumeCreateRequest.java   |   5 +
 .../om/request/volume/OMVolumeDeleteRequest.java   |   6 +
 .../om/request/volume/OMVolumeSetOwnerRequest.java |   5 +
 .../om/request/volume/OMVolumeSetQuotaRequest.java |   5 +
 .../request/volume/acl/OMVolumeAddAclRequest.java  |   7 +
 .../volume/acl/OMVolumeRemoveAclRequest.java       |   7 +
 .../request/volume/acl/OMVolumeSetAclRequest.java  |   7 +
 ...FeatureAPI.java => BelongsToLayoutVersion.java} |  14 +-
 ...eAPI.java => DisallowedUntilLayoutVersion.java} |   8 +-
 .../hadoop/ozone/om/upgrade/OMLayoutFeature.java   |  63 ++++++
 .../ozone/om/upgrade/OMLayoutFeatureAspect.java    |  20 +-
 .../ozone/om/upgrade/OMLayoutFeatureCatalog.java   |  93 --------
 ...outFeatureAPI.java => OMLayoutFeatureUtil.java} |  36 ++-
 .../ozone/om/upgrade/OMLayoutVersionManager.java   |  92 --------
 .../om/upgrade/OMLayoutVersionManagerImpl.java     | 177 +++++++++++++++
 ...FeatureAPI.java => OmLayoutVersionManager.java} |  16 +-
 ...OzoneManagerProtocolServerSideTranslatorPB.java |   9 +-
 .../protocolPB/OzoneManagerRequestHandler.java     |  12 +-
 .../ozone/om/request/bucket/TestBucketRequest.java |   5 +
 .../ozone/om/request/key/TestOMKeyRequest.java     |   5 +
 .../om/request/volume/TestOMVolumeRequest.java     |   9 +-
 .../om/upgrade/TestOMLayoutFeatureAspect.java      |   4 +-
 .../ozone/om/upgrade/TestOMVersionManager.java     |  49 +++-
 .../TestOmVersionManagerRequestFactory.java        | 122 ++++++++++
 hadoop-ozone/s3gateway/pom.xml                     |   5 +
 67 files changed, 1378 insertions(+), 371 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionInstanceFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionInstanceFactory.java
new file mode 100644
index 0000000..96463e0
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionInstanceFactory.java
@@ -0,0 +1,247 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.upgrade;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static java.util.stream.Collectors.toList;
+
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.PriorityQueue;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Generic factory which stores different instances of Type 'T' sharded by
+ * a key & version. A single key can be associated with different versions
+ * of 'T'.
+ *
+ * Why does this class exist?
+ * A typical use case during upgrade is to have multiple versions of a class
+ * / method / object and choose among them based on the current layout
+ * version at runtime. Before finalizing, an older version is typically
+ * needed, and after finalizing, a newer version is needed. This class serves
+ * this purpose in a generic way.
+ *
+ * For example, we can create a Factory to create multiple versions of
+ * OMRequests sharded by Request Type & Layout Version Supported.
+ */
+public class LayoutVersionInstanceFactory<T> {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(LayoutVersionInstanceFactory.class);
+
+  /**
+   * The factory will maintain ALL instances > MLV and 1 instance <= MLV in a
+   * priority queue (ordered by version). By doing that it guarantees O(1)
+   * lookup at all times, since we always look up the first element (top
+   * of the PQ).
+   * Multiple entries exist ONLY in the pre-finalized state.
+   * On finalization, entries are removed one by one until a single entry
+   * remains. On a finalized component, there will always be a single
+   * version associated with each request.
+   */
+  private final Map<String, PriorityQueue<VersionedInstance<T>>> instances =
+      new HashMap<>();
+
+  /**
+   * Register an instance with a given factory key (key + version).
+   * For safety reasons we don't allow (1) re-registering, or (2) registering an
+   * instance with version > SLV.
+   *
+   * @param lvm LayoutVersionManager
+   * @param key VersionFactoryKey key to associate with instance.
+   * @param instance instance to register.
+   */
+  public boolean register(LayoutVersionManager lvm, VersionFactoryKey key,
+                       T instance) {
+    // If version is not passed in, go defensive and set the highest possible
+    // version (SLV).
+    int version = key.getVersion() == null ?
+        lvm.getSoftwareLayoutVersion() : key.getVersion();
+
+    checkArgument(lvm.getSoftwareLayoutVersion() >= version,
+        String.format("Cannot register key %s since the version is greater " +
+                "than the Software layout version %d",
+        key, lvm.getSoftwareLayoutVersion()));
+
+    // If we reach here, we know that the passed in version belongs to
+    // [0, SLV].
+    String primaryKey = key.getKey();
+    instances.computeIfAbsent(primaryKey, s ->
+        new PriorityQueue<>(Comparator.comparingInt(o -> o.version)));
+
+    PriorityQueue<VersionedInstance<T>> versionedInstances =
+        instances.get(primaryKey);
+    Optional<VersionedInstance<T>> existingInstance =
+        versionedInstances.parallelStream()
+        .filter(v -> v.version == version).findAny();
+
+    if (existingInstance.isPresent()) {
+      throw new IllegalArgumentException(String.format("Cannot register key " +
+          "%s since there is an existing entry already.", key));
+    }
+
+    if (!versionedInstances.isEmpty() && isValid(lvm, version)) {
+      VersionedInstance<T> currentPeek = versionedInstances.peek();
+      if (currentPeek.version < version) {
+        // Current peek < passed in version (and <= MLV). Hence, we can
+        // remove it, since the passed-in instance is a better candidate.
+        versionedInstances.poll();
+        // Add the passed in instance.
+        versionedInstances.offer(new VersionedInstance<>(version, instance));
+        return true;
+      } else if (currentPeek.version > lvm.getMetadataLayoutVersion()) {
+        // Current peek is > MLV, hence we don't need to remove it. Just
+        // add the passed-in instance.
+        versionedInstances.offer(new VersionedInstance<>(version, instance));
+        return true;
+      } else {
+        // Current peek <= MLV and > passed-in version, and hence a better
+        // candidate. Retain the peek and ignore the passed-in instance.
+        return false;
+      }
+    } else {
+      // Passed in instance version > MLV (or the first version to be
+      // registered), hence can be registered.
+      versionedInstances.offer(new VersionedInstance<>(version, instance));
+      return true;
+    }
+  }
+
+  private boolean isValid(LayoutVersionManager lvm, int version) {
+    return version <= lvm.getMetadataLayoutVersion();
+  }
+
+  /**
+   * From the list of versioned instances for a given "key", this
+   * returns the "floor" value corresponding to the given version.
+   * For example, if we have key = "CreateKey", entry -> [(1, CreateKeyV1),
+   * (3, CreateKeyV2)], and if the passed in key = CreateKey & version = 2, we
+   * return CreateKeyV1.
+   * Since this is a priority queue based implementation, we use a O(1) peek()
+   * lookup to get the current valid version.
+   * @param lvm LayoutVersionManager
+   * @param key Key and Version.
+   * @return instance.
+   */
+  public T get(LayoutVersionManager lvm, VersionFactoryKey key) {
+    Integer version = key.getVersion();
+    // If version is not passed in, go defensive and set the highest allowed
+    // version (MLV).
+    if (version == null) {
+      version = lvm.getMetadataLayoutVersion();
+    }
+
+    checkArgument(lvm.getMetadataLayoutVersion() >= version,
+        String.format("Cannot get key %s since the version is greater " +
+                "than the Metadata layout version %d",
+            key, lvm.getMetadataLayoutVersion()));
+
+    String primaryKey = key.getKey();
+    PriorityQueue<VersionedInstance<T>> versionedInstances =
+        instances.get(primaryKey);
+    if (versionedInstances == null || versionedInstances.isEmpty()) {
+      throw new IllegalArgumentException(
+          "No suitable instance found for request : " + key);
+    }
+
+    VersionedInstance<T> value = versionedInstances.peek();
+    if (value == null || value.version > version) {
+      throw new IllegalArgumentException(
+          "No suitable instance found for request : " + key);
+    } else {
+      return value.instance;
+    }
+  }
+
+  /**
+   * To be called on finalization when there is an MLV update.
+   * @param lvm LayoutVersionManager instance.
+   */
+  public void onFinalize(LayoutVersionManager lvm) {
+    Iterator<Map.Entry<String, PriorityQueue<VersionedInstance<T>>>> iterator =
+        instances.entrySet().iterator();
+    while (iterator.hasNext()) {
+      Map.Entry<String, PriorityQueue<VersionedInstance<T>>> next =
+          iterator.next();
+      PriorityQueue<VersionedInstance<T>> vInstances = next.getValue();
+      VersionedInstance<T> prevInstance = null;
+      while (!vInstances.isEmpty() &&
+          vInstances.peek().version < lvm.getMetadataLayoutVersion()) {
+        prevInstance = vInstances.poll();
+        LOG.info("Unregistering {} from factory. ", prevInstance.instance);
+      }
+
+      if ((vInstances.isEmpty() ||
+          vInstances.peek().version > lvm.getMetadataLayoutVersion())
+          && prevInstance != null) {
+        vInstances.offer(prevInstance);
+      }
+
+      if (vInstances.isEmpty()) {
+        LOG.info("Unregistering '{}' from factory since it has no entries.",
+            next.getKey());
+        iterator.remove();
+      }
+    }
+  }
+
+  @VisibleForTesting
+  protected Map<String, List<T>> getInstances()  {
+    Map<String, List<T>> instancesCopy = new HashMap<>();
+    instances.forEach((key, value) -> {
+      List<T> collect =
+          value.stream().map(v -> v.instance).collect(toList());
+      instancesCopy.put(key, collect);
+    });
+    return Collections.unmodifiableMap(instancesCopy);
+  }
+
+  /**
+   * Class to encapsulate an instance with its version. Not meant to be exposed
+   * outside this class.
+   * @param <T> instance
+   */
+  static class VersionedInstance<T> {
+    private int version;
+    private T instance;
+
+    VersionedInstance(int version, T instance) {
+      this.version = version;
+      this.instance = instance;
+    }
+
+    public long getVersion() {
+      return version;
+    }
+
+    public T getInstance() {
+      return instance;
+    }
+  }
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/VersionFactoryKey.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/VersionFactoryKey.java
new file mode 100644
index 0000000..bda45f5
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/VersionFactoryKey.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.upgrade;
+
+/**
+ * "Key" element to the Version specific instance factory. Currently it has 2
+ * dimensions -> a 'key' string and a version. This is to support a factory
+ * which returns an instance for a given "key" and "version".
+ */
+public class VersionFactoryKey {
+  private String key;
+  private Integer version;
+
+  public VersionFactoryKey(String key, Integer version) {
+    this.key = key;
+    this.version = version;
+  }
+
+  public String getKey() {
+    return key;
+  }
+
+  public Integer getVersion() {
+    return version;
+  }
+
+  @Override
+  public String toString() {
+    return getClass().getSimpleName() + " : [" + key + ", "
+        + version  + "]";
+  }
+
+  /**
+   * Builder for above key.
+   */
+  public static class Builder {
+    private String key;
+    private Integer version;
+
+    public Builder key(String k) {
+      this.key = k;
+      return this;
+    }
+
+    public Builder version(Integer v) {
+      this.version = v;
+      return this;
+    }
+
+    public VersionFactoryKey build() {
+      return new VersionFactoryKey(key, version);
+    }
+  }
+}
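
For illustration only (not part of this patch): a small walkthrough of the register/get/onFinalize contract, mirroring the "CreateKey" example in the factory javadoc. The class name FactoryWalkthrough and the String instances are placeholders, and it assumes the caller supplies LayoutVersionManager handles (from the same org.apache.hadoop.ozone.upgrade package) reporting MLV=1/SLV=3 before finalization and MLV=3/SLV=3 after, much like the mocks in the unit test below.

    import org.apache.hadoop.ozone.upgrade.LayoutVersionInstanceFactory;
    import org.apache.hadoop.ozone.upgrade.LayoutVersionManager;
    import org.apache.hadoop.ozone.upgrade.VersionFactoryKey;

    final class FactoryWalkthrough {
      private FactoryWalkthrough() { }

      /** preFinalized reports MLV=1/SLV=3; finalized reports MLV=3/SLV=3. */
      static String walkthrough(LayoutVersionManager preFinalized,
                                LayoutVersionManager finalized) {
        LayoutVersionInstanceFactory<String> factory =
            new LayoutVersionInstanceFactory<>();

        // Two versions of the same logical request, as in the javadoc example.
        factory.register(preFinalized,
            new VersionFactoryKey.Builder().key("CreateKey").version(1).build(),
            "CreateKeyV1");
        factory.register(preFinalized,
            new VersionFactoryKey.Builder().key("CreateKey").version(3).build(),
            "CreateKeyV3");

        // No version on the lookup key => the factory falls back to the MLV (1),
        // so the "floor" instance wins while the cluster is pre-finalized.
        String before = factory.get(preFinalized,
            new VersionFactoryKey.Builder().key("CreateKey").build());

        // Finalization bumps the MLV to 3; onFinalize() drops the stale entry
        // and the same lookup now resolves to the newer instance.
        factory.onFinalize(finalized);
        String after = factory.get(finalized,
            new VersionFactoryKey.Builder().key("CreateKey").build());

        return before + " -> " + after;  // "CreateKeyV1 -> CreateKeyV3"
      }
    }
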
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestLayoutVersionInstanceFactory.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestLayoutVersionInstanceFactory.java
new file mode 100644
index 0000000..e0bb185
--- /dev/null
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestLayoutVersionInstanceFactory.java
@@ -0,0 +1,191 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.upgrade;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.util.function.Supplier;
+
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.junit.Test;
+
+/**
+ * Test out APIs of LayoutVersionInstanceFactory.
+ */
+public class TestLayoutVersionInstanceFactory {
+
+  private MockInterface m1 = new MockClassV1();
+  private MockInterface m2 = new MockClassV2();
+
+
+  @Test
+  public void testRegister() throws Exception {
+    LayoutVersionManager lvm = getMockLvm(1, 2);
+    LayoutVersionInstanceFactory<MockInterface> factory =
+        new LayoutVersionInstanceFactory<>();
+
+    assertTrue(factory.register(lvm, getKey("key", 0), m1));
+    assertTrue(factory.register(lvm, getKey("key", 1), m1));
+    assertTrue(factory.register(lvm, getKey("key", 2), m2));
+
+    assertEquals(1, factory.getInstances().size());
+    assertEquals(2, factory.getInstances().get("key").size());
+
+    // Should fail on re-registration.
+    LambdaTestUtils.intercept(IllegalArgumentException.class,
+        "existing entry already",
+        () -> factory.register(lvm, getKey("key", 1), new MockClassV1()));
+    assertEquals(1, factory.getInstances().size());
+
+    // Verify SLV check.
+    LambdaTestUtils.intercept(IllegalArgumentException.class,
+        "version is greater",
+        () -> factory.register(lvm, getKey("key2", 4), new MockClassV2()));
+
+  }
+
+  @Test
+  public void testGet() throws Exception {
+    LayoutVersionManager lvm = getMockLvm(2, 3);
+    LayoutVersionInstanceFactory<MockInterface> factory =
+        new LayoutVersionInstanceFactory<>();
+    assertTrue(factory.register(lvm, getKey("key", 0), null));
+    assertTrue(factory.register(lvm, getKey("key", 1), m1));
+    assertTrue(factory.register(lvm, getKey("key", 3), m2));
+
+    MockInterface val = factory.get(lvm, getKey("key", 2));
+    assertTrue(val instanceof MockClassV1);
+
+    // Not passing in version --> Use MLV.
+    val = factory.get(lvm, getKey("key", null));
+    assertTrue(val instanceof MockClassV1);
+
+    // MLV check.
+    LambdaTestUtils.intercept(IllegalArgumentException.class,
+        "version is greater",
+        () -> factory.get(lvm, getKey("key", 3)));
+
+    // Verify failure on Unknown request.
+    LambdaTestUtils.intercept(IllegalArgumentException.class,
+        "No suitable instance found",
+        () -> factory.get(lvm, getKey("key1", 1)));
+  }
+
+  @Test
+  public void testMethodBasedVersionFactory() {
+    LayoutVersionManager lvm = getMockLvm(1, 2);
+    LayoutVersionInstanceFactory<Supplier<String>> factory =
+        new LayoutVersionInstanceFactory<>();
+
+    MockClassWithVersionedAPIs m = new MockClassWithVersionedAPIs();
+    factory.register(lvm, getKey("method", 1), m::mockMethodV1);
+    factory.register(lvm, getKey("method", 2), m::mockMethodV2);
+
+    Supplier<String> method = factory.get(lvm, getKey("method", 1));
+    assertEquals("v1", method.get());
+  }
+
+
+  private VersionFactoryKey getKey(String key, Integer version) {
+    VersionFactoryKey.Builder vfKey = new VersionFactoryKey.Builder().key(key);
+    if (version != null) {
+      vfKey.version(version);
+    }
+    return vfKey.build();
+  }
+
+
+
+  @Test
+  public void testOnFinalize() {
+    LayoutVersionManager lvm = getMockLvm(1, 3);
+    LayoutVersionInstanceFactory<MockInterface> factory =
+        new LayoutVersionInstanceFactory<>();
+    assertTrue(factory.register(lvm, getKey("key", 1), m1));
+    assertTrue(factory.register(lvm, getKey("key", 3), m2));
+    assertTrue(factory.register(lvm, getKey("key2", 1), m1));
+    assertTrue(factory.register(lvm, getKey("key2", 2), m2));
+
+    MockInterface val = factory.get(lvm, getKey("key", null));
+    assertTrue(val instanceof MockClassV1);
+    assertEquals(2, factory.getInstances().size());
+    assertEquals(2, factory.getInstances().get("key").size());
+
+    val = factory.get(lvm, getKey("key2", null));
+    assertTrue(val instanceof MockClassV1);
+
+    // Finalize the layout version.
+    lvm = getMockLvm(3, 3);
+    factory.onFinalize(lvm);
+
+    val = factory.get(lvm, getKey("key", null));
+    assertTrue(val instanceof MockClassV2);
+    assertEquals(2, factory.getInstances().size());
+    assertEquals(1, factory.getInstances().get("key").size());
+
+    val = factory.get(lvm, getKey("key2", null));
+    assertTrue(val instanceof MockClassV2);
+  }
+
+  private LayoutVersionManager getMockLvm(int mlv, int slv) {
+    LayoutVersionManager lvm = mock(LayoutVersionManager.class);
+    when(lvm.getMetadataLayoutVersion()).thenReturn(mlv);
+    when(lvm.getSoftwareLayoutVersion()).thenReturn(slv);
+    return lvm;
+  }
+
+  /**
+   * Mock Interface.
+   */
+  interface MockInterface {
+    String mockMethod();
+  }
+
+  /**
+   * Mock Impl v1.
+   */
+  static class MockClassV1 implements MockInterface {
+    @Override
+    public String mockMethod() {
+      return getClass().getSimpleName();
+    }
+  }
+
+  /**
+   * Mock Impl v2.
+   */
+  static class MockClassV2 extends MockClassV1 {
+  }
+
+  /**
+   * Mock class with a v1 and v2 method.
+   */
+  static class MockClassWithVersionedAPIs {
+    public String mockMethodV1() {
+      return "v1";
+    }
+
+    public String mockMethodV2() {
+      return "v2";
+    }
+  }
+}
\ No newline at end of file
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index d1e2971..a2a322f 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -107,6 +107,7 @@ message OMRequest {
 
   optional UserInfo userInfo = 4;
 
+  optional LayoutVersion layoutVersion = 5;
 
   optional CreateVolumeRequest              createVolumeRequest            = 11;
   optional SetVolumePropertyRequest         setVolumePropertyRequest       = 12;
@@ -1210,6 +1211,10 @@ message GetS3SecretResponse {
     required S3Secret s3Secret = 2;
 }
 
+message LayoutVersion {
+    required uint64 version = 1;
+}
+
 /**
   This will be used internally by OM to replicate S3 Secret across quorum of
   OM's.
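
The new optional field is presumably what lets the server side pick a request implementation matching the sender's layout version. For illustration only, a hypothetical helper ('LayoutVersionStamper' and 'withLayoutVersion' are invented names) that stamps an outgoing request using the standard protobuf-generated builders for the message declared above:

    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LayoutVersion;
    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;

    final class LayoutVersionStamper {
      private LayoutVersionStamper() { }

      /** Returns a copy of the request carrying the given metadata layout version. */
      static OMRequest withLayoutVersion(OMRequest request, long mlv) {
        return request.toBuilder()
            .setLayoutVersion(LayoutVersion.newBuilder().setVersion(mlv).build())
            .build();
      }
    }
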
diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml
index 4c9a901..895afcb 100644
--- a/hadoop-ozone/ozone-manager/pom.xml
+++ b/hadoop-ozone/ozone-manager/pom.xml
@@ -131,7 +131,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <groupId>org.reflections</groupId>
       <artifactId>reflections</artifactId>
       <version>0.9.12</version>
-      <scope>test</scope>
     </dependency>
 
   </dependencies>
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 1809827..b7a6b4f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -142,7 +142,8 @@ import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
 import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.snapshot.OzoneManagerSnapshotProvider;
-import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager;
+import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManagerImpl;
+import org.apache.hadoop.ozone.om.upgrade.OmLayoutVersionManager;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DBUpdatesRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
@@ -312,6 +313,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
   private KeyProviderCryptoExtension kmsProvider = null;
   private static String keyProviderUriKeyName =
       CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH;
+  private final OMLayoutVersionManagerImpl versionManager;
 
   private boolean allowListAllVolumes;
   // Adding parameters needed for VolumeRequests here, so that during request
@@ -352,6 +354,8 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
     omStorage = new OMStorage(conf);
     omId = omStorage.getOmId();
 
+    versionManager = OMLayoutVersionManagerImpl.initialize(omStorage);
+
     // In case of single OM Node Service there will be no OM Node ID
     // specified, set it to value from om storage
     if (this.omNodeDetails.getOMNodeId() == null) {
@@ -1144,8 +1148,6 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
     metadataManager.start(configuration);
     startSecretManagerIfNecessary();
 
-    OMLayoutVersionManager omVersionManager =
-        OMLayoutVersionManager.initialize(omStorage);
 
     if (certClient != null) {
       caCertPem = CertificateCodec.getPEMEncodedString(
@@ -3732,4 +3734,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
 
   }
 
+  public OmLayoutVersionManager getVersionManager() {
+    return versionManager;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index f43dfba..f0117b5 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.ozone.om.ratis.utils;
 
 import com.google.common.base.Preconditions;
+
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.Table;
@@ -25,42 +26,16 @@ import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.ratis.OMTransactionInfo;
-import org.apache.hadoop.ozone.om.request.bucket.OMBucketCreateRequest;
-import org.apache.hadoop.ozone.om.request.bucket.OMBucketDeleteRequest;
-import org.apache.hadoop.ozone.om.request.bucket.OMBucketSetPropertyRequest;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
 import org.apache.hadoop.ozone.om.request.bucket.acl.OMBucketAddAclRequest;
 import org.apache.hadoop.ozone.om.request.bucket.acl.OMBucketRemoveAclRequest;
 import org.apache.hadoop.ozone.om.request.bucket.acl.OMBucketSetAclRequest;
-import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest;
-import org.apache.hadoop.ozone.om.request.file.OMFileCreateRequest;
-import org.apache.hadoop.ozone.om.request.key.OMKeysDeleteRequest;
-import org.apache.hadoop.ozone.om.request.key.OMAllocateBlockRequest;
-import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequest;
-import org.apache.hadoop.ozone.om.request.key.OMKeyCreateRequest;
-import org.apache.hadoop.ozone.om.request.key.OMKeyDeleteRequest;
-import org.apache.hadoop.ozone.om.request.key.OMKeyPurgeRequest;
-import org.apache.hadoop.ozone.om.request.key.OMKeyRenameRequest;
-import org.apache.hadoop.ozone.om.request.key.OMKeysRenameRequest;
-import org.apache.hadoop.ozone.om.request.key.OMTrashRecoverRequest;
 import org.apache.hadoop.ozone.om.request.key.acl.OMKeyAddAclRequest;
 import org.apache.hadoop.ozone.om.request.key.acl.OMKeyRemoveAclRequest;
 import org.apache.hadoop.ozone.om.request.key.acl.OMKeySetAclRequest;
 import org.apache.hadoop.ozone.om.request.key.acl.prefix.OMPrefixAddAclRequest;
 import org.apache.hadoop.ozone.om.request.key.acl.prefix.OMPrefixRemoveAclRequest;
 import org.apache.hadoop.ozone.om.request.key.acl.prefix.OMPrefixSetAclRequest;
-import org.apache.hadoop.ozone.om.request.s3.multipart.S3InitiateMultipartUploadRequest;
-import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadAbortRequest;
-import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCommitPartRequest;
-import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCompleteRequest;
-import org.apache.hadoop.ozone.om.request.s3.security.S3GetSecretRequest;
-import org.apache.hadoop.ozone.om.request.security.OMCancelDelegationTokenRequest;
-import org.apache.hadoop.ozone.om.request.security.OMGetDelegationTokenRequest;
-import org.apache.hadoop.ozone.om.request.security.OMRenewDelegationTokenRequest;
-import org.apache.hadoop.ozone.om.request.upgrade.OMFinalizeUpgradeProgressRequest;
-import org.apache.hadoop.ozone.om.request.upgrade.OMFinalizeUpgradeRequest;
-import org.apache.hadoop.ozone.om.request.volume.OMVolumeCreateRequest;
-import org.apache.hadoop.ozone.om.request.volume.OMVolumeDeleteRequest;
 import org.apache.hadoop.ozone.om.request.volume.OMVolumeSetOwnerRequest;
 import org.apache.hadoop.ozone.om.request.volume.OMVolumeSetQuotaRequest;
 import org.apache.hadoop.ozone.om.request.volume.acl.OMVolumeAddAclRequest;
@@ -73,6 +48,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
 import org.apache.ratis.util.FileUtils;
 import org.rocksdb.RocksDBException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.nio.file.Path;
@@ -85,131 +62,110 @@ import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.TRANSACTION_INFO_
  */
 public final class OzoneManagerRatisUtils {
 
+  private static final Logger LOG = LoggerFactory
+      .getLogger(OzoneManagerRatisUtils.class);
+
   private OzoneManagerRatisUtils() {
   }
-  /**
-   * Create OMClientRequest which encapsulates the OMRequest.
-   * @param omRequest
-   * @return OMClientRequest
-   * @throws IOException
-   */
-  public static OMClientRequest createClientRequest(OMRequest omRequest) {
+
+  public static OMClientRequest getRequest(OzoneManager om,
+                                           OMRequest omRequest) {
     Type cmdType = omRequest.getCmdType();
     switch (cmdType) {
-    case CreateVolume:
-      return new OMVolumeCreateRequest(omRequest);
-    case SetVolumeProperty:
-      boolean hasQuota = omRequest.getSetVolumePropertyRequest()
-          .hasQuotaInBytes();
-      boolean hasOwner = omRequest.getSetVolumePropertyRequest().hasOwnerName();
-      Preconditions.checkState(hasOwner || hasQuota, "Either Quota or owner " +
-          "should be set in the SetVolumeProperty request");
-      Preconditions.checkState(!(hasOwner && hasQuota), "Either Quota or " +
-          "owner should be set in the SetVolumeProperty request. Should not " +
-          "set both");
-      if (hasQuota) {
-        return new OMVolumeSetQuotaRequest(omRequest);
-      } else {
-        return new OMVolumeSetOwnerRequest(omRequest);
-      }
-    case DeleteVolume:
-      return new OMVolumeDeleteRequest(omRequest);
-    case CreateBucket:
-      return new OMBucketCreateRequest(omRequest);
-    case DeleteBucket:
-      return new OMBucketDeleteRequest(omRequest);
-    case SetBucketProperty:
-      return new OMBucketSetPropertyRequest(omRequest);
-    case AllocateBlock:
-      return new OMAllocateBlockRequest(omRequest);
-    case CreateKey:
-      return new OMKeyCreateRequest(omRequest);
-    case CommitKey:
-      return new OMKeyCommitRequest(omRequest);
-    case DeleteKey:
-      return new OMKeyDeleteRequest(omRequest);
-    case DeleteKeys:
-      return new OMKeysDeleteRequest(omRequest);
-    case RenameKey:
-      return new OMKeyRenameRequest(omRequest);
-    case RenameKeys:
-      return new OMKeysRenameRequest(omRequest);
-    case CreateDirectory:
-      return new OMDirectoryCreateRequest(omRequest);
-    case CreateFile:
-      return new OMFileCreateRequest(omRequest);
-    case PurgeKeys:
-      return new OMKeyPurgeRequest(omRequest);
-    case InitiateMultiPartUpload:
-      return new S3InitiateMultipartUploadRequest(omRequest);
-    case CommitMultiPartUpload:
-      return new S3MultipartUploadCommitPartRequest(omRequest);
-    case AbortMultiPartUpload:
-      return new S3MultipartUploadAbortRequest(omRequest);
-    case CompleteMultiPartUpload:
-      return new S3MultipartUploadCompleteRequest(omRequest);
     case AddAcl:
     case RemoveAcl:
     case SetAcl:
-      return getOMAclRequest(omRequest);
-    case GetDelegationToken:
-      return new OMGetDelegationTokenRequest(omRequest);
-    case CancelDelegationToken:
-      return new OMCancelDelegationTokenRequest(omRequest);
-    case RenewDelegationToken:
-      return new OMRenewDelegationTokenRequest(omRequest);
-    case GetS3Secret:
-      return new S3GetSecretRequest(omRequest);
-    case RecoverTrash:
-      return new OMTrashRecoverRequest(omRequest);
-    case FinalizeUpgrade:
-      return new OMFinalizeUpgradeRequest(omRequest);
-    case FinalizeUpgradeProgress:
-      return new OMFinalizeUpgradeProgressRequest(omRequest);
+      return getOMAclRequest(om, omRequest);
+    case SetVolumeProperty:
+      return getVolumeSetPropertyRequest(om, omRequest);
     default:
-      throw new IllegalStateException("Unrecognized write command " +
-          "type request" + cmdType);
+      Class<? extends OMClientRequest> requestClass =
+          om.getVersionManager()
+              .getRequestHandler(omRequest.getCmdType().name());
+      return getClientRequest(requestClass, omRequest);
+    }
+  }
+
+  private static OMClientRequest getClientRequest(
+      Class<? extends OMClientRequest> requestClass, OMRequest omRequest) {
+    try {
+      return requestClass.getDeclaredConstructor(OMRequest.class)
+          .newInstance(omRequest);
+    } catch (Exception ex) {
+      LOG.error("Unable to get request handler for '{}', current layout " +
+              "version = {}, request factory returned '{}'",
+          omRequest.getCmdType(),
+          omRequest.getLayoutVersion().getVersion(),
+          requestClass.getSimpleName());
     }
+    throw new IllegalStateException("Unrecognized write command " +
+        "type request: " + omRequest.getCmdType());
   }
 
-  private static OMClientRequest getOMAclRequest(OMRequest omRequest) {
+  public static OMClientRequest getOMAclRequest(OzoneManager om,
+                                                OMRequest omRequest) {
     Type cmdType = omRequest.getCmdType();
+    String requestType = null;
     if (Type.AddAcl == cmdType) {
       ObjectType type = omRequest.getAddAclRequest().getObj().getResType();
       if (ObjectType.VOLUME == type) {
-        return new OMVolumeAddAclRequest(omRequest);
+        requestType = OMVolumeAddAclRequest.getRequestType();
       } else if (ObjectType.BUCKET == type) {
-        return new OMBucketAddAclRequest(omRequest);
+        requestType = OMBucketAddAclRequest.getRequestType();
       } else if (ObjectType.KEY == type) {
-        return new OMKeyAddAclRequest(omRequest);
+        requestType = OMKeyAddAclRequest.getRequestType();
       } else {
-        return new OMPrefixAddAclRequest(omRequest);
+        requestType = OMPrefixAddAclRequest.getRequestType();
       }
     } else if (Type.RemoveAcl == cmdType) {
       ObjectType type = omRequest.getRemoveAclRequest().getObj().getResType();
       if (ObjectType.VOLUME == type) {
-        return new OMVolumeRemoveAclRequest(omRequest);
+        requestType = OMVolumeRemoveAclRequest.getRequestType();
       } else if (ObjectType.BUCKET == type) {
-        return new OMBucketRemoveAclRequest(omRequest);
+        requestType = OMBucketRemoveAclRequest.getRequestType();
       } else if (ObjectType.KEY == type) {
-        return new OMKeyRemoveAclRequest(omRequest);
+        requestType = OMKeyRemoveAclRequest.getRequestType();
       } else {
-        return new OMPrefixRemoveAclRequest(omRequest);
+        requestType = OMPrefixRemoveAclRequest.getRequestType();
       }
     } else {
       ObjectType type = omRequest.getSetAclRequest().getObj().getResType();
       if (ObjectType.VOLUME == type) {
-        return new OMVolumeSetAclRequest(omRequest);
+        requestType = OMVolumeSetAclRequest.getRequestType();
       } else if (ObjectType.BUCKET == type) {
-        return new OMBucketSetAclRequest(omRequest);
+        requestType = OMBucketSetAclRequest.getRequestType();
       } else if (ObjectType.KEY == type) {
-        return new OMKeySetAclRequest(omRequest);
+        requestType = OMKeySetAclRequest.getRequestType();
       } else {
-        return new OMPrefixSetAclRequest(omRequest);
+        requestType = OMPrefixSetAclRequest.getRequestType();
       }
     }
+    Class<? extends OMClientRequest> requestClass =
+        om.getVersionManager().getRequestHandler(requestType);
+    return getClientRequest(requestClass, omRequest);
   }
 
+  public static OMClientRequest getVolumeSetPropertyRequest(
+      OzoneManager om, OMRequest omRequest) {
+    boolean hasQuota = omRequest.getSetVolumePropertyRequest()
+        .hasQuotaInBytes();
+    boolean hasOwner = omRequest.getSetVolumePropertyRequest().hasOwnerName();
+    Preconditions.checkState(hasOwner || hasQuota,
+        "Either Quota or owner " +
+            "should be set in the SetVolumeProperty request");
+    Preconditions.checkState(!(hasOwner && hasQuota),
+        "Either Quota or " +
+            "owner should be set in the SetVolumeProperty request. Should not "
+            + "set both");
+
+    String requestType = hasQuota ? OMVolumeSetQuotaRequest.getRequestType() :
+        OMVolumeSetOwnerRequest.getRequestType();
+    Class<? extends OMClientRequest> requestClass =
+        om.getVersionManager().getRequestHandler(requestType);
+    return getClientRequest(requestClass, omRequest);
+  }
+
+
   /**
    * Convert exception result to {@link OzoneManagerProtocolProtos.Status}.
    * @param exception
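
Note: the new getRequest()/getClientRequest() pair above resolves the handler class
supplied by om.getVersionManager().getRequestHandler() and instantiates it through its
single-argument OMRequest constructor. The following is a minimal, self-contained sketch
of that reflection-based dispatch; the class names and the hand-rolled registry are
hypothetical stand-ins for illustration only, not the Ozone API.

import java.util.HashMap;
import java.util.Map;

final class RequestFactorySketch {

  /** Stand-in for OMClientRequest; real handlers wrap an OMRequest proto. */
  abstract static class ClientRequest {
    private final String payload;
    ClientRequest(String payload) {
      this.payload = payload;
    }
    String payload() {
      return payload;
    }
  }

  static final class CreateKeyRequest extends ClientRequest {
    CreateKeyRequest(String payload) {
      super(payload);
    }
  }

  // Registry keyed by request-type name, analogous to getRequestType() values.
  private static final Map<String, Class<? extends ClientRequest>> HANDLERS =
      new HashMap<>();
  static {
    HANDLERS.put("CreateKey", CreateKeyRequest.class);
  }

  static ClientRequest create(String type, String payload) {
    Class<? extends ClientRequest> clazz = HANDLERS.get(type);
    if (clazz == null) {
      throw new IllegalStateException("Unrecognized request type: " + type);
    }
    try {
      // Same technique as getClientRequest(): look up the single-argument
      // constructor reflectively and invoke it with the incoming request.
      return clazz.getDeclaredConstructor(String.class).newInstance(payload);
    } catch (ReflectiveOperationException e) {
      throw new IllegalStateException("Cannot instantiate handler for " + type, e);
    }
  }

  public static void main(String[] args) {
    ClientRequest req = create("CreateKey", "key=/vol/bucket/key1");
    System.out.println(req.getClass().getSimpleName() + " -> " + req.payload());
  }

  private RequestFactorySketch() {
  }
}

The real factory differs only in that the type-to-class mapping is provided per layout
version by the OM layout version manager rather than by a static map.
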
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
index 728a624..1f98d83 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LayoutVersion;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
@@ -92,7 +93,12 @@ public abstract class OMClientRequest implements RequestAuditor {
    */
   public OMRequest preExecute(OzoneManager ozoneManager)
       throws IOException {
-    omRequest = getOmRequest().toBuilder().setUserInfo(getUserInfo()).build();
+    LayoutVersion layoutVersion = LayoutVersion.newBuilder()
+        .setVersion(ozoneManager.getVersionManager().getMetadataLayoutVersion())
+        .build();
+    omRequest =
+        getOmRequest().toBuilder()
+            .setUserInfo(getUserInfo()).setLayoutVersion(layoutVersion).build();
     return omRequest;
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
index fd303e7..33faa0a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
@@ -76,6 +76,7 @@ import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_L
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .CryptoProtocolVersionProto.ENCRYPTION_ZONES;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.CreateBucket;
 
 /**
  * Handles CreateBucket Request.
@@ -335,4 +336,7 @@ public class OMBucketCreateRequest extends OMClientRequest {
 
   }
 
+  public static String getRequestType() {
+    return CreateBucket.name();
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java
index 91aef6a..33ea990 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.DeleteBucket;
 
 /**
  * Handles DeleteBucket Request.
@@ -169,4 +170,8 @@ public class OMBucketDeleteRequest extends OMClientRequest {
       return omClientResponse;
     }
   }
+
+  public static String getRequestType() {
+    return DeleteBucket.name();
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java
index 583facb..0275d7f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java
@@ -62,6 +62,7 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.SetBucketProperty;
 
 /**
  * Handle SetBucketProperty Request.
@@ -270,4 +271,8 @@ public class OMBucketSetPropertyRequest extends OMClientRequest {
     }
     return true;
   }
+
+  public static String getRequestType() {
+    return SetBucketProperty.name();
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAddAclRequest.java
index 78afeff..45cfdcf 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAddAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAddAclRequest.java
@@ -18,12 +18,15 @@
 
 package org.apache.hadoop.ozone.om.request.bucket.acl;
 
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.AddAcl;
+
 import java.io.IOException;
 import java.util.List;
 
 import com.google.common.collect.Lists;
 import org.apache.hadoop.ozone.om.OMMetrics;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType;
 import org.apache.hadoop.ozone.util.BooleanBiFunction;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -115,5 +118,8 @@ public class OMBucketAddAclRequest extends OMBucketAclRequest {
     }
   }
 
+  public static String getRequestType() {
+    return AddAcl.name() + "-" + ObjectType.BUCKET.name();
+  }
 }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketRemoveAclRequest.java
index 8b6fdba..1c8af01 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketRemoveAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketRemoveAclRequest.java
@@ -18,10 +18,13 @@
 
 package org.apache.hadoop.ozone.om.request.bucket.acl;
 
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.RemoveAcl;
+
 import java.io.IOException;
 import java.util.List;
 
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -112,5 +115,9 @@ public class OMBucketRemoveAclRequest extends OMBucketAclRequest {
       }
     }
   }
+
+  public static String getRequestType() {
+    return RemoveAcl.name() + "-" + ObjectType.BUCKET.name();
+  }
 }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java
index cfc4eb4..915342e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java
@@ -18,11 +18,14 @@
 
 package org.apache.hadoop.ozone.om.request.bucket.acl;
 
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.SetAcl;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -113,5 +116,9 @@ public class OMBucketSetAclRequest extends OMBucketAclRequest {
       }
     }
   }
+
+  public static String getRequestType() {
+    return SetAcl.name() + "-" + ObjectType.BUCKET.name();
+  }
 }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
index 7b2ab51..3ed7793 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
@@ -75,6 +75,8 @@ import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryR
 import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH;
 import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.NONE;
 import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.CreateDirectory;
+
 /**
  * Handle create directory request.
  */
@@ -353,4 +355,7 @@ public class OMDirectoryCreateRequest extends OMKeyRequest {
         .setUpdateID(objectId);
   }
 
+  public static String getRequestType() {
+    return CreateDirectory.name();
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
index 367e4ba..a2828af 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
@@ -56,7 +56,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateF
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
 import org.apache.hadoop.util.Time;
@@ -68,6 +67,7 @@ import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_L
 import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS;
 import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH;
 import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.CreateFile;
 
 /**
  * Handles create file request.
@@ -307,7 +307,7 @@ public class OMFileCreateRequest extends OMKeyRequest {
           .setKeyInfo(omKeyInfo.getProtobuf())
           .setID(clientID)
           .setOpenVersion(openVersion).build())
-          .setCmdType(Type.CreateFile);
+          .setCmdType(CreateFile);
       omClientResponse = new OMFileCreateResponse(omResponse.build(),
           omKeyInfo, missingParentInfos, clientID, omVolumeArgs, omBucketInfo);
 
@@ -316,7 +316,7 @@ public class OMFileCreateRequest extends OMKeyRequest {
       result = Result.FAILURE;
       exception = ex;
       omMetrics.incNumCreateFileFails();
-      omResponse.setCmdType(Type.CreateFile);
+      omResponse.setCmdType(CreateFile);
       omClientResponse = new OMFileCreateResponse(createErrorOMResponse(
             omResponse, exception));
     } finally {
@@ -362,4 +362,8 @@ public class OMFileCreateRequest extends OMKeyRequest {
           OMException.ResultCodes.DIRECTORY_NOT_FOUND);
     }
   }
+
+  public static String getRequestType() {
+    return CreateFile.name();
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java
index afd6162..59911df 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.AllocateBlock;
 
 /**
  * Handles allocate block request.
@@ -243,4 +244,8 @@ public class OMAllocateBlockRequest extends OMKeyRequest {
 
     return omClientResponse;
   }
+
+  public static String getRequestType() {
+    return AllocateBlock.name();
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMECKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMECKeyCreateRequest.java
new file mode 100644
index 0000000..56e7e47
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMECKeyCreateRequest.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.key;
+
+import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.CREATE_EC;
+
+import java.io.IOException;
+
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.upgrade.BelongsToLayoutVersion;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+
+/**
+ * Handles Create EC Key request. (To be removed later)
+ */
+@BelongsToLayoutVersion(CREATE_EC)
+public class OMECKeyCreateRequest extends OMKeyCreateRequest {
+
+  public OMECKeyCreateRequest(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override
+  public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
+    // Placeholder: V2 (EC) preExecute implementation goes here.
+    return null;
+  }
+
+  @Override
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+    // Placeholder: V2 (EC) validateAndUpdateCache implementation goes here.
+    return null;
+  }
+
+}
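
Note: the @BelongsToLayoutVersion(CREATE_EC) annotation above ties this handler to a
layout feature, so it should only be served once that feature's layout version has been
reached. Below is a self-contained sketch of that gating idea with hypothetical names;
the real annotation, feature enum and version manager live in the
org.apache.hadoop.ozone.om.upgrade package and are not shown in this diff.

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import java.util.ArrayList;
import java.util.List;

public final class LayoutGateSketch {

  enum Feature {
    INITIAL_VERSION(0),
    CREATE_EC(1);

    private final int layoutVersion;
    Feature(int layoutVersion) {
      this.layoutVersion = layoutVersion;
    }
    int layoutVersion() {
      return layoutVersion;
    }
  }

  @Retention(RetentionPolicy.RUNTIME)
  @Target(ElementType.TYPE)
  @interface BelongsToVersion {
    Feature value();
  }

  @BelongsToVersion(Feature.INITIAL_VERSION)
  static class KeyCreateHandler { }

  @BelongsToVersion(Feature.CREATE_EC)
  static class EcKeyCreateHandler extends KeyCreateHandler { }

  /** Keep only handlers whose declared feature is already usable. */
  static List<Class<?>> usableHandlers(int currentLayoutVersion,
      Class<?>... candidates) {
    List<Class<?>> result = new ArrayList<>();
    for (Class<?> c : candidates) {
      BelongsToVersion ann = c.getAnnotation(BelongsToVersion.class);
      int needed = ann == null ? 0 : ann.value().layoutVersion();
      if (needed <= currentLayoutVersion) {
        result.add(c);
      }
    }
    return result;
  }

  public static void main(String[] args) {
    // Pre-finalization (layout version 0): the EC handler is filtered out.
    System.out.println(usableHandlers(0,
        KeyCreateHandler.class, EcKeyCreateHandler.class));
    // After finalizing CREATE_EC (layout version 1): both are usable.
    System.out.println(usableHandlers(1,
        KeyCreateHandler.class, EcKeyCreateHandler.class));
  }

  private LayoutGateSketch() {
  }
}
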
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
index 29d0243..f246b1c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.CommitKey;
 
 /**
  * Handles CommitKey request.
@@ -253,4 +254,8 @@ public class OMKeyCommitRequest extends OMKeyRequest {
 
     return omClientResponse;
   }
+
+  public static String getRequestType() {
+    return CommitKey.name();
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
index f16153d..9e550f5 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
@@ -67,6 +67,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
 import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS;
 import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.CreateKey;
 
 /**
  * Handles CreateKey request.
@@ -359,4 +360,7 @@ public class OMKeyCreateRequest extends OMKeyRequest {
     return omClientResponse;
   }
 
+  public static String getRequestType() {
+    return CreateKey.name();
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
index e27b7e1..7d536c1 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.DeleteKey;
 
 /**
  * Handles DeleteKey request.
@@ -205,4 +206,8 @@ public class OMKeyDeleteRequest extends OMKeyRequest {
 
     return omClientResponse;
   }
+
+  public static String getRequestType() {
+    return DeleteKey.name();
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java
index ce7f1e9..51f27fe 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.ozone.om.request.key;
 
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.PurgeKeys;
+
 import java.util.ArrayList;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
@@ -71,4 +73,8 @@ public class OMKeyPurgeRequest extends OMKeyRequest {
 
     return omClientResponse;
   }
+
+  public static String getRequestType() {
+    return PurgeKeys.name();
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java
index 4e7c05c..8243859 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.RenameKey;
 
 /**
  * Handles rename key request.
@@ -236,4 +237,8 @@ public class OMKeyRenameRequest extends OMKeyRequest {
     auditMap.put(OzoneConsts.DST_KEY, renameKeyRequest.getToKeyName());
     return auditMap;
   }
+
+  public static String getRequestType() {
+    return RenameKey.name();
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java
index 907b501..18b5f84 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java
@@ -60,6 +60,7 @@ import static org.apache.hadoop.ozone.audit.OMAction.DELETE_KEYS;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.PARTIAL_DELETE;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.DeleteKeys;
 
 /**
  * Handles DeleteKey request.
@@ -254,4 +255,8 @@ public class OMKeysDeleteRequest extends OMKeyRequest {
     auditMap.put(UNDELETED_KEYS_LIST, String.join(",", unDeletedKeys));
   }
 
+  public static String getRequestType() {
+    return DeleteKeys.name();
+  }
+
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java
index abaa4ae..a17455f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java
@@ -59,6 +59,7 @@ import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.
 import static org.apache.hadoop.ozone.OzoneConsts.RENAMED_KEYS_MAP;
 import static org.apache.hadoop.ozone.OzoneConsts.UNRENAMED_KEYS_MAP;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.RenameKeys;
 
 /**
  * Handles rename keys request.
@@ -248,6 +249,10 @@ public class OMKeysRenameRequest extends OMKeyRequest {
     return omClientResponse;
   }
 
+  public static String getRequestType() {
+    return RenameKeys.name();
+  }
+
   /**
    * Build audit map for RenameKeys request.
    *
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMTrashRecoverRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMTrashRecoverRequest.java
index eca5294..2ecc4e8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMTrashRecoverRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMTrashRecoverRequest.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status;
 
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.RecoverTrash;
 
 /**
  * Handles RecoverTrash request.
@@ -140,4 +141,7 @@ public class OMTrashRecoverRequest extends OMKeyRequest {
     return omClientResponse;
   }
 
+  public static String getRequestType() {
+    return RecoverTrash.name();
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java
index 3697cb8..5527742 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.ozone.om.request.key.acl;
 
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.AddAcl;
+
 import java.io.IOException;
 import java.util.List;
 
@@ -27,6 +29,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.key.acl.OMKeyAclResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -101,5 +104,9 @@ public class OMKeyAddAclRequest extends OMKeyAclRequest {
     // No need to check not null here, this will be never called with null.
     return omKeyInfo.addAcl(ozoneAcls.get(0));
   }
+
+  public static String getRequestType() {
+    return AddAcl.name() + "-" + ObjectType.KEY;
+  }
 }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java
index f0d13be..cb3d16a7 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.ozone.om.request.key.acl;
 
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.RemoveAcl;
+
 import java.io.IOException;
 import java.util.List;
 
@@ -27,6 +29,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.key.acl.OMKeyAclResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -102,5 +105,9 @@ public class OMKeyRemoveAclRequest extends OMKeyAclRequest {
     // No need to check not null here, this will be never called with null.
     return omKeyInfo.removeAcl(ozoneAcls.get(0));
   }
+
+  public static String getRequestType() {
+    return RemoveAcl.name() + "-" + ObjectType.KEY;
+  }
 }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java
index 6d904e6..b76862f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.ozone.om.request.key.acl;
 
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.SetAcl;
+
 import java.io.IOException;
 import java.util.List;
 
@@ -28,6 +30,7 @@ import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.key.acl.OMKeyAclResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -98,5 +101,9 @@ public class OMKeySetAclRequest extends OMKeyAclRequest {
     // No need to check not null here, this will be never called with null.
     return omKeyInfo.setAcls(ozoneAcls);
   }
+
+  public static String getRequestType() {
+    return SetAcl.name() + "-" + ObjectType.KEY;
+  }
 }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java
index 7160042..e4dcea6 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.ozone.om.request.key.acl.prefix;
 
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.AddAcl;
+
 import java.io.IOException;
 import java.util.List;
 
@@ -28,6 +30,7 @@ import org.apache.hadoop.ozone.om.PrefixManagerImpl.OMPrefixAclOpResult;
 import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.key.acl.prefix.OMPrefixAclResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
 import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
 import org.slf4j.Logger;
@@ -123,5 +126,9 @@ public class OMPrefixAddAclRequest extends OMPrefixAclRequest {
     return prefixManager.addAcl(ozoneObj, ozoneAcls.get(0), omPrefixInfo,
         trxnLogIndex);
   }
+
+  public static String getRequestType() {
+    return AddAcl.name() + "-" + ObjectType.PREFIX;
+  }
 }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java
index 482250d..7af93ae 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.ozone.om.request.key.acl.prefix;
 
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.RemoveAcl;
+
 import java.io.IOException;
 import java.util.List;
 
@@ -28,6 +30,7 @@ import org.apache.hadoop.ozone.om.PrefixManagerImpl.OMPrefixAclOpResult;
 import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.key.acl.prefix.OMPrefixAclResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
 import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
 import org.slf4j.Logger;
@@ -119,5 +122,9 @@ public class OMPrefixRemoveAclRequest extends OMPrefixAclRequest {
       OmPrefixInfo omPrefixInfo, long trxnLogIndex) throws IOException {
     return prefixManager.removeAcl(ozoneObj, ozoneAcls.get(0), omPrefixInfo);
   }
+
+  public static String getRequestType() {
+    return RemoveAcl.name() + "-" + ObjectType.PREFIX;
+  }
 }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java
index 144e90b..a0afece 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.ozone.om.request.key.acl.prefix;
 
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.SetAcl;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
@@ -28,6 +30,7 @@ import org.apache.hadoop.ozone.om.PrefixManagerImpl.OMPrefixAclOpResult;
 import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.key.acl.prefix.OMPrefixAclResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
 import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
 import org.slf4j.Logger;
@@ -116,5 +119,9 @@ public class OMPrefixSetAclRequest extends OMPrefixAclRequest {
     return prefixManager.setAcl(ozoneObj, ozoneAcls, omPrefixInfo,
         trxnLogIndex);
   }
+
+  public static String getRequestType() {
+    return SetAcl.name() + "-" + ObjectType.PREFIX;
+  }
 }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
index 08063b6..7c5e0e3 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
@@ -54,6 +54,7 @@ import java.util.UUID;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_SUPPORTED_OPERATION;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.InitiateMultiPartUpload;
 
 /**
  * Handles initiate multipart upload request.
@@ -250,4 +251,8 @@ public class S3InitiateMultipartUploadRequest extends OMKeyRequest {
 
     return omClientResponse;
   }
+
+  public static String getRequestType() {
+    return InitiateMultiPartUpload.name();
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
index 8b53e70..a0263ee 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.AbortMultiPartUpload;
 
 /**
  * Handles Abort of multipart upload request.
@@ -211,4 +212,8 @@ public class S3MultipartUploadAbortRequest extends OMKeyRequest {
 
     return omClientResponse;
   }
+
+  public static String getRequestType() {
+    return AbortMultiPartUpload.name();
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
index f471de4..31b245b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
@@ -58,6 +58,7 @@ import java.util.stream.Collectors;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.CommitMultiPartUpload;
 
 /**
  * Handle Multipart upload commit upload part file.
@@ -279,5 +280,8 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
     return omClientResponse;
   }
 
+  public static String getRequestType() {
+    return CommitMultiPartUpload.name();
+  }
 }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
index dff022b..fd4af37 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
@@ -57,6 +57,8 @@ import org.apache.commons.codec.digest.DigestUtils;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_MULTIPART_MIN_SIZE;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.CompleteMultiPartUpload;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -377,5 +379,9 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
         new CacheKey<>(multipartKey),
         new CacheValue<>(Optional.absent(), transactionLogIndex));
   }
+
+  public static String getRequestType() {
+    return CompleteMultiPartUpload.name();
+  }
 }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java
index b240373..691b278 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.S3_SECRET_LOCK;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.GetS3Secret;
 
 /**
  * Handles GetS3Secret request.
@@ -186,4 +187,8 @@ public class S3GetSecretRequest extends OMClientRequest {
     }
     return omClientResponse;
   }
+
+  public static String getRequestType() {
+    return GetS3Secret.name();
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMCancelDelegationTokenRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMCancelDelegationTokenRequest.java
index e931735..0b159bb 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMCancelDelegationTokenRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMCancelDelegationTokenRequest.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.ozone.om.request.security;
 
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.CancelDelegationToken;
+
 import com.google.common.base.Optional;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OzoneManager;
@@ -108,7 +110,6 @@ public class OMCancelDelegationTokenRequest extends OMClientRequest {
     return omClientResponse;
   }
 
-
   public Token<OzoneTokenIdentifier> getToken() {
     CancelDelegationTokenRequestProto cancelDelegationTokenRequest =
         getOmRequest().getCancelDelegationTokenRequest();
@@ -116,4 +117,8 @@ public class OMCancelDelegationTokenRequest extends OMClientRequest {
     return OMPBHelper.convertToDelegationToken(
         cancelDelegationTokenRequest.getToken());
   }
+
+  public static String getRequestType() {
+    return CancelDelegationToken.name();
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMGetDelegationTokenRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMGetDelegationTokenRequest.java
index 4d2a6b4..81eb3d2 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMGetDelegationTokenRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMGetDelegationTokenRequest.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.ozone.om.request.security;
 
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.GetDelegationToken;
+
 import com.google.common.base.Optional;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
@@ -64,7 +66,6 @@ public class OMGetDelegationTokenRequest extends OMClientRequest {
     Token<OzoneTokenIdentifier> token = ozoneManager
         .getDelegationToken(new Text(getDelegationTokenRequest.getRenewer()));
 
-
     // Client issues GetDelegationToken request, when received by OM leader
     // it will generate a token. Original GetDelegationToken request is
     // converted to UpdateGetDelegationToken request with the generated token
@@ -181,4 +182,8 @@ public class OMGetDelegationTokenRequest extends OMClientRequest {
 
     return omClientResponse;
   }
+
+  public static String getRequestType() {
+    return GetDelegationToken.name();
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMRenewDelegationTokenRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMRenewDelegationTokenRequest.java
index 360ca4f..a683216 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMRenewDelegationTokenRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMRenewDelegationTokenRequest.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.ozone.om.request.security;
 
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.RenewDelegationToken;
+
 import java.io.IOException;
 
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
@@ -156,4 +158,8 @@ public class OMRenewDelegationTokenRequest extends OMClientRequest {
 
     return omClientResponse;
   }
+
+  public static String getRequestType() {
+    return RenewDelegationToken.name();
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMFinalizeUpgradeProgressRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMFinalizeUpgradeProgressRequest.java
index 3cb9210..9a8d56e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMFinalizeUpgradeProgressRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMFinalizeUpgradeProgressRequest.java
@@ -17,6 +17,8 @@
 
 package org.apache.hadoop.ozone.om.request.upgrade;
 
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.FinalizeUpgradeProgress;
+
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
@@ -84,4 +86,8 @@ public class OMFinalizeUpgradeProgressRequest extends OMClientRequest {
 
     return response;
   }
+
+  public static String getRequestType() {
+    return FinalizeUpgradeProgress.name();
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMFinalizeUpgradeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMFinalizeUpgradeRequest.java
index 772eae7..1b1897a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMFinalizeUpgradeRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMFinalizeUpgradeRequest.java
@@ -17,13 +17,14 @@
 
 package org.apache.hadoop.ozone.om.request.upgrade;
 
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.FinalizeUpgrade;
+
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.om.response.upgrade.OMFinalizeUpgradeResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.FinalizeUpgradeRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.FinalizeUpgradeResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
@@ -53,7 +54,7 @@ public class OMFinalizeUpgradeRequest extends OMClientRequest {
     LOG.trace("Request: {}", getOmRequest());
     OMResponse.Builder responseBuilder =
         OmResponseUtil.getOMResponseBuilder(getOmRequest());
-    responseBuilder.setCmdType(OzoneManagerProtocolProtos.Type.FinalizeUpgrade);
+    responseBuilder.setCmdType(FinalizeUpgrade);
     OMClientResponse response = null;
 
     try {
@@ -77,4 +78,8 @@ public class OMFinalizeUpgradeRequest extends OMClientRequest {
 
     return response;
   }
+
+  public static String getRequestType() {
+    return FinalizeUpgrade.name();
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java
index 7e2ccd9..ce1b2cb 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.util.Time;
 
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.USER_LOCK;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.CreateVolume;
 
 /**
  * Handles volume create request.
@@ -201,6 +202,10 @@ public class OMVolumeCreateRequest extends OMVolumeRequest {
     }
     return omClientResponse;
   }
+
+  public static String getRequestType() {
+    return CreateVolume.name();
+  }
 }
 
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java
index ce93e26..05480cc 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java
@@ -51,6 +51,8 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.USER_LOCK;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.DeleteVolume;
+
 /**
  * Handles volume delete request.
  */
@@ -162,5 +164,9 @@ public class OMVolumeDeleteRequest extends OMVolumeRequest {
     }
     return omClientResponse;
   }
+
+  public static String getRequestType() {
+    return DeleteVolume.name();
+  }
 }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java
index 6873086..d95d271 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java
@@ -48,6 +48,7 @@ import java.io.IOException;
 import java.util.Map;
 
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.SetVolumeProperty;
 
 /**
  * Handle set owner request for volume.
@@ -208,5 +209,9 @@ public class OMVolumeSetOwnerRequest extends OMVolumeRequest {
     }
     return omClientResponse;
   }
+
+  public static String getRequestType() {
+    return SetVolumeProperty.name() + "-Owner";
+  }
 }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java
index fc54c88..3b1b634 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.SetVolumeProperty;
 
 /**
  * Handles set Quota request for volume.
@@ -217,6 +218,10 @@ public class OMVolumeSetQuotaRequest extends OMVolumeRequest {
     }
     return true;
   }
+
+  public static String getRequestType() {
+    return SetVolumeProperty.name() + "-Quota";
+  }
 }
 
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java
index 12008e2..d65e0df 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.ozone.om.request.volume.acl;
 
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.AddAcl;
+
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import org.apache.hadoop.hdds.scm.storage.CheckedBiFunction;
@@ -28,6 +30,7 @@ import org.apache.hadoop.ozone.om.response.volume.OMVolumeAclOpResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -113,4 +116,8 @@ public class OMVolumeAddAclRequest extends OMVolumeAclRequest {
           getOmRequest());
     }
   }
+
+  public static String getRequestType() {
+    return AddAcl.name() + "-" + ObjectType.VOLUME;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java
index 461ad48..e386e20 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.ozone.om.request.volume.acl;
 
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.RemoveAcl;
+
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import org.apache.hadoop.hdds.scm.storage.CheckedBiFunction;
@@ -28,6 +30,7 @@ import org.apache.hadoop.ozone.om.response.volume.OMVolumeAclOpResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -112,4 +115,8 @@ public class OMVolumeRemoveAclRequest extends OMVolumeAclRequest {
           getOmRequest());
     }
   }
+
+  public static String getRequestType() {
+    return RemoveAcl.name() + "-" + ObjectType.VOLUME;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java
index c73e19e..6a0d0f1 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.ozone.om.request.volume.acl;
 
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.SetAcl;
+
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.scm.storage.CheckedBiFunction;
 import org.apache.hadoop.ozone.OzoneAcl;
@@ -27,6 +29,7 @@ import org.apache.hadoop.ozone.om.response.volume.OMVolumeAclOpResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -109,4 +112,8 @@ public class OMVolumeSetAclRequest extends OMVolumeAclRequest {
           getOmRequest());
     }
   }
+
+  public static String getRequestType() {
+    return SetAcl.name() + "-" + ObjectType.VOLUME;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureAPI.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/BelongsToLayoutVersion.java
similarity index 78%
copy from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureAPI.java
copy to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/BelongsToLayoutVersion.java
index 2da8b38..cfee3e6 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureAPI.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/BelongsToLayoutVersion.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -23,13 +23,11 @@ import java.lang.annotation.Retention;
 import java.lang.annotation.RetentionPolicy;
 import java.lang.annotation.Target;
 
-import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeatureCatalog.OMLayoutFeature;
-
 /**
- * Annotation to specify if an API is backed up by a Layout Feature.
+ * Annotation to mark a class that belongs to a specific Layout Version.
  */
-@Target(ElementType.METHOD)
+@Target(ElementType.TYPE)
 @Retention(RetentionPolicy.RUNTIME)
-public @interface OMLayoutFeatureAPI {
+public @interface BelongsToLayoutVersion {
   OMLayoutFeature value();
 }
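
For context: a request handler opts into a later layout version by carrying this
annotation at the class level; the registration logic in OMLayoutVersionManagerImpl
further below reads the annotation and files the class under that version. A minimal
sketch, assuming an EC-specific handler that reuses the "CreateKey" request type and
extends the existing OMKeyCreateRequest (the actual OMECKeyCreateRequest on this
branch may differ):

    package org.apache.hadoop.ozone.om.request.key;

    import org.apache.hadoop.ozone.om.upgrade.BelongsToLayoutVersion;
    import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature;
    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;

    // Resolved for the "CreateKey" request type only once CREATE_EC is finalized.
    @BelongsToLayoutVersion(OMLayoutFeature.CREATE_EC)
    public class OMECKeyCreateRequest extends OMKeyCreateRequest {

      // Every concrete OM request class is expected to expose a constructor
      // taking an OMRequest (see TestOmVersionManagerRequestFactory below).
      public OMECKeyCreateRequest(OMRequest omRequest) {
        super(omRequest);
      }

      // getRequestType() is assumed to be inherited from OMKeyCreateRequest, so
      // both classes register under the same key at different layout versions.
    }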
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureAPI.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/DisallowedUntilLayoutVersion.java
similarity index 81%
copy from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureAPI.java
copy to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/DisallowedUntilLayoutVersion.java
index 2da8b38..c437c1d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureAPI.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/DisallowedUntilLayoutVersion.java
@@ -23,13 +23,13 @@ import java.lang.annotation.Retention;
 import java.lang.annotation.RetentionPolicy;
 import java.lang.annotation.Target;
 
-import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeatureCatalog.OMLayoutFeature;
-
 /**
- * Annotation to specify if an API is backed up by a Layout Feature.
+ * Annotation used to "disallow" an API if the current layout version does
+ * not include the associated layout feature. Helps keep the method logic
+ * and the upgrade-related cross-cutting concern separate.
  */
 @Target(ElementType.METHOD)
 @Retention(RetentionPolicy.RUNTIME)
-public @interface OMLayoutFeatureAPI {
+public @interface DisallowedUntilLayoutVersion {
   OMLayoutFeature value();
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeature.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeature.java
new file mode 100644
index 0000000..0d22b01
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeature.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.upgrade;
+
+import java.util.Optional;
+
+import org.apache.hadoop.ozone.upgrade.LayoutFeature;
+
+/**
+ * List of OM Layout features / versions.
+ */
+public enum OMLayoutFeature implements LayoutFeature {
+  INITIAL_VERSION(0, "Initial Layout Version"),
+  CREATE_EC(1, ""),
+  NEW_FEATURE(2, "new feature", new NewOmFeatureUpgradeAction());
+
+  private int layoutVersion;
+  private String description;
+  private Optional<OmUpgradeAction> omUpgradeAction = Optional.empty();
+
+  OMLayoutFeature(final int layoutVersion, String description) {
+    this.layoutVersion = layoutVersion;
+    this.description = description;
+  }
+
+  OMLayoutFeature(final int layoutVersion, String description,
+                  OmUpgradeAction upgradeAction) {
+    this.layoutVersion = layoutVersion;
+    this.description = description;
+    omUpgradeAction = Optional.of(upgradeAction);
+  }
+
+  @Override
+  public int layoutVersion() {
+    return layoutVersion;
+  }
+
+  @Override
+  public String description() {
+    return description;
+  }
+
+  @Override
+  public Optional<OmUpgradeAction> onFinalizeAction() {
+    return omUpgradeAction;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureAspect.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureAspect.java
index a92e3b4..dbc0259 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureAspect.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureAspect.java
@@ -28,6 +28,7 @@ import org.aspectj.lang.annotation.Aspect;
 import org.aspectj.lang.annotation.Before;
 import org.aspectj.lang.reflect.MethodSignature;
 
+
 /**
  * 'Aspect' for OM Layout Feature API. All methods annotated with the
  * specific annotation will have pre-processing done here to check layout
@@ -36,19 +37,22 @@ import org.aspectj.lang.reflect.MethodSignature;
 @Aspect
 public class OMLayoutFeatureAspect {
 
-  @Before("@annotation(OMLayoutFeatureAPI) && execution(* *(..))")
+  @Before("@annotation(DisallowedUntilLayoutVersion) && execution(* *(..))")
   public void checkLayoutFeature(JoinPoint joinPoint) throws Throwable {
     String featureName = ((MethodSignature) joinPoint.getSignature())
-        .getMethod().getAnnotation(OMLayoutFeatureAPI.class).value().name();
-    LayoutVersionManager lvm = OMLayoutVersionManager.getInstance();
+        .getMethod().getAnnotation(DisallowedUntilLayoutVersion.class)
+        .value().name();
+    LayoutVersionManager lvm = OMLayoutVersionManagerImpl.getInstance();
     if (!lvm.isAllowed(featureName)) {
       LayoutFeature layoutFeature = lvm.getFeature(featureName);
       throw new OMException(String.format("Operation %s cannot be invoked " +
-          "before finalization. Current layout version = %d, feature's layout" +
-              " version = %d",
-          featureName,
-          lvm.getMetadataLayoutVersion(),
-          layoutFeature.layoutVersion()), NOT_SUPPORTED_OPERATION);
+              "before finalization. It belongs to the layout feature %s, " +
+              "whose layout version is %d. Current Layout version is %d",
+          joinPoint.getSignature().toShortString(),
+          layoutFeature.name(),
+          layoutFeature.layoutVersion(),
+          lvm.getMetadataLayoutVersion()),
+          NOT_SUPPORTED_OPERATION);
     }
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureCatalog.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureCatalog.java
deleted file mode 100644
index c5ed27d..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureCatalog.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.upgrade;
-
-import java.util.Optional;
-
-import org.apache.hadoop.ozone.upgrade.LayoutFeature;
-
-/**
- * Catalog of Ozone Manager features.
- */
-public class OMLayoutFeatureCatalog {
-
-  /**
-   * List of OM Features.
-   */
-  public enum OMLayoutFeature implements LayoutFeature {
-    INITIAL_VERSION(0, "Initial Layout Version"),
-    CREATE_EC(1, ""),
-    NEW_FEATURE(2, "new feature", new NewOmFeatureUpgradeAction());
-
-
-    private int layoutVersion;
-    private String description;
-    private Optional<OmUpgradeAction> omUpgradeAction = Optional.empty();
-
-    OMLayoutFeature(final int layoutVersion, String description) {
-      this.layoutVersion = layoutVersion;
-      this.description = description;
-    }
-
-    OMLayoutFeature(final int layoutVersion, String description,
-                    OmUpgradeAction upgradeAction) {
-      this.layoutVersion = layoutVersion;
-      this.description = description;
-      omUpgradeAction = Optional.of(upgradeAction);
-    }
-
-    @Override
-    public int layoutVersion() {
-      return layoutVersion;
-    }
-
-    @Override
-    public String description() {
-      return description;
-    }
-
-    @Override
-    public Optional<OmUpgradeAction> onFinalizeAction() {
-      return omUpgradeAction;
-    }
-  }
-
-  /**
-   * This is an example of an "API" that uses a new Layout feature (EC) that is
-   * not yet supported by the current layout version. The following can be
-   * "guarded" by just adding the following annotation, thereby keeping the
-   * method logic and upgrade logic separate.
-   */
-  @OMLayoutFeatureAPI(OMLayoutFeature.CREATE_EC)
-  public String ecMethod() {
-    // Blah Blah EC Blah....
-    return "ec";
-  }
-
-  /**
-   * This is an example of an "API" that uses a Layout feature (EC) that is
-   * supported by the current layout version.
-   */
-  @OMLayoutFeatureAPI(OMLayoutFeature.INITIAL_VERSION)
-  public String basicMethod() {
-    // Blah Blah Basic Blah....
-    return "basic";
-  }
-}
-
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureAPI.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureUtil.java
similarity index 53%
copy from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureAPI.java
copy to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureUtil.java
index 2da8b38..c237638 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureAPI.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureUtil.java
@@ -18,18 +18,32 @@
 
 package org.apache.hadoop.ozone.om.upgrade;
 
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.annotation.Target;
-
-import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeatureCatalog.OMLayoutFeature;
+import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.CREATE_EC;
 
 /**
- * Annotation to specify if an API is backed up by a Layout Feature.
+ * Test util class. To be removed.
  */
-@Target(ElementType.METHOD)
-@Retention(RetentionPolicy.RUNTIME)
-public @interface OMLayoutFeatureAPI {
-  OMLayoutFeature value();
+public class OMLayoutFeatureUtil {
+
+  /**
+   * This is an example of an "API" that uses a new Layout feature (EC) that is
+   * not yet supported by the current layout version. The following can be
+   * "disallowed" by just adding the following annotation, thereby keeping the
+   * method logic and upgrade logic separate.
+   */
+  @DisallowedUntilLayoutVersion(CREATE_EC)
+  public String ecMethod() {
+    // Blah Blah EC Blah....
+    return "ec";
+  }
+
+  /**
+   * This is an example of an "API" that is
+   * supported by the current layout version.
+   */
+  public String basicMethod() {
+    // Blah Blah Basic Blah....
+    return "basic";
+  }
 }
+
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManager.java
deleted file mode 100644
index 2f959a9..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManager.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.upgrade;
-
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_SUPPORTED_OPERATION;
-
-import org.apache.hadoop.ozone.common.Storage;
-import org.apache.hadoop.ozone.om.OMStorage;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeatureCatalog.OMLayoutFeature;
-import org.apache.hadoop.ozone.upgrade.AbstractLayoutVersionManager;
-import org.apache.hadoop.ozone.upgrade.LayoutVersionManager;
-
-import com.google.common.annotations.VisibleForTesting;
-
-/**
- * Class to manage layout versions and features for Ozone Manager.
- */
-public final class OMLayoutVersionManager extends AbstractLayoutVersionManager {
-
-  private static OMLayoutVersionManager omVersionManager;
-
-  private OMLayoutVersionManager() {
-  }
-
-  /**
-   * Read only instance to OM Version Manager.
-   * @return version manager instance.
-   */
-  public static synchronized LayoutVersionManager getInstance() {
-    if (omVersionManager == null) {
-      throw new RuntimeException("OM Layout Version Manager not yet " +
-          "initialized.");
-    }
-    return omVersionManager;
-  }
-
-
-  /**
-   * Initialize OM version manager from storage.
-   * @return version manager instance.
-   */
-  public static synchronized OMLayoutVersionManager initialize(
-      OMStorage omStorage)
-      throws OMException {
-    if (omVersionManager == null) {
-      omVersionManager = new OMLayoutVersionManager();
-      omVersionManager.init(omStorage);
-    }
-    return omVersionManager;
-  }
-
-  /**
-   * Initialize the OM Layout Features and current Layout Version.
-   * @param storage to read the current layout version.
-   * @throws OMException on error.
-   */
-  private void init(Storage storage) throws OMException {
-    init(storage.getLayoutVersion(), OMLayoutFeature.values());
-    if (metadataLayoutVersion > softwareLayoutVersion) {
-      throw new OMException(
-          String.format("Cannot initialize VersionManager. Metadata " +
-                  "layout version (%d) > software layout version (%d)",
-              metadataLayoutVersion, softwareLayoutVersion),
-          NOT_SUPPORTED_OPERATION);
-    }
-  }
-
-  @VisibleForTesting
-  protected synchronized static void resetLayoutVersionManager() {
-    if (omVersionManager != null) {
-      omVersionManager.reset();
-      omVersionManager = null;
-    }
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManagerImpl.java
new file mode 100644
index 0000000..70a8d6b
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManagerImpl.java
@@ -0,0 +1,177 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.upgrade;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_SUPPORTED_OPERATION;
+import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.INITIAL_VERSION;
+
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+import java.util.Set;
+
+import org.apache.hadoop.ozone.common.Storage;
+import org.apache.hadoop.ozone.om.OMStorage;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.request.OMClientRequest;
+import org.apache.hadoop.ozone.upgrade.AbstractLayoutVersionManager;
+import org.apache.hadoop.ozone.upgrade.LayoutVersionInstanceFactory;
+import org.apache.hadoop.ozone.upgrade.LayoutVersionManager;
+import org.apache.hadoop.ozone.upgrade.VersionFactoryKey;
+import org.reflections.Reflections;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Class to manage layout versions and features for Ozone Manager.
+ */
+public final class OMLayoutVersionManagerImpl
+    extends AbstractLayoutVersionManager implements OmLayoutVersionManager {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OMLayoutVersionManagerImpl.class);
+
+  private static OMLayoutVersionManagerImpl omVersionManager;
+  private LayoutVersionInstanceFactory<Class<? extends OMClientRequest>>
+      requestFactory;
+
+  private OMLayoutVersionManagerImpl() {
+    requestFactory = new LayoutVersionInstanceFactory<>();
+  }
+
+  /**
+   * Read only instance to OM Version Manager.
+   * @return version manager instance.
+   */
+  public static synchronized LayoutVersionManager getInstance() {
+    if (omVersionManager == null) {
+      throw new RuntimeException("OM Layout Version Manager not yet " +
+          "initialized.");
+    }
+    return omVersionManager;
+  }
+
+
+  /**
+   * Initialize OM version manager from storage.
+   * @return version manager instance.
+   */
+  public static synchronized OMLayoutVersionManagerImpl initialize(
+      OMStorage omStorage)
+      throws OMException {
+    if (omVersionManager == null) {
+      omVersionManager = new OMLayoutVersionManagerImpl();
+      omVersionManager.init(omStorage);
+    }
+    return omVersionManager;
+  }
+
+  /**
+   * Initialize the OM Layout Features and current Layout Version.
+   * @param storage to read the current layout version.
+   * @throws OMException on error.
+   */
+  private void init(Storage storage) throws OMException {
+    init(storage.getLayoutVersion(), OMLayoutFeature.values());
+
+    if (metadataLayoutVersion > softwareLayoutVersion) {
+      throw new OMException(
+          String.format("Cannot initialize VersionManager. Metadata " +
+                  "layout version (%d) > software layout version (%d)",
+              metadataLayoutVersion, softwareLayoutVersion),
+          NOT_SUPPORTED_OPERATION);
+    }
+    registerOzoneManagerRequests();
+  }
+
+  public void doFinalize(OzoneManager om) {
+    super.doFinalize(om);
+    requestFactory.onFinalize(this);
+  }
+
+  @VisibleForTesting
+  protected synchronized static void resetLayoutVersionManager() {
+    if (omVersionManager != null) {
+      omVersionManager.reset();
+      omVersionManager = null;
+    }
+  }
+
+  public void reset() {
+    requestFactory = null;
+    super.reset();
+  }
+
+  private void registerOzoneManagerRequests() {
+    Reflections reflections = new Reflections(
+        "org.apache.hadoop.ozone.om.request");
+    Set<Class<? extends OMClientRequest>> subTypes =
+        reflections.getSubTypesOf(OMClientRequest.class);
+    try {
+      for (Class<? extends OMClientRequest> requestClass : subTypes) {
+        if (Modifier.isAbstract(requestClass.getModifiers())) {
+          continue;
+        }
+        try {
+          Method getRequestTypeMethod = requestClass.getMethod(
+              "getRequestType");
+          String type = (String) getRequestTypeMethod.invoke(null);
+          LOG.debug("Registering {} with OmVersionFactory.",
+              requestClass.getSimpleName());
+          BelongsToLayoutVersion annotation =
+              requestClass.getAnnotation(BelongsToLayoutVersion.class);
+          if (annotation == null) {
+            registerRequestType(type, INITIAL_VERSION.layoutVersion(),
+                requestClass);
+          } else {
+            registerRequestType(type, annotation.value().layoutVersion(),
+                requestClass);
+          }
+        } catch (NoSuchMethodException nsmEx) {
+          LOG.warn("Found a class {} with request type not defined. ",
+              requestClass.getSimpleName());
+        }
+      }
+    } catch (Exception ex) {
+      LOG.error("Exception registering OM client request.", ex);
+    }
+  }
+
+  private void registerRequestType(String type, int version,
+                                   Class<? extends OMClientRequest> reqClass) {
+    VersionFactoryKey key = new VersionFactoryKey.Builder()
+        .key(type).version(version).build();
+    requestFactory.register(this, key, reqClass);
+  }
+
+  /**
+   * Given a request type, get the corresponding request handler class
+   * registered for the current layout version.
+   * @param type request type string.
+   * @return class type of the request handler.
+   */
+  @Override
+  public Class<? extends OMClientRequest> getRequestHandler(String type) {
+    VersionFactoryKey versionFactoryKey = new VersionFactoryKey.Builder()
+        .key(type).build();
+    return requestFactory.get(this, versionFactoryKey);
+  }
+}
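
The factory behind getRequestHandler keys each request class by (request type,
layout version) and, on lookup, returns the newest registered implementation whose
version the current metadata layout version allows. A standalone sketch of that
lookup rule, assuming nothing about the actual LayoutVersionInstanceFactory API:

    import java.util.Map;
    import java.util.NavigableMap;
    import java.util.TreeMap;
    import java.util.concurrent.ConcurrentHashMap;

    /** Toy version-keyed registry illustrating the lookup rule. */
    public class VersionedRegistry<T> {
      private final Map<String, NavigableMap<Integer, T>> instances =
          new ConcurrentHashMap<>();

      public void register(String key, int layoutVersion, T instance) {
        instances.computeIfAbsent(key, k -> new TreeMap<>())
            .put(layoutVersion, instance);
      }

      /** Newest registered version that is <= the current metadata layout version. */
      public T get(String key, int metadataLayoutVersion) {
        NavigableMap<Integer, T> versions = instances.get(key);
        if (versions == null) {
          return null;
        }
        Map.Entry<Integer, T> entry = versions.floorEntry(metadataLayoutVersion);
        return entry == null ? null : entry.getValue();
      }
    }

With metadata layout version 0, get("CreateKey", 0) resolves to the pre-finalization
handler; once finalization raises the metadata layout version past CREATE_EC, the same
key resolves to the EC variant, matching the expectation in
TestOmVersionManagerRequestFactory below.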
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureAPI.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OmLayoutVersionManager.java
similarity index 66%
rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureAPI.java
rename to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OmLayoutVersionManager.java
index 2da8b38..51b5c6c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureAPI.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OmLayoutVersionManager.java
@@ -18,18 +18,12 @@
 
 package org.apache.hadoop.ozone.om.upgrade;
 
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.annotation.Target;
-
-import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeatureCatalog.OMLayoutFeature;
+import org.apache.hadoop.ozone.om.request.OMClientRequest;
+import org.apache.hadoop.ozone.upgrade.LayoutVersionManager;
 
 /**
- * Annotation to specify if an API is backed up by a Layout Feature.
+ * Read only Interface for OM Layout Version Management.
  */
-@Target(ElementType.METHOD)
-@Retention(RetentionPolicy.RUNTIME)
-public @interface OMLayoutFeatureAPI {
-  OMLayoutFeature value();
+public interface OmLayoutVersionManager extends LayoutVersionManager {
+  Class<? extends OMClientRequest> getRequestHandler(String requestType);
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
index 73277e0..0207b6b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
@@ -16,6 +16,8 @@
  */
 package org.apache.hadoop.ozone.protocolPB;
 
+import static org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils.getRequest;
+
 import java.io.IOException;
 import java.util.Optional;
 import java.util.concurrent.ExecutionException;
@@ -98,7 +100,6 @@ public class OzoneManagerProtocolServerSideTranslatorPB implements
     this.omRatisServer = ratisServer;
     dispatcher = new OzoneProtocolMessageDispatcher<>("OzoneProtocol",
         metrics, LOG);
-
   }
 
   /**
@@ -124,8 +125,7 @@ public class OzoneManagerProtocolServerSideTranslatorPB implements
       } else {
         if (omRatisServer.isLeader()) {
           try {
-            OMClientRequest omClientRequest =
-                OzoneManagerRatisUtils.createClientRequest(request);
+            OMClientRequest omClientRequest = getRequest(ozoneManager, request);
             request = omClientRequest.preExecute(ozoneManager);
           } catch (IOException ex) {
             // As some of the preExecute returns error. So handle here.
@@ -217,8 +217,7 @@ public class OzoneManagerProtocolServerSideTranslatorPB implements
       if (OmUtils.isReadOnly(request)) {
         return handler.handleReadRequest(request);
       } else {
-        OMClientRequest omClientRequest =
-            OzoneManagerRatisUtils.createClientRequest(request);
+        OMClientRequest omClientRequest = getRequest(ozoneManager, request);
         request = omClientRequest.preExecute(ozoneManager);
         index = transactionIndex.incrementAndGet();
         omClientResponse = handler.handleWriteRequest(request, index);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
index 35ab275..dd9e704 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
@@ -78,6 +78,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
 import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
 
 import com.google.common.collect.Lists;
+
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DBUpdatesRequest;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DBUpdatesResponse;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetAclRequest;
@@ -91,6 +92,7 @@ import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadInfo;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartInfo;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -222,7 +224,7 @@ public class OzoneManagerRequestHandler implements RequestHandler {
   public OMClientResponse handleWriteRequest(OMRequest omRequest,
       long transactionLogIndex) {
     OMClientRequest omClientRequest =
-        OzoneManagerRatisUtils.createClientRequest(omRequest);
+        OzoneManagerRatisUtils.getRequest(getOzoneManager(), omRequest);
     OMClientResponse omClientResponse =
         omClientRequest.validateAndUpdateCache(getOzoneManager(),
             transactionLogIndex, ozoneManagerDoubleBuffer::add);
@@ -290,6 +292,14 @@ public class OzoneManagerRequestHandler implements RequestHandler {
       throw new OMException("ClientId is null",
           OMException.ResultCodes.INVALID_REQUEST);
     }
+
+    // Layout version should have been set by the leader while serializing
+    // the request, and hence cannot be null. This version is used by each
+    // node to identify which request handler version to use.
+    if (omRequest.getLayoutVersion() == null) {
+      throw new OMException("LayoutVersion for request is null.",
+          OMException.ResultCodes.INTERNAL_ERROR);
+    }
   }
 
   private CheckVolumeAccessResponse checkVolumeAccess(
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestBucketRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestBucketRequest.java
index 7ae82f8..012f2c9 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestBucketRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestBucketRequest.java
@@ -19,6 +19,7 @@
 
 package org.apache.hadoop.ozone.om.request.bucket;
 
+import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManagerImpl;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
@@ -37,6 +38,7 @@ import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 
 
 import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
 /**
@@ -71,6 +73,9 @@ public class TestBucketRequest {
     when(ozoneManager.getMetrics()).thenReturn(omMetrics);
     when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
     when(ozoneManager.isRatisEnabled()).thenReturn(true);
+    OMLayoutVersionManagerImpl lvm = mock(OMLayoutVersionManagerImpl.class);
+    when(lvm.getMetadataLayoutVersion()).thenReturn(0);
+    when(ozoneManager.getVersionManager()).thenReturn(lvm);
     auditLogger = Mockito.mock(AuditLogger.class);
     when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
     Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
index 116ba5c..4c1425e 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.ozone.om.KeyManager;
 import org.apache.hadoop.ozone.om.KeyManagerImpl;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
+import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManagerImpl;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
 import org.junit.After;
 import org.junit.Before;
@@ -58,6 +59,7 @@ import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyInt;
 import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
 /**
@@ -108,6 +110,9 @@ public class TestOMKeyRequest {
     when(ozoneManager.getMetrics()).thenReturn(omMetrics);
     when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
     when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration);
+    OMLayoutVersionManagerImpl lvm = mock(OMLayoutVersionManagerImpl.class);
+    when(lvm.getMetadataLayoutVersion()).thenReturn(0);
+    when(ozoneManager.getVersionManager()).thenReturn(lvm);
     when(ozoneManager.isRatisEnabled()).thenReturn(true);
     auditLogger = Mockito.mock(AuditLogger.class);
     when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeRequest.java
index d0b2cf0..c2486db 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeRequest.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.ozone.om.OMMetrics;
 import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManagerImpl;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .CreateVolumeRequest;
@@ -44,6 +45,7 @@ import org.junit.rules.TemporaryFolder;
 import org.mockito.Mockito;
 
 import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
 /**
@@ -67,7 +69,7 @@ public class TestOMVolumeRequest {
 
   @Before
   public void setup() throws Exception {
-    ozoneManager = Mockito.mock(OzoneManager.class);
+    ozoneManager = mock(OzoneManager.class);
     omMetrics = OMMetrics.create();
     OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
     ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
@@ -76,8 +78,11 @@ public class TestOMVolumeRequest {
     when(ozoneManager.getMetrics()).thenReturn(omMetrics);
     when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
     when(ozoneManager.getMaxUserVolumeCount()).thenReturn(10L);
+    OMLayoutVersionManagerImpl lvm = mock(OMLayoutVersionManagerImpl.class);
+    when(lvm.getMetadataLayoutVersion()).thenReturn(0);
+    when(ozoneManager.getVersionManager()).thenReturn(lvm);
     when(ozoneManager.isRatisEnabled()).thenReturn(true);
-    auditLogger = Mockito.mock(AuditLogger.class);
+    auditLogger = mock(AuditLogger.class);
     when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
     Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
   }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMLayoutFeatureAspect.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMLayoutFeatureAspect.java
index b68c7c2..d37d0e7 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMLayoutFeatureAspect.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMLayoutFeatureAspect.java
@@ -56,8 +56,8 @@ public class TestOMLayoutFeatureAspect {
    */
   @Test
   public void testCheckLayoutFeature() throws Exception {
-    OMLayoutVersionManager.initialize(new OMStorage(configuration));
-    OMLayoutFeatureCatalog testObj = new OMLayoutFeatureCatalog();
+    OMLayoutVersionManagerImpl.initialize(new OMStorage(configuration));
+    OMLayoutFeatureUtil testObj = new OMLayoutFeatureUtil();
     try {
       testObj.ecMethod();
       Assert.fail();
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMVersionManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMVersionManager.java
index cfcfe24..9116e59 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMVersionManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMVersionManager.java
@@ -18,8 +18,9 @@
 
 package org.apache.hadoop.ozone.om.upgrade;
 
-import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeatureCatalog.OMLayoutFeature.CREATE_EC;
-import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeatureCatalog.OMLayoutFeature.INITIAL_VERSION;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_SUPPORTED_OPERATION;
+import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.CREATE_EC;
+import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.INITIAL_VERSION;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -30,8 +31,9 @@ import java.io.IOException;
 
 import org.apache.hadoop.ozone.om.OMStorage;
 import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeatureCatalog.OMLayoutFeature;
-import org.apache.hadoop.ozone.upgrade.LayoutFeature;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.junit.After;
+import org.junit.Assert;
 import org.junit.Test;
 
 /**
@@ -39,12 +41,17 @@ import org.junit.Test;
  */
 public class TestOMVersionManager {
 
+  @After
+  public void cleanup() {
+    OMLayoutVersionManagerImpl.resetLayoutVersionManager();
+  }
+
   @Test
   public void testOMLayoutVersionManager() throws IOException {
     OMStorage omStorage = mock(OMStorage.class);
     when(omStorage.getLayoutVersion()).thenReturn(0);
-    OMLayoutVersionManager omVersionManager =
-        OMLayoutVersionManager.initialize(omStorage);
+    OMLayoutVersionManagerImpl omVersionManager =
+        OMLayoutVersionManagerImpl.initialize(omStorage);
     assertTrue(omVersionManager.isAllowed(INITIAL_VERSION));
     assertFalse(omVersionManager.isAllowed(CREATE_EC));
     assertEquals(0, omVersionManager.getMetadataLayoutVersion());
@@ -55,10 +62,38 @@ public class TestOMVersionManager {
   }
 
   @Test
+  public void testOMLayoutVersionManagerInitError() {
+    OMStorage omStorage = mock(OMStorage.class);
+    when(omStorage.getLayoutVersion()).thenReturn(
+        OMLayoutFeature.values()[OMLayoutFeature.values().length - 1]
+            .layoutVersion() + 1);
+    try {
+      OMLayoutVersionManagerImpl.initialize(omStorage);
+      Assert.fail();
+    } catch (OMException ex) {
+      assertEquals(NOT_SUPPORTED_OPERATION, ex.getResult());
+    }
+  }
+
+  @Test
+  public void testOMLayoutVersionManagerReset() throws IOException {
+    OMStorage omStorage = mock(OMStorage.class);
+    when(omStorage.getLayoutVersion()).thenReturn(0);
+    OMLayoutVersionManagerImpl omVersionManager =
+        OMLayoutVersionManagerImpl.initialize(omStorage);
+    int numLayoutVersions = OMLayoutFeature.values().length;
+    assertEquals(
+        OMLayoutFeature.values()[numLayoutVersions - 1].layoutVersion(),
+        omVersionManager.getSoftwareLayoutVersion());
+    OMLayoutVersionManagerImpl.resetLayoutVersionManager();
+    assertEquals(0, omVersionManager.getSoftwareLayoutVersion());
+  }
+
+  @Test
   public void testOMLayoutFeatureCatalog() {
     OMLayoutFeature[] values = OMLayoutFeature.values();
     int currVersion = Integer.MIN_VALUE;
-    for (LayoutFeature lf : values) {
+    for (OMLayoutFeature lf : values) {
       assertTrue(currVersion <= lf.layoutVersion());
       currVersion = lf.layoutVersion();
     }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOmVersionManagerRequestFactory.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOmVersionManagerRequestFactory.java
new file mode 100644
index 0000000..dffddd1
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOmVersionManagerRequestFactory.java
@@ -0,0 +1,122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.upgrade;
+
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.CreateKey;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+import java.util.List;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import org.apache.hadoop.ozone.om.OMStorage;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.request.OMClientRequest;
+import org.apache.hadoop.ozone.om.request.key.OMECKeyCreateRequest;
+import org.apache.hadoop.ozone.om.request.key.OMKeyCreateRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.reflections.Reflections;
+
+/**
+ * Test OmVersionFactory.
+ */
+public class TestOmVersionManagerRequestFactory {
+
+  private static OMLayoutVersionManagerImpl omVersionManager;
+
+  @BeforeClass
+  public static void setup() throws OMException {
+    OMStorage omStorage = mock(OMStorage.class);
+    when(omStorage.getLayoutVersion()).thenReturn(0);
+    omVersionManager = OMLayoutVersionManagerImpl.initialize(omStorage);
+  }
+
+  @Test
+  public void testKeyCreateRequest() throws Exception {
+
+    // Try getting v1 of 'CreateKey'.
+    Class<? extends OMClientRequest> requestType =
+        omVersionManager.getRequestHandler(CreateKey.name());
+    Assert.assertEquals(requestType, OMKeyCreateRequest.class);
+
+    // Finalize the version manager.
+    omVersionManager.doFinalize(mock(OzoneManager.class));
+
+    // Try getting 'CreateKey' again. Should return CreateECKey.
+    requestType = omVersionManager.getRequestHandler(CreateKey.name());
+    Assert.assertEquals(requestType, OMECKeyCreateRequest.class);
+  }
+
+  @Test
+  public void testAllOMRequestClassesHaveGetRequestTypeMethod()
+      throws Exception {
+    Reflections reflections = new Reflections(
+        "org.apache.hadoop.ozone.om.request");
+    Set<Class<? extends OMClientRequest>> subTypes =
+        reflections.getSubTypesOf(OMClientRequest.class);
+    List<Class<? extends OMClientRequest>> collect = subTypes.stream()
+            .filter(c -> !Modifier.isAbstract(c.getModifiers()))
+            .collect(Collectors.toList());
+
+    for (Class<? extends OMClientRequest> c : collect) {
+      Method getRequestTypeMethod = null;
+      try {
+        getRequestTypeMethod = c.getMethod("getRequestType");
+      } catch (NoSuchMethodException nsmEx) {
+        Assert.fail(String.format(
+            "%s does not have the 'getRequestType' method " +
+            "which should be defined or inherited for every OM request class.",
+            c));
+      }
+      String type = (String) getRequestTypeMethod.invoke(null);
+      Assert.assertNotNull(String.format("Cannot get handler for %s", type),
+          omVersionManager.getRequestHandler(type));
+    }
+  }
+
+  @Test
+  public void testOmClientRequestHasExpectedConstructor()
+      throws NoSuchMethodException {
+    Reflections reflections = new Reflections(
+        "org.apache.hadoop.ozone.om.request");
+    Set<Class<? extends OMClientRequest>> subTypes =
+        reflections.getSubTypesOf(OMClientRequest.class);
+
+    for (Class<? extends OMClientRequest> requestClass : subTypes) {
+      if (Modifier.isAbstract(requestClass.getModifiers())) {
+        continue;
+      }
+      Method getRequestTypeMethod = requestClass.getMethod(
+          "getRequestType");
+      Assert.assertNotNull(getRequestTypeMethod);
+
+      Constructor<? extends OMClientRequest> constructorWithOmRequestArg =
+          requestClass.getDeclaredConstructor(OMRequest.class);
+      Assert.assertNotNull(constructorWithOmRequestArg);
+    }
+  }
+}
diff --git a/hadoop-ozone/s3gateway/pom.xml b/hadoop-ozone/s3gateway/pom.xml
index 4a62fc7..588ecc8 100644
--- a/hadoop-ozone/s3gateway/pom.xml
+++ b/hadoop-ozone/s3gateway/pom.xml
@@ -37,6 +37,11 @@
       <scope>compile</scope>
     </dependency>
     <dependency>
+      <groupId>org.javassist</groupId>
+      <artifactId>javassist</artifactId>
+      <version>3.26.0-GA</version>
+    </dependency>
+    <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdds-test-utils</artifactId>
       <scope>test</scope>

