Posted to commits@ozone.apache.org by av...@apache.org on 2021/02/09 04:41:21 UTC

[ozone] branch HDDS-3698-nonrolling-upgrade created (now dd14670)

This is an automated email from the ASF dual-hosted git repository.

avijayan pushed a change to branch HDDS-3698-nonrolling-upgrade
in repository https://gitbox.apache.org/repos/asf/ozone.git.


      at dd14670  trigger new CI check

This branch includes the following new commits:

     new 1e187f7  Merge remote-tracking branch 'upstream/master' into upgrade-branch-merge-candidate
     new 7ae013e  Fix spotbugs issues.
     new ba4aeca  trigger new CI check
     new dd14670  trigger new CI check

The 4 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 02/04: Fix spotbugs issues.

Posted by av...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

avijayan pushed a commit to branch HDDS-3698-nonrolling-upgrade
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 7ae013e3adcd96b6a1d56a24bb8b5e2c3db748b4
Author: Aravindan Vijayan <av...@cloudera.com>
AuthorDate: Mon Feb 8 12:48:26 2021 -0800

    Fix spotbugs issues.
---
 .../upgrade/TestDataNodeStartupSlvLessThanMlv.java         |  3 +--
 .../hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java | 12 ++++++------
 .../apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java   |  2 +-
 .../apache/hadoop/ozone/om/TestOzoneManagerPrepare.java    |  9 ++++++---
 .../ozone-manager/dev-support/findbugsExcludeFile.xml      |  4 ++++
 .../org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java  |  2 +-
 .../hadoop/ozone/om/upgrade/TestOMVersionManager.java      |  3 +--
 .../ozone/om/upgrade/TestOzoneManagerPrepareState.java     | 14 ++++++++++----
 .../hadoop/ozone/recon/scm/TestReconNodeManager.java       |  3 ++-
 9 files changed, 32 insertions(+), 20 deletions(-)

diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java
index e075b13..50001aa 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java
@@ -68,8 +68,7 @@ public class TestDataNodeStartupSlvLessThanMlv {
         HddsProtos.NodeType.DATANODE, mlv);
 
     try {
-      DatanodeStateMachine stateMachine =
-               new DatanodeStateMachine(getNewDatanodeDetails(), conf, null,
+      new DatanodeStateMachine(getNewDatanodeDetails(), conf, null,
                    null);
       Assert.fail("Expected IOException due to incorrect MLV on DataNode " +
           "creation.");
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
index 847627e..2fdb36f 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
@@ -51,7 +51,7 @@ public class TestSCMBlockProtocolServer {
   private StorageContainerManager scm;
   private NodeManager nodeManager;
   private ScmBlockLocationProtocolServerSideTranslatorPB service;
-  private final int nodeCount = 10;
+  private static final int NODE_COUNT = 10;
 
   @Before
   public void setUp() throws Exception {
@@ -64,7 +64,7 @@ public class TestSCMBlockProtocolServer {
     scm.exitSafeMode();
     // add nodes to scm node manager
     nodeManager = scm.getScmNodeManager();
-    for (int i = 0; i < nodeCount; i++) {
+    for (int i = 0; i < NODE_COUNT; i++) {
       nodeManager.register(randomDatanodeDetails(), null, null, null);
 
     }
@@ -95,7 +95,7 @@ public class TestSCMBlockProtocolServer {
     System.out.println("client = " + client);
     datanodeDetails.stream().forEach(
         node -> System.out.println(node.toString()));
-    Assert.assertTrue(datanodeDetails.size() == nodeCount);
+    Assert.assertTrue(datanodeDetails.size() == NODE_COUNT);
 
     // illegal client 1
     client += "X";
@@ -103,14 +103,14 @@ public class TestSCMBlockProtocolServer {
     System.out.println("client = " + client);
     datanodeDetails.stream().forEach(
         node -> System.out.println(node.toString()));
-    Assert.assertTrue(datanodeDetails.size() == nodeCount);
+    Assert.assertTrue(datanodeDetails.size() == NODE_COUNT);
     // illegal client 2
     client = "/default-rack";
     datanodeDetails = server.sortDatanodes(nodes, client);
     System.out.println("client = " + client);
     datanodeDetails.stream().forEach(
         node -> System.out.println(node.toString()));
-    Assert.assertTrue(datanodeDetails.size() == nodeCount);
+    Assert.assertTrue(datanodeDetails.size() == NODE_COUNT);
 
     // unknown node to sort
     nodes.add(UUID.randomUUID().toString());
@@ -122,7 +122,7 @@ public class TestSCMBlockProtocolServer {
             .build();
     ScmBlockLocationProtocolProtos.SortDatanodesResponseProto resp =
         service.sortDatanodes(request, CURRENT_VERSION);
-    Assert.assertTrue(resp.getNodeList().size() == nodeCount);
+    Assert.assertTrue(resp.getNodeList().size() == NODE_COUNT);
     System.out.println("client = " + client);
     resp.getNodeList().stream().forEach(
         node -> System.out.println(node.getNetworkName()));
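
In the TestSCMBlockProtocolServer hunks above, the per-instance nodeCount field becomes a static final constant, which is the usual way to clear the SpotBugs warning about a final instance field holding a compile-time constant. A small sketch of the same idea, with a hedged side note: assertEquals reports expected and actual values on failure, which is generally preferred over assertTrue on an == comparison in new test code:

    import org.junit.Assert;

    public final class NodeCountAssertionSketch {
      // A compile-time constant shared by all test instances; a per-instance
      // "private final int" holding a literal is what SpotBugs usually flags
      // as "should be static".
      private static final int NODE_COUNT = 10;

      private NodeCountAssertionSketch() { }

      static void checkSize(int actualSize) {
        // Prints expected vs. actual on failure, unlike
        // assertTrue(actualSize == NODE_COUNT).
        Assert.assertEquals(NODE_COUNT, actualSize);
      }
    }
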
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java
index 309f8d5..97cce34 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java
@@ -104,7 +104,7 @@ public class TestSCMNodeMetrics {
   public void testHBProcessing() throws InterruptedException {
     long hbProcessed = getCounter("NumHBProcessed");
 
-    NodeReportProto nodeReport = createNodeReport();
+    createNodeReport();
 
     LayoutVersionManager versionManager = nodeManager.getLayoutVersionManager();
     LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java
index 8dfc433..6ce6b8e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java
@@ -269,9 +269,12 @@ public class TestOzoneManagerPrepare extends TestOzoneManagerHA {
     File logDir = Paths.get(ratisDir, groupIdDirName, "current")
         .toFile();
 
-    for (File file : logDir.listFiles()) {
-      if (file.getName().startsWith("log")) {
-        return true;
+    File[] files = logDir.listFiles();
+    if (files != null) {
+      for (File file : files) {
+        if (file.getName().startsWith("log")) {
+          return true;
+        }
       }
     }
     return false;
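
The TestOzoneManagerPrepare change above guards against File.listFiles() returning null, which it does when the path is not a directory or cannot be read, and which SpotBugs flags as a potential null dereference. A self-contained sketch of the same check, with hypothetical names:

    import java.io.File;

    public final class RatisLogDirSketch {
      private RatisLogDirSketch() { }

      /**
       * Returns true if the directory contains a file whose name starts with
       * "log". File.listFiles() returns null when the path is not a directory
       * or cannot be read, so the result must be null-checked before iterating.
       */
      static boolean containsLogSegment(File logDir) {
        File[] files = logDir.listFiles();
        if (files == null) {
          return false;
        }
        for (File file : files) {
          if (file.getName().startsWith("log")) {
            return true;
          }
        }
        return false;
      }
    }
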
diff --git a/hadoop-ozone/ozone-manager/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/ozone-manager/dev-support/findbugsExcludeFile.xml
index 898cda6..2e2a8cc 100644
--- a/hadoop-ozone/ozone-manager/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-ozone/ozone-manager/dev-support/findbugsExcludeFile.xml
@@ -77,4 +77,8 @@
     <Class name="org.apache.hadoop.ozone.security.TestOzoneTokenIdentifier"/>
     <Bug pattern="UC_USELESS_OBJECT" />
   </Match>
+  <Match>
+    <Class name="org.apache.hadoop.ozone.om.upgrade.TestOMLayoutFeatureAspect"/>
+    <Bug pattern="BC_IMPOSSIBLE_CAST" />
+  </Match>
 </FindBugsFilter>
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index e62c534..01c0586 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -144,7 +144,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
   public static final String TRANSACTION_INFO_TABLE =
       "transactionInfoTable";
 
-  public static final String[] ALL_TABLES = new String[] {
+  static final String[] ALL_TABLES = new String[] {
       USER_TABLE,
       VOLUME_TABLE,
       BUCKET_TABLE,
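
The OmMetadataManagerImpl hunk narrows ALL_TABLES from public to package-private; SpotBugs warns about publicly exposed static final arrays because array contents stay mutable even when the reference is final. Where such a constant does need to remain public, an unmodifiable list is a common Java 8 alternative; a sketch with hypothetical table names:

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public final class TableNamesSketch {
      // An unmodifiable view keeps the constant safely public: callers can
      // iterate it, but any attempt to modify it throws
      // UnsupportedOperationException.
      public static final List<String> ALL_TABLES =
          Collections.unmodifiableList(Arrays.asList(
              "userTable", "volumeTable", "bucketTable"));

      private TableNamesSketch() { }
    }
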
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMVersionManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMVersionManager.java
index 1411caf..51285f9 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMVersionManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMVersionManager.java
@@ -76,8 +76,7 @@ public class TestOMVersionManager {
         OMLayoutFeature.values()[OMLayoutFeature.values().length - 1]
             .layoutVersion() + 1);
     try {
-      OMLayoutVersionManager omVersionManager =
-          new OMLayoutVersionManager(omStorage);
+      new OMLayoutVersionManager(omStorage);
       Assert.fail();
     } catch (OMException ex) {
       assertEquals(NOT_SUPPORTED_OPERATION, ex.getResult());
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOzoneManagerPrepareState.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOzoneManagerPrepareState.java
index 035b67a..d9307ee 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOzoneManagerPrepareState.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOzoneManagerPrepareState.java
@@ -33,6 +33,7 @@ import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.nio.charset.Charset;
 import java.util.Random;
 
 public class TestOzoneManagerPrepareState {
@@ -41,6 +42,7 @@ public class TestOzoneManagerPrepareState {
 
   private static final int TEST_INDEX = 5;
   private OzoneManagerPrepareState prepareState;
+  private static final Random RANDOM = new Random();
 
   @Before
   public void setup() throws Exception {
@@ -131,7 +133,7 @@ public class TestOzoneManagerPrepareState {
   @Test
   public void testRestoreGarbageMarkerFile() throws Exception {
     byte[] randomBytes = new byte[10];
-    new Random().nextBytes(randomBytes);
+    RANDOM.nextBytes(randomBytes);
     writePrepareMarkerFile(randomBytes);
 
     PrepareStatus status = prepareState.restorePrepare(TEST_INDEX);
@@ -193,12 +195,16 @@ public class TestOzoneManagerPrepareState {
   }
 
   private void writePrepareMarkerFile(long index) throws IOException {
-    writePrepareMarkerFile(Long.toString(index).getBytes());
+    writePrepareMarkerFile(Long.toString(index).getBytes(
+        Charset.defaultCharset()));
   }
 
   private void writePrepareMarkerFile(byte[] bytes) throws IOException {
     File markerFile = prepareState.getPrepareMarkerFile();
-    markerFile.getParentFile().mkdirs();
+    boolean mkdirs = markerFile.getParentFile().mkdirs();
+    if (!mkdirs) {
+      throw new IOException("Unable to create marker file directory.");
+    }
     try(FileOutputStream stream =
             new FileOutputStream(markerFile)) {
       stream.write(bytes);
@@ -212,7 +218,7 @@ public class TestOzoneManagerPrepareState {
 
     try (FileInputStream stream = new FileInputStream(prepareMarkerFile)) {
       stream.read(data);
-      index = Long.parseLong(new String(data));
+      index = Long.parseLong(new String(data, Charset.defaultCharset()));
     }
 
     return index;
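
Two SpotBugs-driven patterns show up in the TestOzoneManagerPrepareState hunks: byte/String conversions now pass an explicit Charset instead of relying on the platform default, and the return value of mkdirs() is now checked rather than ignored. One caveat worth noting: File.mkdirs() also returns false when the directory already exists, so a check that tolerates a pre-existing directory is the more defensive form. A sketch with hypothetical names, using UTF-8 rather than the default charset:

    import java.io.File;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;

    public final class MarkerFileSketch {
      private MarkerFileSketch() { }

      /** Writes the given index to a marker file using an explicit charset. */
      static void writeMarker(File markerFile, long index) throws IOException {
        File parent = markerFile.getParentFile();
        // mkdirs() returns false both on failure and when the directory is
        // already present, so also accept an existing directory.
        if (!parent.mkdirs() && !parent.isDirectory()) {
          throw new IOException("Unable to create directory " + parent);
        }
        byte[] bytes = Long.toString(index).getBytes(StandardCharsets.UTF_8);
        try (FileOutputStream stream = new FileOutputStream(markerFile)) {
          stream.write(bytes);
        }
      }
    }
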
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java
index b1e7aa5..9374a04 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java
@@ -118,7 +118,8 @@ public class TestReconNodeManager {
     // Upon processing the heartbeat, the illegal command should be filtered out
     List<SCMCommand> returnedCmds =
         reconNodeManager.processHeartbeat(datanodeDetails,
-            LayoutVersionProto.newBuilder().build());
+            LayoutVersionProto.newBuilder().setMetadataLayoutVersion(0)
+                .setSoftwareLayoutVersion(0).build());
     assertEquals(1, returnedCmds.size());
     assertEquals(SCMCommandProto.Type.reregisterCommand,
         returnedCmds.get(0).getType());



[ozone] 04/04: trigger new CI check

Posted by av...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

avijayan pushed a commit to branch HDDS-3698-nonrolling-upgrade
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit dd14670621c5cb693d5067bbd1be31e4d3f4abd5
Author: Aravindan Vijayan <av...@cloudera.com>
AuthorDate: Mon Feb 8 16:17:53 2021 -0800

    trigger new CI check



[ozone] 01/04: Merge remote-tracking branch 'upstream/master' into upgrade-branch-merge-candidate

Posted by av...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

avijayan pushed a commit to branch HDDS-3698-nonrolling-upgrade
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 1e187f7435be8c89db1f791ca829fe7f4e672dfe
Merge: ee8d276 375da4d
Author: Aravindan Vijayan <av...@cloudera.com>
AuthorDate: Mon Feb 8 10:16:57 2021 -0800

    Merge remote-tracking branch 'upstream/master' into upgrade-branch-merge-candidate

 .github/close-pending.sh                           |    6 +-
 .github/workflows/close-pending.yaml               |    2 +-
 .github/workflows/post-commit.yml                  |   19 +-
 CONTRIBUTING.md                                    |    9 +
 HISTORY.md                                         |    4 +-
 README.md                                          |    1 +
 SECURITY.md                                        |   23 +
 .../dev-support/findbugsExcludeFile.xml            |   13 +-
 hadoop-hdds/client/pom.xml                         |   17 +
 .../apache/hadoop/hdds/scm/OzoneClientConfig.java  |   18 +
 .../apache/hadoop/hdds/scm/XceiverClientGrpc.java  |    9 +-
 .../hadoop/hdds/scm/XceiverClientManager.java      |    4 +
 .../hadoop/hdds/scm/XceiverClientMetrics.java      |    4 +-
 .../apache/hadoop/hdds/scm/XceiverClientRatis.java |    6 +-
 .../hadoop/hdds/scm/storage/BlockInputStream.java  |  135 +-
 .../hadoop/hdds/scm/storage/BlockOutputStream.java |    7 +-
 .../hadoop/hdds/scm/storage/ChunkInputStream.java  |   67 +-
 .../hdds/scm/storage/DummyBlockInputStream.java    |   31 +-
 .../storage/DummyBlockInputStreamWithRetry.java    |    4 +-
 .../hdds/scm/storage/DummyChunkInputStream.java    |   26 +-
 .../hdds/scm/storage/TestBlockInputStream.java     |  224 +++-
 .../storage/TestBlockOutputStreamCorrectness.java  |    4 +-
 .../hdds/scm/storage/TestChunkInputStream.java     |   68 +-
 .../client/src/test/resources/log4j.properties     |   23 +
 .../common/dev-support/findbugsExcludeFile.xml     |   10 +
 hadoop-hdds/common/src/main/conf/hadoop-env.cmd    |   90 --
 hadoop-hdds/common/src/main/conf/hadoop-env.sh     |  451 -------
 hadoop-hdds/common/src/main/conf/ozone-env.sh      |  279 ++++
 .../org/apache/hadoop/hdds/DatanodeVersions.java   |   29 +-
 .../org/apache/hadoop/hdds/client/OzoneQuota.java  |  207 +--
 .../org/apache/hadoop/hdds/client/QuotaList.java   |   67 +
 .../hadoop/hdds/conf/OzoneConfiguration.java       |   17 +
 .../hadoop/hdds/protocol/DatanodeDetails.java      |  270 +++-
 .../org/apache/hadoop/hdds/ratis/RatisHelper.java  |   36 +-
 .../java/org/apache/hadoop/hdds/scm/ScmConfig.java |    2 +-
 .../org/apache/hadoop/hdds/scm/ScmConfigKeys.java  |    9 +-
 .../apache/hadoop/hdds/scm/client/ScmClient.java   |   45 +-
 .../common/helpers/ContainerWithPipeline.java      |    4 +-
 .../apache/hadoop/hdds/scm/net/InnerNodeImpl.java  |    7 +
 .../apache/hadoop/hdds/scm/net/NetConstants.java   |   26 +-
 .../hadoop/hdds/scm/net/NetworkTopologyImpl.java   |   18 +
 .../org/apache/hadoop/hdds/scm/net/NodeImpl.java   |   12 +
 .../hadoop/hdds/scm/net/NodeSchemaLoader.java      |    2 +-
 .../hadoop/hdds/scm/net/NodeSchemaManager.java     |    2 +-
 .../apache/hadoop/hdds/scm/pipeline/Pipeline.java  |    9 +-
 .../protocol/StorageContainerLocationProtocol.java |   19 +-
 .../apache/hadoop/hdds/utils/BackgroundTask.java   |    1 +
 .../org/apache/hadoop/ozone/ClientVersions.java    |   27 +-
 .../org/apache/hadoop/ozone/OzoneConfigKeys.java   |   12 +
 .../java/org/apache/hadoop/ozone/OzoneConsts.java  |   26 +-
 .../org/apache/hadoop/ozone/common/Storage.java    |    2 +-
 .../apache/hadoop/ozone/lease/LeaseManager.java    |   38 +-
 .../common/src/main/resources/ozone-default.xml    |   62 +-
 .../hadoop/hdds/conf/TestOzoneConfiguration.java   |   12 +-
 .../hadoop/hdds/protocol/MockDatanodeDetails.java  |   25 +-
 .../hadoop/hdds/protocol/TestDatanodeDetails.java  |   63 +
 .../hdds/scm/net/TestNetworkTopologyImpl.java      |    5 +-
 .../hadoop/hdds/scm/pipeline/TestPipeline.java     |   51 +
 .../hadoop/hdds/tracing/TestStringCodec.java       |    2 +-
 .../hadoop/ozone/audit/TestOzoneAuditLogger.java   |    2 +-
 .../apache/hadoop/ozone/common/TestChecksum.java   |    6 +-
 .../hadoop/hdds/conf/ConfigFileAppender.java       |    4 +-
 .../hadoop/hdds/conf/ConfigFileGenerator.java      |   17 +-
 .../org/apache/hadoop/hdds/conf/StorageUnit.java   |    1 +
 .../apache/hadoop/hdds/conf/TimeDurationUtil.java  |   14 +
 .../hadoop/hdds/conf/TestConfigFileGenerator.java  |    4 +-
 .../dev-support/findbugsExcludeFile.xml            |   58 +
 .../org/apache/hadoop/hdds/scm/VersionInfo.java    |    2 +-
 .../apache/hadoop/ozone/HddsDatanodeService.java   |   23 +-
 .../container/common/DataNodeLayoutVersion.java    |    2 +-
 .../container/common/helpers/ContainerMetrics.java |    6 +-
 .../container/common/helpers/ContainerUtils.java   |   14 +-
 .../container/common/helpers/DatanodeIdYaml.java   |   72 +-
 .../ozone/container/common/impl/ContainerData.java |    2 +-
 .../container/common/impl/ContainerDataYaml.java   |    6 +-
 .../container/common/impl/HddsDispatcher.java      |    5 +-
 .../common/impl/StorageLocationReport.java         |    6 +
 .../container/common/report/ReportPublisher.java   |    2 +-
 .../common/statemachine/DatanodeStateMachine.java  |   15 +-
 .../common/statemachine/EndpointStateMachine.java  |    4 +
 .../common/statemachine/StateContext.java          |  143 +-
 .../commandhandler/DeleteBlocksCommandHandler.java |  191 ++-
 .../SetNodeOperationalStateCommandHandler.java     |  157 +++
 .../states/endpoint/HeartbeatEndpointTask.java     |   28 +-
 .../common/transport/server/XceiverServerGrpc.java |   20 +-
 .../common/transport/server/ratis/CSMMetrics.java  |    7 +-
 .../server/ratis/ContainerStateMachine.java        |   21 +-
 .../transport/server/ratis/XceiverServerRatis.java |  150 ++-
 .../container/common/utils/ContainerCache.java     |    2 +-
 .../container/common/utils/ReferenceCountedDB.java |    1 +
 .../container/keyvalue/KeyValueContainer.java      |   54 +-
 .../container/keyvalue/KeyValueContainerData.java  |    3 +
 .../ozone/container/keyvalue/KeyValueHandler.java  |    9 +-
 .../container/keyvalue/impl/BlockManagerImpl.java  |    4 +
 .../background/BlockDeletingService.java           |  175 ++-
 .../metadata/AbstractDatanodeDBDefinition.java     |    2 +-
 .../metadata/DatanodeSchemaOneDBDefinition.java    |    6 +
 .../metadata/DatanodeSchemaTwoDBDefinition.java    |   30 +-
 .../metadata/DatanodeStoreSchemaTwoImpl.java       |   14 +-
 ...mpl.java => DeletedBlocksTransactionCodec.java} |   35 +-
 .../ozone/container/ozoneimpl/OzoneContainer.java  |   39 +-
 .../replication/DownloadAndImportReplicator.java   |    8 +-
 .../replication/GrpcReplicationClient.java         |   15 +-
 .../container/replication/MeasuredReplicator.java  |  109 ++
 .../container/replication/ReplicationServer.java   |  144 ++
 .../replication/ReplicationSupervisor.java         |   22 +-
 .../container/replication/ReplicationTask.java     |   22 +-
 .../replication/SimpleContainerDownloader.java     |    8 +-
 .../protocol/commands/CommandForDatanode.java      |    1 +
 .../hadoop/ozone/protocol/commands/SCMCommand.java |    1 +
 .../commands/SetNodeOperationalStateCommand.java   |   89 ++
 .../hadoop/ozone/TestHddsSecureDatanodeInit.java   |    5 +-
 .../ozone/container/ContainerTestHelper.java       |   15 +-
 .../container/common/TestBlockDeletingService.java |  279 +++-
 .../common/impl/TestContainerDataYaml.java         |    2 +-
 .../impl/TestContainerDeletionChoosingPolicy.java  |    2 +-
 .../common/impl/TestContainerPersistence.java      |   10 +-
 .../container/common/interfaces/TestHandler.java   |    1 -
 .../common/statemachine/TestStateContext.java      |  286 +++-
 .../TestCreatePipelineCommandHandler.java          |    6 +-
 .../container/keyvalue/ChunkLayoutTestInfo.java    |    2 +
 .../keyvalue/TestKeyValueBlockIterator.java        |    2 -
 .../container/keyvalue/TestKeyValueContainer.java  |  147 +-
 .../keyvalue/TestKeyValueContainerCheck.java       |    5 +-
 .../container/keyvalue/TestKeyValueHandler.java    |    8 +-
 .../TestKeyValueHandlerWithUnhealthyContainer.java |    9 +-
 .../container/keyvalue/TestTarContainerPacker.java |   33 +-
 .../container/keyvalue/helpers/TestChunkUtils.java |   11 +-
 .../container/ozoneimpl/TestOzoneContainer.java    |    2 +-
 .../ReplicationSupervisorScheduling.java           |  127 ++
 .../replication/TestMeasuredReplicator.java        |  106 ++
 .../replication/TestSimpleContainerDownloader.java |    4 +-
 .../org.mockito.plugins.MockMaker                  |    3 +-
 hadoop-hdds/dev-support/checkstyle/checkstyle.xml  |    2 +
 hadoop-hdds/docs/README.md                         |    2 +-
 hadoop-hdds/docs/config.yaml                       |    2 +
 hadoop-hdds/docs/content/_index.md                 |    2 +-
 hadoop-hdds/docs/content/_index.zh.md              |    4 +-
 .../docs/content/concept/OzoneManager.zh.md        |    4 +-
 hadoop-hdds/docs/content/concept/Recon.zh.md       |  116 ++
 .../content/concept/StorageContainerManager.zh.md  |    6 +-
 hadoop-hdds/docs/content/design/decommissioning.md |   10 +-
 hadoop-hdds/docs/content/feature/Quota.md          |   66 +-
 hadoop-hdds/docs/content/feature/Quota.zh.md       |   62 +-
 hadoop-hdds/docs/content/feature/Recon.zh.md       |   23 +-
 hadoop-hdds/docs/content/feature/_index.md         |   10 +-
 hadoop-hdds/docs/content/interface/Cli.md          |    2 +-
 hadoop-hdds/docs/content/interface/ReconApi.zh.md  |  502 +++++++
 hadoop-hdds/docs/content/interface/S3.md           |    6 +-
 hadoop-hdds/docs/content/interface/S3.zh.md        |   19 +
 hadoop-hdds/docs/content/recipe/Prometheus.md      |    6 +-
 hadoop-hdds/docs/content/recipe/Prometheus.zh.md   |    6 +-
 .../docs/content/{feature => security}/GDPR.md     |    2 +-
 .../docs/content/{feature => security}/GDPR.zh.md  |    3 +
 .../docs/content/security/SecuringOzoneHTTP.md     |   12 +-
 .../docs/content/security/SecurityWithRanger.md    |   21 +-
 .../docs/content/security/SecurityWithRanger.zh.md |   18 +-
 hadoop-hdds/docs/content/tools/AuditParser.md      |    2 +-
 hadoop-hdds/docs/dev-support/bin/generate-site.sh  |    9 +-
 hadoop-hdds/docs/pom.xml                           |    4 +-
 hadoop-hdds/docs/static/ozone-logo-monochrome.svg  |    4 +-
 .../themes/ozonedoc/layouts/_default/baseof.html   |    2 +
 .../themes/ozonedoc/layouts/_default/section.html  |    3 +
 .../themes/ozonedoc/layouts/_default/single.html   |    3 +
 .../docs/themes/ozonedoc/layouts/index.html        |   26 +-
 .../themes/ozonedoc/layouts/partials/footer.html   |    9 +
 .../themes/ozonedoc/layouts/partials/header.html   |    4 +-
 .../themes/ozonedoc/layouts/partials/navbar.html   |    4 +-
 .../docs/themes/ozonedoc/static/css/ozonedoc.css   |   30 +-
 .../dev-support/findbugsExcludeFile.xml            |   20 +-
 hadoop-hdds/framework/pom.xml                      |   13 +
 .../hadoop/hdds/protocol/SCMSecurityProtocol.java  |   15 +
 .../SCMSecurityProtocolClientSideTranslatorPB.java |   27 +
 ...lockLocationProtocolClientSideTranslatorPB.java |    2 +
 ...inerLocationProtocolClientSideTranslatorPB.java |   94 +-
 .../hdds/security/token/BlockTokenVerifier.java    |   46 +-
 .../certificate/authority/CertificateServer.java   |   12 +
 .../certificate/authority/CertificateStore.java    |   16 +
 .../certificate/authority/DefaultApprover.java     |    1 +
 .../certificate/authority/DefaultCAServer.java     |   19 +
 .../x509/certificate/client/CertificateClient.java |   12 +
 .../certificate/client/DNCertificateClient.java    |    1 +
 .../client/DefaultCertificateClient.java           |   30 +-
 .../certificate/client/OMCertificateClient.java    |    2 +
 .../hadoop/hdds/security/x509/keys/KeyCodec.java   |    8 +-
 .../server/OzoneProtocolMessageDispatcher.java     |    5 +-
 .../hadoop/hdds/server/http/HtmlQuoting.java       |    2 +-
 .../hadoop/hdds/server/http/HttpServer2.java       |    3 +-
 .../server/http/RatisNameRewriteSampleBuilder.java |    2 +-
 .../apache/hadoop/hdds/utils/HddsServerUtil.java   |   19 +
 .../org/apache/hadoop/hdds/utils/RocksDBStore.java |   12 +-
 .../hadoop/hdds/utils/RocksDBStoreMBean.java       |    2 +-
 .../hadoop/hdds/utils/db/BatchOperation.java       |    1 +
 .../hadoop/hdds/utils/db/ByteArrayKeyValue.java    |    2 +
 .../hadoop/hdds/utils/db/DBConfigFromFile.java     |   16 +-
 .../org/apache/hadoop/hdds/utils/db/DBStore.java   |   14 +-
 .../org/apache/hadoop/hdds/utils/db/RDBStore.java  |   18 +-
 .../org/apache/hadoop/hdds/utils/db/RDBTable.java  |   18 +-
 .../apache/hadoop/hdds/utils/db/TypedTable.java    |   27 +-
 .../hadoop/hdds/utils/db/cache/EpochEntry.java     |    1 +
 .../{TableCacheImpl.java => FullTableCache.java}   |  147 +-
 ...{TableCacheImpl.java => PartialTableCache.java} |  126 +-
 .../hadoop/hdds/utils/db/cache/TableCache.java     |   30 +-
 .../x509/certificate/authority/MockCAStore.java    |   11 +
 .../client/TestCertificateClientInit.java          |    6 +-
 .../client/TestDefaultCertificateClient.java       |   45 +-
 .../x509/certificate/utils/TestCRLCodec.java       |    9 +-
 .../apache/hadoop/hdds/server/TestJsonUtils.java   |   14 +-
 .../server/http/TestRatisDropwizardExports.java    |    9 +-
 .../hadoop/hdds/utils/TestMetadataStore.java       |    4 +-
 .../hadoop/hdds/utils/TestRocksDBStoreMBean.java   |    2 +-
 .../hadoop/hdds/utils/db/TestDBConfigFromFile.java |    4 +-
 ...TestTableCacheImpl.java => TestTableCache.java} |  144 +-
 .../src/main/proto/ScmAdminProtocol.proto          |   60 +-
 .../src/main/proto/DatanodeClientProtocol.proto    |    3 +
 .../interface-client/src/main/proto/hdds.proto     |   21 +-
 .../interface-client/src/main/resources/proto.lock |    8 -
 .../proto/ScmServerDatanodeHeartbeatProtocol.proto |   12 +-
 .../src/main/proto/ScmServerProtocol.proto         |    1 +
 .../src/main/proto/ScmServerSecurityProtocol.proto |   26 +
 .../dev-support/findbugsExcludeFile.xml            |   27 +-
 hadoop-hdds/server-scm/pom.xml                     |    7 +
 .../hadoop/hdds/scm/SCMCommonPlacementPolicy.java  |    4 +-
 .../hadoop/hdds/scm/block/BlockManagerImpl.java    |   11 +-
 .../hadoop/hdds/scm/block/DeletedBlockLogImpl.java |  216 ++-
 .../hdds/scm/block/SCMBlockDeletingService.java    |   11 +-
 .../hdds/scm/container/ContainerReplica.java       |   12 +-
 .../hdds/scm/container/ContainerReplicaCount.java  |  271 ++++
 .../hdds/scm/container/ReplicationManager.java     |  307 +++--
 .../hdds/scm/container/SCMContainerManager.java    |   45 +-
 .../scm/container/states/ContainerStateMap.java    |    4 +-
 .../apache/hadoop/hdds/scm/events/SCMEvents.java   |    6 +
 .../hadoop/hdds/scm/metadata/PipelineCodec.java    |    4 +-
 .../hadoop/hdds/scm/metadata/SCMMetadataStore.java |   14 -
 .../hdds/scm/metadata/SCMMetadataStoreImpl.java    |   30 -
 .../hdds/scm/metadata/X509CertificateCodec.java    |    6 +-
 ...anagerMXBean.java => DatanodeAdminMonitor.java} |   27 +-
 .../hdds/scm/node/DatanodeAdminMonitorImpl.java    |  371 ++++++
 .../apache/hadoop/hdds/scm/node/DatanodeInfo.java  |   52 +-
 .../hadoop/hdds/scm/node/DeadNodeHandler.java      |    7 +-
 .../hdds/scm/node/InvalidHostStringException.java} |   18 +-
 .../hdds/scm/node/InvalidNodeStateException.java}  |   20 +-
 .../hadoop/hdds/scm/node/NewNodeHandler.java       |   20 +
 .../hdds/scm/node/NodeDecommissionManager.java     |  369 ++++++
 .../apache/hadoop/hdds/scm/node/NodeManager.java   |   59 +-
 .../hadoop/hdds/scm/node/NodeManagerMXBean.java    |    2 +-
 .../hadoop/hdds/scm/node/NodeStateManager.java     |  424 +++---
 .../apache/hadoop/hdds/scm/node/NodeStatus.java    |  211 +++
 .../hadoop/hdds/scm/node/SCMNodeManager.java       |  248 +++-
 .../hadoop/hdds/scm/node/SCMNodeMetrics.java       |  104 +-
 .../hdds/scm/node/StartDatanodeAdminHandler.java   |   68 +
 .../hadoop/hdds/scm/node/states/NodeStateMap.java  |  264 +++-
 .../hdds/scm/pipeline/PipelinePlacementPolicy.java |   22 +-
 .../hadoop/hdds/scm/pipeline/PipelineProvider.java |    6 +-
 .../hdds/scm/pipeline/RatisPipelineProvider.java   |    7 +-
 .../hdds/scm/pipeline/SCMPipelineManager.java      |   29 +-
 .../hdds/scm/pipeline/SimplePipelineProvider.java  |    1 -
 .../algorithms/PipelineChoosePolicyFactory.java    |    2 -
 .../SCMSecurityProtocolServerSideTranslatorPB.java |   25 +
 ...lockLocationProtocolServerSideTranslatorPB.java |   22 +-
 ...inerLocationProtocolServerSideTranslatorPB.java |  128 +-
 .../hdds/scm/safemode/ContainerSafeModeRule.java   |   19 +-
 .../hdds/scm/safemode/DataNodeSafeModeRule.java    |    5 +-
 .../scm/safemode/HealthyPipelineSafeModeRule.java  |    7 +-
 .../safemode/OneReplicaPipelineSafeModeRule.java   |   24 +-
 .../hdds/scm/safemode/SCMSafeModeManager.java      |    1 +
 .../hadoop/hdds/scm/server/SCMCertStore.java       |   42 +
 .../hdds/scm/server/SCMClientProtocolServer.java   |  115 +-
 .../hdds/scm/server/SCMContainerMetrics.java       |   11 +-
 .../hdds/scm/server/SCMDatanodeProtocolServer.java |    8 +
 .../apache/hadoop/hdds/scm/server/SCMMXBean.java   |    6 +-
 .../hdds/scm/server/SCMSecurityProtocolServer.java |   32 +-
 .../hdds/scm/server/StorageContainerManager.java   |   54 +-
 .../scm/server/StorageContainerManagerStarter.java |    3 +
 .../main/resources/webapps/scm/scm-overview.html   |   26 +-
 .../src/main/resources/webapps/scm/scm.js          |   25 +-
 .../scm/TestStorageContainerManagerHttpServer.java |    2 +
 .../hadoop/hdds/scm/block/TestBlockManager.java    |   13 +-
 .../hadoop/hdds/scm/block/TestDeletedBlockLog.java |   71 +-
 .../hadoop/hdds/scm/container/MockNodeManager.java |   85 +-
 .../hdds/scm/container/SimpleMockNodeManager.java  |  337 +++++
 .../container/TestCloseContainerEventHandler.java  |    2 +-
 .../scm/container/TestContainerReportHandler.java  |   25 +-
 .../hdds/scm/container/TestReplicationManager.java |  290 +++-
 .../scm/container/TestUnknownContainerReport.java  |    4 +-
 .../algorithms/TestContainerPlacementFactory.java  |   23 +-
 .../TestSCMContainerPlacementCapacity.java         |    4 +-
 .../TestSCMContainerPlacementRackAware.java        |   18 +-
 .../TestSCMContainerPlacementRandom.java           |    4 +-
 .../states/TestContainerReplicaCount.java          |  465 +++++++
 .../hdds/scm/node/TestContainerPlacement.java      |    2 +-
 .../hdds/scm/node/TestDatanodeAdminMonitor.java    |  530 ++++++++
 .../hadoop/hdds/scm/node/TestDeadNodeHandler.java  |   31 +-
 .../hdds/scm/node/TestNodeDecommissionManager.java |  297 +++++
 .../hadoop/hdds/scm/node/TestNodeStateManager.java |  320 +++++
 .../hadoop/hdds/scm/node/TestSCMNodeManager.java   |  251 +++-
 .../hdds/scm/node/TestSCMNodeStorageStatMap.java   |    2 +-
 .../scm/node/states/TestNode2ContainerMap.java     |    4 +-
 .../hdds/scm/node/states/TestNodeStateMap.java     |  189 +++
 .../TestPipelineDatanodesIntersection.java         |    3 +-
 .../scm/pipeline/TestPipelinePlacementPolicy.java  |   13 +-
 .../scm/pipeline/TestRatisPipelineProvider.java    |    5 +-
 .../hdds/scm/pipeline/TestSCMPipelineManager.java  |   29 +-
 ...TestSCMStoreImplWithOldPipelineIDKeyFormat.java |   10 -
 .../choose/algorithms/TestLeaderChoosePolicy.java  |    6 +-
 .../hdds/scm/safemode/TestSCMSafeModeManager.java  |    4 +-
 .../scm/server/TestSCMBlockProtocolServer.java     |    5 +-
 .../hdds/scm/server/TestSCMContainerMetrics.java   |    2 +
 .../server/TestStorageContainerManagerStarter.java |   17 +-
 .../ozone/container/common/TestEndPoint.java       |   51 +-
 .../placement/TestContainerPlacement.java          |    6 +-
 .../testutils/ReplicationNodeManagerMock.java      |   93 +-
 .../hadoop/ozone/scm/node/TestSCMNodeMetrics.java  |   72 +-
 hadoop-hdds/tools/pom.xml                          |    6 +
 .../hdds/scm/cli/ContainerOperationClient.java     |   40 +-
 .../hdds/scm/cli/SafeModeWaitSubcommand.java       |   20 +-
 .../org/apache/hadoop/hdds/scm/cli/ScmOption.java  |   13 +
 .../hadoop/hdds/scm/cli/TopologySubcommand.java    |   43 +-
 .../CertCommands.java}                             |   18 +-
 .../hadoop/hdds/scm/cli/cert/InfoSubcommand.java   |   73 +
 .../hadoop/hdds/scm/cli/cert/ListSubcommand.java   |  102 ++
 .../hdds/scm/cli/cert/ScmCertSubcommand.java       |   33 +-
 .../hadoop/hdds/scm/cli/cert/package-info.java}    |   15 +-
 .../hdds/scm/cli/datanode/DatanodeCommands.java    |    5 +-
 ...deCommands.java => DecommissionSubCommand.java} |   47 +-
 .../hdds/scm/cli/datanode/ListInfoSubcommand.java  |   48 +-
 ...odeCommands.java => MaintenanceSubCommand.java} |   52 +-
 ...deCommands.java => RecommissionSubCommand.java} |   49 +-
 .../scm/cli/datanode/TestListInfoSubcommand.java   |  122 ++
 .../dev-support/findbugsExcludeFile.xml            |    5 +
 hadoop-ozone/client/pom.xml                        |   12 +
 .../org/apache/hadoop/ozone/client/BucketArgs.java |   25 +-
 .../apache/hadoop/ozone/client/OzoneBucket.java    |   62 +-
 .../apache/hadoop/ozone/client/OzoneVolume.java    |   66 +-
 .../org/apache/hadoop/ozone/client/VolumeArgs.java |   30 +-
 .../hadoop/ozone/client/io/KeyInputStream.java     |   11 +-
 .../hadoop/ozone/client/io/OzoneInputStream.java   |   11 +-
 .../ozone/client/protocol/ClientProtocol.java      |   12 +-
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |   77 +-
 .../common/dev-support/findbugsExcludeFile.xml     |   17 +
 .../java/org/apache/hadoop}/ozone/OFSPath.java     |    7 +-
 .../apache/hadoop/ozone/conf/OMClientConfig.java   |   19 +-
 .../org/apache/hadoop/ozone/om/OMConfigKeys.java   |   24 +-
 .../hadoop/ozone/om/exceptions/OMException.java    |    2 +
 .../ozone/om/ha/OMFailoverProxyProvider.java       |    4 +-
 .../org/apache/hadoop/ozone/om/ha/OMProxyInfo.java |    1 +
 .../hadoop/ozone/om/helpers/OmBucketArgs.java      |   36 +-
 .../hadoop/ozone/om/helpers/OmBucketInfo.java      |   58 +-
 .../apache/hadoop/ozone/om/helpers/OmKeyInfo.java  |    9 +-
 .../hadoop/ozone/om/helpers/OmKeyLocationInfo.java |   12 +-
 .../ozone/om/helpers/OmKeyLocationInfoGroup.java   |    6 +-
 .../hadoop/ozone/om/helpers/OmOzoneAclMap.java     |   97 +-
 .../hadoop/ozone/om/helpers/OmVolumeArgs.java      |   73 +-
 .../hadoop/ozone/om/helpers/OzoneFileStatus.java   |    3 +-
 .../hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java |    4 +-
 .../ozone/om/protocol/OzoneManagerProtocol.java    |    4 +-
 ...OzoneManagerProtocolClientSideTranslatorPB.java |   25 +-
 .../hadoop/ozone/protocolPB/OzonePBHelper.java     |    2 +-
 .../ozone/security/OzoneTokenIdentifier.java       |    2 +-
 .../hadoop/ozone/security/acl/RequestContext.java  |   48 +-
 .../org/apache/hadoop/ozone/util/RadixTree.java    |    2 +-
 .../apache/hadoop/ozone/web/utils/OzoneUtils.java  |    4 +-
 .../org/apache/hadoop/ozone/TestOzoneAcls.java     |   14 +-
 .../ozone/om/ha/TestOMFailoverProxyProvider.java   |    6 +-
 .../hadoop/ozone/om/helpers/TestOmKeyInfo.java     |    3 +-
 .../hadoop/ozone/om/helpers/TestOmOzoneAclMap.java |   56 +
 .../hadoop/ozone/om/lock/TestOzoneManagerLock.java |    8 +-
 .../apache/hadoop/ozone/util/TestRadixTree.java    |    2 +-
 .../org/apache/hadoop/ozone/csi/NodeService.java   |    5 +-
 hadoop-ozone/datanode/pom.xml                      |   16 +-
 hadoop-ozone/dev-support/checks/acceptance.sh      |    2 +-
 hadoop-ozone/dev-support/checks/blockade.sh        |    2 +-
 hadoop-ozone/dev-support/checks/hadolint.sh        |   28 +-
 hadoop-ozone/dev-support/checks/kubernetes.sh      |    2 +-
 hadoop-ozone/dev-support/intellij/ozone-site.xml   |    4 +
 .../dist/dev-support/bin/dist-layout-stitching     |   14 +-
 .../compose/{ozone-om-ha-s3 => compatibility}/.env |    1 -
 .../{ozone => compatibility}/docker-compose.yaml   |    5 -
 .../compose/{ozone => compatibility}/docker-config |    8 +-
 .../{ozone-om-ha-s3 => compatibility}/test.sh      |   14 +-
 hadoop-ozone/dist/src/main/compose/ozone-csi/.env  |    2 +-
 .../src/main/compose/ozone-csi/docker-compose.yaml |    8 +-
 .../dist/src/main/compose/ozone-csi/docker-config  |    3 +
 hadoop-ozone/dist/src/main/compose/ozone-ha/.env   |    2 +-
 .../dist/src/main/compose/ozone-ha/docker-config   |    5 +-
 .../dist/src/main/compose/ozone-ha/test.sh         |    4 +
 .../dist/src/main/compose/ozone-mr/common-config   |    3 +
 .../compose/ozone-mr/hadoop27/docker-compose.yaml  |    5 +-
 .../main/compose/ozone-mr/hadoop27/docker-config   |    3 +
 .../dist/src/main/compose/ozone-mr/hadoop31/.env   |    2 +-
 .../compose/ozone-mr/hadoop31/docker-compose.yaml  |   13 +-
 .../main/compose/ozone-mr/hadoop31/docker-config   |    3 +
 .../dist/src/main/compose/ozone-mr/hadoop32/.env   |    4 +-
 .../compose/ozone-mr/hadoop32/docker-compose.yaml  |   11 +-
 .../main/compose/ozone-mr/hadoop32/docker-config   |    3 +
 .../dist/src/main/compose/ozone-mr/test.sh         |    1 +
 .../compose/ozone-om-ha-s3/docker-compose.yaml     |   99 --
 .../src/main/compose/ozone-om-ha-s3/docker-config  |   38 -
 .../src/main/compose/ozone-om-ha/docker-config     |    3 +
 .../dist/src/main/compose/ozone-topology/.env      |    2 +-
 .../compose/ozone-topology/docker-compose.yaml     |   16 +-
 .../src/main/compose/ozone-topology/docker-config  |    3 +
 hadoop-ozone/dist/src/main/compose/ozone/.env      |    2 +-
 hadoop-ozone/dist/src/main/compose/ozone/README.md |    2 +-
 .../src/main/compose/ozone/docker-compose.yaml     |   10 +-
 .../dist/src/main/compose/ozone/docker-config      |    5 +
 hadoop-ozone/dist/src/main/compose/ozone/test.sh   |    2 +
 .../src/main/compose/ozoneblockade/docker-config   |    3 +
 .../src/main/compose/ozones3-haproxy/docker-config |    3 +
 .../src/main/compose/ozonescripts/docker-config    |    3 +
 .../dist/src/main/compose/ozonescripts/start.sh    |    8 +-
 .../dist/src/main/compose/ozonescripts/stop.sh     |    2 +-
 .../{ozone-om-ha-s3 => ozonescripts}/test.sh       |   20 +-
 .../dist/src/main/compose/ozonesecure-mr/.env      |    4 +-
 .../compose/ozonesecure-mr/docker-compose.yaml     |   16 +-
 .../src/main/compose/ozonesecure-mr/docker-config  |    5 +
 .../dist/src/main/compose/ozonesecure-om-ha/.env   |    2 +-
 .../compose/ozonesecure-om-ha/docker-compose.yaml  |   20 +-
 .../main/compose/ozonesecure-om-ha/docker-config   |    3 +
 .../dist/src/main/compose/ozonesecure/.env         |    2 +-
 .../main/compose/ozonesecure/docker-compose.yaml   |   28 +-
 .../src/main/compose/ozonesecure/docker-config     |    8 +-
 .../src/main/compose/{ozone-csi => restart}/.env   |    4 +-
 .../src/main/compose/restart/README.md}            |   19 +-
 .../{upgrade => restart}/docker-compose.yaml       |    7 +
 .../main/compose/{ozone => restart}/docker-config  |    7 +-
 .../src/main/compose/{upgrade => restart}/test.sh  |   27 +-
 hadoop-ozone/dist/src/main/compose/test-all.sh     |    2 +-
 hadoop-ozone/dist/src/main/compose/testlib.sh      |   54 +-
 .../src/main/compose/upgrade/docker-compose.yaml   |    7 +
 .../dist/src/main/compose/upgrade/docker-config    |    3 +
 hadoop-ozone/dist/src/main/compose/upgrade/test.sh |    8 +-
 .../src/main/compose/{ozone-ha => xcompat}/.env    |    1 -
 .../docker-compose.yaml => xcompat/clients.yaml}   |   50 +-
 .../main/compose/{ozone => xcompat}/docker-config  |   25 +-
 .../new-cluster.yaml}                              |   53 +-
 .../old-cluster.yaml}                              |   55 +-
 hadoop-ozone/dist/src/main/compose/xcompat/test.sh |   93 ++
 .../dist/src/main/dockerlibexec/transformation.py  |    8 +-
 .../src/main/k8s/definitions/ozone/config.yaml     |    1 +
 .../src/main/smoketest/basic/ozone-shell-lib.robot |   42 +-
 .../dist/src/main/smoketest/cli/classpath.robot    |   46 +
 .../dist/src/main/smoketest/cli/envvars.robot      |   78 ++
 .../smoketest/compatibility/dn.robot}              |   23 +-
 .../smoketest/compatibility/om.robot}              |   23 +-
 .../smoketest/compatibility/read.robot}            |   13 +-
 .../smoketest/compatibility/recon.robot}           |   15 +-
 .../src/main/smoketest/compatibility/scm.robot}    |   23 +-
 .../smoketest/compatibility/write.robot}           |   13 +-
 .../dist/src/main/smoketest/createbucketenv.robot  |    2 +-
 .../dist/src/main/smoketest/createmrenv.robot      |    2 +-
 .../src/main/smoketest/debug/ozone-debug.robot     |    2 +-
 .../dist/src/main/smoketest/freon/freon.robot      |   37 -
 .../{topology/cli.robot => freon/generate.robot}   |   26 +-
 .../smoketest/freon/validate.robot}                |   14 +-
 .../dist/src/main/smoketest/gdpr/gdpr.robot        |    2 +-
 hadoop-ozone/dist/src/main/smoketest/lib/os.robot  |    4 +
 .../dist/src/main/smoketest/mapreduce.robot        |    2 +-
 .../dist/src/main/smoketest/ozone-lib/shell.robot  |   12 +-
 .../dist/src/main/smoketest/ozonefs/ozonefs.robot  |   15 +-
 .../cli.robot => security/admin-cert.robot}        |   29 +-
 .../dist/src/main/smoketest/spnego/web.robot       |   10 +-
 .../dist/src/main/smoketest/topology/cli.robot     |    4 +-
 hadoop-ozone/dist/src/shell/hdds/hadoop-config.cmd |  317 -----
 hadoop-ozone/dist/src/shell/hdds/hadoop-config.sh  |  165 ---
 hadoop-ozone/dist/src/shell/hdds/workers.sh        |   47 +-
 hadoop-ozone/dist/src/shell/ozone/ozone            |  264 ++--
 hadoop-ozone/dist/src/shell/ozone/ozone-config.sh  |   99 +-
 .../ozone-functions.sh}                            | 1397 ++++++++++----------
 hadoop-ozone/dist/src/shell/ozone/start-ozone.sh   |   84 +-
 hadoop-ozone/dist/src/shell/ozone/stop-ozone.sh    |   60 +-
 .../shell/shellprofile.d/hadoop-ozone-manager.sh   |    8 +-
 .../dist/src/shell/shellprofile.d/hadoop-ozone.sh  |    6 +-
 hadoop-ozone/dist/src/test/shell/gc_opts.bats      |   40 +-
 .../shell/ozone-functions_test_helper.bash}        |   29 +-
 .../shell/ozone_set_var_for_compatibility.bats     |   86 ++
 .../dev-support/findbugsExcludeFile.xml            |    5 +
 .../fault-injection-test/mini-chaos-tests/pom.xml  |   13 +
 .../apache/hadoop/ozone/MiniOzoneChaosCluster.java |    6 +
 .../hadoop/ozone/failure/FailureManager.java       |    2 +-
 .../org/apache/hadoop/ozone/failure/Failures.java  |    4 +
 .../hadoop/ozone/loadgenerators/LoadGenerator.java |    4 +-
 .../ozone/loadgenerators/RandomLoadGenerator.java  |    1 +
 .../loadgenerators/ReadOnlyLoadGenerator.java      |    3 +-
 .../ozone/insight/TestConfigurationSubCommand.java |    8 +-
 .../dev-support/findbugsExcludeFile.xml            |  131 ++
 hadoop-ozone/integration-test/pom.xml              |   32 +
 .../fs/contract/AbstractContractUnbufferTest.java  |  160 +++
 .../hadoop/fs/ozone/TestOzoneFSInputStream.java    |    2 +-
 .../fs/ozone/TestOzoneFSWithObjectStoreCreate.java |   18 +-
 .../hadoop/fs/ozone/TestOzoneFileInterfaces.java   |    1 -
 .../hadoop/fs/ozone/TestOzoneFileSystem.java       |  361 ++---
 ...Stream.java => TestOzoneFileSystemMetrics.java} |  127 +-
 .../fs/ozone/TestOzoneFileSystemMissingParent.java |  127 ++
 .../hadoop/fs/ozone/TestRootedOzoneFileSystem.java |   41 +-
 .../contract/ITestOzoneContractUnbuffer.java}      |   41 +-
 .../rooted/ITestRootedOzoneContractUnbuffer.java}  |   40 +-
 .../metrics/TestSCMContainerManagerMetrics.java    |    3 +-
 .../hdds/scm/pipeline/TestLeaderChoosePolicy.java  |    8 +-
 .../hdds/scm/pipeline/TestNode2PipelineMap.java    |   12 +-
 .../hadoop/hdds/scm/pipeline/TestNodeFailure.java  |    2 +
 .../TestRatisPipelineCreateAndDestroy.java         |    7 +-
 .../hadoop/hdds/scm/pipeline/TestSCMRestart.java   |    2 +
 .../safemode/TestSCMSafeModeWithPipelineRules.java |    2 +-
 .../hadoop/hdds/upgrade/TestHDDSUpgrade.java       |   10 +-
 .../org/apache/hadoop/ozone/MiniOzoneCluster.java  |    1 +
 .../apache/hadoop/ozone/MiniOzoneClusterImpl.java  |   28 +-
 .../hadoop/ozone/MiniOzoneHAClusterImpl.java       |   27 +-
 .../TestContainerStateMachineIdempotency.java      |    6 +-
 .../java/org/apache/hadoop/ozone/TestDataUtil.java |    6 +-
 .../apache/hadoop/ozone/TestDelegationToken.java   |    4 -
 .../apache/hadoop/ozone/TestMiniOzoneCluster.java  |   89 +-
 .../hadoop/ozone/TestMiniOzoneHACluster.java       |    7 +-
 .../hadoop/ozone/TestOzoneConfigurationFields.java |    5 +-
 .../hadoop/ozone/TestSecureOzoneCluster.java       |    7 -
 .../hadoop/ozone/TestStorageContainerManager.java  |   11 +-
 .../ozone/TestStorageContainerManagerHelper.java   |   29 +
 .../ozone/client/CertificateClientTestImpl.java    |   11 +
 .../apache/hadoop/ozone/client/rpc/TestBCSID.java  |    3 +-
 .../rpc/TestBlockOutputStreamWithFailures.java     |   19 +-
 ...estBlockOutputStreamWithFailuresFlushDelay.java |   16 +-
 .../rpc/TestCloseContainerHandlingByClient.java    |    4 +-
 .../hadoop/ozone/client/rpc/TestCommitWatcher.java |   44 +-
 .../rpc/TestContainerReplicationEndToEnd.java      |    3 +-
 .../client/rpc/TestContainerStateMachine.java      |   15 +-
 .../TestContainerStateMachineFailureOnRead.java    |    3 +-
 .../rpc/TestContainerStateMachineFailures.java     |   23 +-
 .../rpc/TestContainerStateMachineFlushDelay.java   |   12 +-
 .../client/rpc/TestDeleteWithSlowFollower.java     |    4 +-
 .../client/rpc/TestDiscardPreallocatedBlocks.java  |    1 +
 .../client/rpc/TestFailureHandlingByClient.java    |   40 +-
 .../rpc/TestFailureHandlingByClientFlushDelay.java |   11 +-
 .../client/rpc/TestHybridPipelineOnDatanode.java   |    7 +-
 .../ozone/client/rpc/TestKeyInputStream.java       |  220 ++-
 .../rpc/TestMultiBlockWritesWithDnFailures.java    |   24 +-
 .../client/rpc/TestOzoneAtRestEncryption.java      |   20 +-
 ...estOzoneClientRetriesOnExceptionFlushDelay.java |    2 +-
 ...ava => TestOzoneClientRetriesOnExceptions.java} |   36 +-
 .../client/rpc/TestOzoneRpcClientAbstract.java     |  476 ++++---
 .../client/rpc/TestOzoneRpcClientWithRatis.java    |   21 +-
 .../hadoop/ozone/client/rpc/TestReadRetries.java   |   12 +-
 .../ozone/client/rpc/TestSecureOzoneRpcClient.java |   21 +-
 .../client/rpc/TestValidateBCSIDOnRestart.java     |    9 +-
 .../ozone/client/rpc/TestWatchForCommit.java       |   29 +-
 .../ozone/container/TestContainerReplication.java  |  203 ++-
 .../apache/hadoop/ozone/container/TestHelper.java  |   85 +-
 .../commandhandler/TestBlockDeletion.java          |   20 +-
 .../TestCloseContainerByPipeline.java              |    9 +-
 .../commandhandler/TestCloseContainerHandler.java  |    3 +-
 .../commandhandler/TestDeleteContainerHandler.java |    4 +-
 .../transport/server/ratis/TestCSMMetrics.java     |    2 -
 .../container/metrics/TestContainerMetrics.java    |   13 +-
 .../ozoneimpl/TestOzoneContainerWithTLS.java       |    2 +-
 .../container/server/TestContainerServer.java      |   84 +-
 .../server/TestSecureContainerServer.java          |   62 +-
 .../ozone/dn/ratis/TestDnRatisLogParser.java       |   12 +-
 .../hadoop/ozone/dn/scrubber/TestDataScrubber.java |   10 +-
 .../ozone/om/TestContainerReportWithKeys.java      |    4 +-
 .../apache/hadoop/ozone/om/TestKeyManagerImpl.java |   22 +-
 .../org/apache/hadoop/ozone/om/TestKeyPurging.java |    6 +-
 .../hadoop/ozone/om/TestOMDbCheckpointServlet.java |   19 +-
 ...gerRestart.java => TestOMEpochForNonRatis.java} |  157 +--
 .../hadoop/ozone/om/TestOMRatisSnapshots.java      |   24 +-
 .../hadoop/ozone/om/TestOmBlockVersioning.java     |    6 -
 .../org/apache/hadoop/ozone/om/TestOmLDBCli.java   |   18 +-
 .../ozone/om/TestOzoneManagerConfiguration.java    |   13 +-
 .../apache/hadoop/ozone/om/TestOzoneManagerHA.java |   23 +-
 .../ozone/om/TestOzoneManagerHAMetadataOnly.java   |   61 +-
 .../ozone/om/TestOzoneManagerHAWithData.java       |   16 +-
 .../hadoop/ozone/om/TestOzoneManagerPrepare.java   |    4 +-
 .../hadoop/ozone/om/TestOzoneManagerRestart.java   |  104 +-
 .../apache/hadoop/ozone/om/TestScmSafeMode.java    |   12 +-
 .../ozone/om/parser/TestOMRatisLogParser.java      |   25 +-
 .../snapshot/TestOzoneManagerSnapshotProvider.java |    2 -
 .../ozone/recon/TestReconWithOzoneManagerHA.java   |   23 +-
 .../hadoop/ozone/scm/TestContainerSmallFile.java   |   10 +-
 .../scm/TestGetCommittedBlockLengthAndPutKey.java  |    6 +-
 .../TestSCMContainerPlacementPolicyMetrics.java    |    9 +-
 .../org/apache/hadoop/ozone/scm/TestSCMMXBean.java |    4 +-
 .../hadoop/ozone/scm/TestSCMNodeManagerMXBean.java |   30 +-
 .../hadoop/ozone/scm/TestXceiverClientManager.java |    6 +-
 .../scm/node/TestDecommissionAndMaintenance.java   |  709 ++++++++++
 .../hadoop/ozone/scm/node/TestQueryNode.java       |   67 +-
 .../scm/pipeline/TestPipelineManagerMXBean.java    |    2 +-
 .../TestSCMPipelineBytesWrittenMetrics.java        |    8 +-
 .../hadoop/ozone/shell/TestOzoneShellHA.java       |  253 +++-
 .../src/test/resources/contract/ozone.xml          |    5 +
 .../src/main/proto/OmClientProtocol.proto          |   30 +-
 .../dev-support/findbugsExcludeFile.xml            |   14 +
 .../apache/hadoop/ozone/om/OMMetadataManager.java  |   10 +
 .../hadoop/ozone/om/codec/OmKeyInfoCodec.java      |    6 +-
 .../ozone/om/codec/RepeatedOmKeyInfoCodec.java     |    6 +-
 .../hadoop/ozone/om/codec/TestOmKeyInfoCodec.java  |   13 +-
 .../om/codec/TestOmMultipartKeyInfoCodec.java      |    4 +-
 .../ozone/om/codec/TestRepeatedOmKeyInfoCodec.java |   13 +-
 .../dev-support/findbugsExcludeFile.xml            |   80 ++
 hadoop-ozone/ozone-manager/pom.xml                 |    7 +
 .../apache/hadoop/ozone/om/KeyDeletingService.java |   19 +-
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  163 ++-
 .../hadoop/ozone/om/OMDBCheckpointServlet.java     |   60 +
 .../java/org/apache/hadoop/ozone/om/OMMetrics.java |   89 ++
 .../hadoop/ozone/om/OmMetadataManagerImpl.java     |   25 +-
 .../hadoop/ozone/om/OpenKeyCleanupService.java     |    2 +-
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |  151 ++-
 .../hadoop/ozone/om/OzoneManagerStarter.java       |    1 +
 .../org/apache/hadoop/ozone/om/OzoneTrash.java     |   39 +-
 .../hadoop/ozone/om/TrashOzoneFileSystem.java      |  534 ++++++++
 .../apache/hadoop/ozone/om/TrashPolicyOzone.java   |  108 +-
 .../hadoop/ozone/om/codec/OMDBDefinition.java      |   38 +-
 .../apache/hadoop/ozone/om/ha/OMHANodeDetails.java |   14 +-
 .../ozone/om/ratis/OzoneManagerDoubleBuffer.java   |   38 +-
 .../ozone/om/ratis/OzoneManagerRatisServer.java    |  225 +---
 .../om/ratis/OzoneManagerRatisServerConfig.java    |   54 +
 .../ozone/om/ratis/OzoneManagerStateMachine.java   |    1 -
 .../metrics/OzoneManagerDoubleBufferMetrics.java   |    2 +-
 .../hadoop/ozone/om/request/OMClientRequest.java   |   36 +-
 .../om/request/bucket/OMBucketCreateRequest.java   |   35 +-
 .../om/request/bucket/OMBucketDeleteRequest.java   |   17 +-
 .../request/bucket/OMBucketSetPropertyRequest.java |   29 +-
 .../om/request/bucket/acl/OMBucketAclRequest.java  |   26 +-
 .../request/bucket/acl/OMBucketAddAclRequest.java  |   20 +-
 .../bucket/acl/OMBucketRemoveAclRequest.java       |   20 +-
 .../request/bucket/acl/OMBucketSetAclRequest.java  |   20 +-
 .../om/request/file/OMDirectoryCreateRequest.java  |   13 +-
 .../ozone/om/request/file/OMFileCreateRequest.java |   19 +-
 .../om/request/key/OMAllocateBlockRequest.java     |    8 +-
 .../ozone/om/request/key/OMKeyCommitRequest.java   |   19 +-
 .../ozone/om/request/key/OMKeyCreateRequest.java   |   21 +-
 .../ozone/om/request/key/OMKeyDeleteRequest.java   |    9 +-
 .../ozone/om/request/key/OMKeyRenameRequest.java   |    3 +-
 .../hadoop/ozone/om/request/key/OMKeyRequest.java  |   61 +-
 .../ozone/om/request/key/OMKeysDeleteRequest.java  |    6 +-
 .../ozone/om/request/key/acl/OMKeyAclRequest.java  |   16 +-
 .../om/request/key/acl/OMKeyAddAclRequest.java     |   24 +-
 .../om/request/key/acl/OMKeyRemoveAclRequest.java  |   24 +-
 .../om/request/key/acl/OMKeySetAclRequest.java     |   24 +-
 .../request/key/acl/prefix/OMPrefixAclRequest.java |    9 +-
 .../key/acl/prefix/OMPrefixAddAclRequest.java      |   13 +-
 .../key/acl/prefix/OMPrefixRemoveAclRequest.java   |   13 +-
 .../key/acl/prefix/OMPrefixSetAclRequest.java      |   13 +-
 .../multipart/S3MultipartUploadAbortRequest.java   |    5 +-
 .../S3MultipartUploadCommitPartRequest.java        |   10 +-
 .../ozone/om/request/upgrade/OMPrepareRequest.java |    2 +-
 .../om/request/volume/OMVolumeSetQuotaRequest.java |   16 +-
 .../om/request/volume/acl/OMVolumeAclRequest.java  |   20 +-
 .../request/volume/acl/OMVolumeAddAclRequest.java  |   19 +-
 .../volume/acl/OMVolumeRemoveAclRequest.java       |   19 +-
 .../request/volume/acl/OMVolumeSetAclRequest.java  |   20 +-
 .../om/response/bucket/OMBucketCreateResponse.java |   17 +
 .../om/response/bucket/OMBucketDeleteResponse.java |   18 +
 .../om/response/file/OMFileCreateResponse.java     |    5 +-
 .../om/response/key/OMAllocateBlockResponse.java   |    9 +-
 .../ozone/om/response/key/OMKeyCommitResponse.java |    7 +-
 .../ozone/om/response/key/OMKeyCreateResponse.java |   10 +-
 .../ozone/om/response/key/OMKeyDeleteResponse.java |    7 +-
 .../om/response/key/OMKeysDeleteResponse.java      |    9 +-
 .../om/response/key/OMOpenKeysDeleteRequest.java   |    1 +
 .../multipart/S3MultipartUploadAbortResponse.java  |    8 +-
 .../S3MultipartUploadCommitPartResponse.java       |    8 +-
 ...OzoneManagerProtocolServerSideTranslatorPB.java |   57 +-
 .../protocolPB/OzoneManagerRequestHandler.java     |   37 +-
 .../hadoop/ozone/security/AWSV4AuthValidator.java  |   11 +-
 .../security/OzoneBlockTokenSecretManager.java     |    8 +-
 .../OzoneDelegationTokenSecretManager.java         |   38 +-
 .../ozone/security/acl/OzoneNativeAuthorizer.java  |    1 +
 .../apache/hadoop/ozone/om/TestChunkStreams.java   |    4 +-
 .../apache/hadoop/ozone/om/TestKeyManagerUnit.java |  135 +-
 .../apache/hadoop/ozone/om/TestOMDBDefinition.java |   74 ++
 .../hadoop/ozone/om/TestOmMetadataManager.java     |   17 +-
 .../ozone/om/TestOzoneManagerHttpServer.java       |    2 +
 .../hadoop/ozone/om/TestOzoneManagerStarter.java   |   15 +-
 .../hadoop/ozone/om/failover/TestOMFailovers.java  |   11 +-
 ...tOzoneManagerDoubleBufferWithDummyResponse.java |    2 +-
 .../om/ratis/TestOzoneManagerRatisServer.java      |   17 +-
 .../ozone/om/request/TestOMRequestUtils.java       |   14 +-
 .../bucket/TestOMBucketSetPropertyRequest.java     |    7 +-
 .../request/file/TestOMDirectoryCreateRequest.java |   45 +-
 .../om/request/file/TestOMFileCreateRequest.java   |    4 +-
 .../om/request/key/TestOMAllocateBlockRequest.java |    4 +-
 .../om/request/key/TestOMKeyCreateRequest.java     |    4 +-
 .../ozone/om/request/key/TestOMKeyRequest.java     |    9 +-
 .../request/key/TestOMOpenKeysDeleteRequest.java   |    5 +-
 .../security/TestOMGetDelegationTokenRequest.java  |    6 +-
 .../volume/TestOMVolumeSetQuotaRequest.java        |   25 +-
 .../volume/acl/TestOMVolumeAddAclRequest.java      |    1 -
 .../volume/acl/TestOMVolumeSetAclRequest.java      |    1 -
 .../response/key/TestOMAllocateBlockResponse.java  |   11 +-
 .../om/response/key/TestOMKeyCommitResponse.java   |   11 +-
 .../om/response/key/TestOMKeyCreateResponse.java   |   11 +-
 .../om/response/key/TestOMKeyDeleteResponse.java   |   17 +-
 .../ozone/om/response/key/TestOMKeyResponse.java   |    3 +
 .../om/response/key/TestOMKeysDeleteResponse.java  |   13 +-
 .../response/key/TestOMOpenKeysDeleteResponse.java |    3 +-
 .../s3/multipart/TestS3MultipartResponse.java      |    6 +-
 .../TestS3MultipartUploadAbortResponse.java        |   18 +-
 .../security/TestOzoneBlockTokenSecretManager.java |  197 ++-
 .../TestOzoneDelegationTokenSecretManager.java     |   38 +-
 .../ozone/security/TestOzoneTokenIdentifier.java   |    2 +-
 .../security/acl/TestOzoneNativeAuthorizer.java    |    8 +-
 .../ozone/security/acl/TestRequestContext.java     |   94 ++
 .../fs/ozone/BasicOzoneClientAdapterImpl.java      |    4 +-
 .../hadoop/fs/ozone/BasicOzoneFileSystem.java      |   31 +-
 .../ozone/BasicRootedOzoneClientAdapterImpl.java   |    5 +-
 .../fs/ozone/BasicRootedOzoneFileSystem.java       |   32 +-
 .../hadoop/fs/ozone/CapableOzoneFSInputStream.java |    3 +-
 .../org/apache/hadoop/fs/ozone/O3fsDtFetcher.java  |    3 +
 .../apache/hadoop/fs/ozone/OzoneClientAdapter.java |    2 +-
 .../apache/hadoop/fs/ozone/OzoneFSInputStream.java |   10 +-
 .../org/apache/hadoop/fs/ozone/OzoneFsShell.java   |    1 +
 .../hadoop/fs/ozone/OzoneStreamCapabilities.java   |    9 +
 .../hadoop/fs/ozone/TestBasicOzoneFileSystems.java |   89 ++
 .../org/apache/hadoop/fs/ozone/TestOFSPath.java    |    1 +
 .../hadoop/fs/ozone/TestOzoneFSInputStream.java    |   20 +-
 hadoop-ozone/ozonefs-hadoop2/pom.xml               |    8 +
 hadoop-ozone/ozonefs-hadoop3/pom.xml               |    8 +
 hadoop-ozone/pom.xml                               |    2 +-
 .../org/hadoop/ozone/recon/codegen/SqlDbUtils.java |    7 +-
 .../recon/schema/ContainerSchemaDefinition.java    |   19 -
 .../recon/dev-support/findbugsExcludeFile.xml      |   26 +
 .../hadoop/ozone/recon/ReconControllerModule.java  |   19 +-
 .../hadoop/ozone/recon/ReconRestServletModule.java |    6 +-
 .../org/apache/hadoop/ozone/recon/ReconUtils.java  |    2 +-
 .../ozone/recon/api/ClusterStateEndpoint.java      |    6 +-
 .../hadoop/ozone/recon/api/ContainerEndpoint.java  |   25 +-
 .../hadoop/ozone/recon/api/NodeEndpoint.java       |    7 +-
 .../recon/api/types/MissingContainerMetadata.java  |    2 +-
 .../api/types/UnhealthyContainerMetadata.java      |    2 +-
 .../codec/ContainerReplicaHistoryListCodec.java    |   86 ++
 .../ozone/recon/fsck/ContainerHealthTask.java      |   39 +-
 ...ager.java => ContainerHealthSchemaManager.java} |   51 +-
 .../ozone/recon/persistence/ContainerHistory.java  |   79 ++
 .../ozone/recon/scm/ContainerReplicaHistory.java   |   62 +
 .../recon/scm/ContainerReplicaHistoryList.java}    |   35 +-
 .../ozone/recon/scm/ReconContainerManager.java     |  215 ++-
 .../recon/scm/ReconDatanodeProtocolServer.java     |    1 +
 .../hadoop/ozone/recon/scm/ReconNodeManager.java   |    9 +-
 .../scm/ReconStorageContainerManagerFacade.java    |   23 +-
 .../recon/spi/ContainerDBServiceProvider.java      |   33 +
 .../spi/impl/ContainerDBServiceProviderImpl.java   |   91 +-
 .../recon/spi/impl/ContainerKeyPrefixCodec.java    |    2 +-
 .../spi/impl/OzoneManagerServiceProviderImpl.java  |    4 +-
 .../ozone/recon/spi/impl/ReconDBDefinition.java    |   14 +-
 .../ozone/recon/tasks/ContainerKeyMapperTask.java  |    7 +-
 .../ozone/recon/tasks/FileSizeCountTask.java       |    6 +-
 .../ozone/recon/tasks/OMUpdateEventBatch.java      |   21 +-
 .../hadoop/ozone/recon/tasks/ReconOmTask.java      |    9 -
 .../ozone/recon/tasks/ReconTaskControllerImpl.java |    8 +-
 .../hadoop/ozone/recon/tasks/TableCountTask.java   |    7 +-
 .../apache/hadoop/ozone/recon/TestReconUtils.java  |   30 +-
 .../ozone/recon/api/TestContainerEndpoint.java     |  194 ++-
 .../hadoop/ozone/recon/api/TestEndpoints.java      |   47 +-
 .../ozone/recon/fsck/TestContainerHealthTask.java  |   12 +-
 .../TestContainerHealthTaskRecordGenerator.java    |    8 +-
 .../scm/AbstractReconContainerManagerTest.java     |    6 +-
 .../ozone/recon/scm/TestReconContainerManager.java |  102 +-
 .../ozone/recon/scm/TestReconNodeManager.java      |   27 +
 .../impl/TestContainerDBServiceProviderImpl.java   |   15 +-
 .../impl/TestOzoneManagerServiceProviderImpl.java  |   28 +-
 .../hadoop/ozone/recon/tasks/DummyReconDBTask.java |    1 -
 .../ozone/recon/tasks/TestFileSizeCountTask.java   |    8 +
 .../ozone/recon/tasks/TestOMDBUpdatesHandler.java  |    7 +-
 .../recon/tasks/TestReconTaskControllerImpl.java   |    7 -
 .../ozone/recon/tasks/TestTableCountTask.java      |    5 -
 .../dev-support/findbugsExcludeFile.xml            |    8 +-
 hadoop-ozone/s3gateway/pom.xml                     |    8 +
 .../hadoop/ozone/s3/AWSSignatureProcessor.java     |   12 +-
 .../hadoop/ozone/s3/OzoneClientProducer.java       |    6 +-
 .../apache/hadoop/ozone/s3/SignatureProcessor.java |    2 -
 .../hadoop/ozone/s3/endpoint/BucketEndpoint.java   |   49 +-
 .../hadoop/ozone/s3/endpoint/EndpointBase.java     |   29 +-
 .../hadoop/ozone/s3/endpoint/ObjectEndpoint.java   |   23 +-
 .../hadoop/ozone/s3/exception/S3ErrorTable.java    |    6 +-
 .../ozone/s3/header/AuthorizationHeaderV2.java     |    2 +-
 .../ozone/s3/header/AuthorizationHeaderV4.java     |    8 +-
 .../apache/hadoop/ozone/s3/util/RangeHeader.java   |    2 +-
 .../hadoop/ozone/client/ObjectStoreStub.java       |    2 +-
 .../hadoop/ozone/client/OzoneBucketStub.java       |    6 +-
 .../hadoop/ozone/client/OzoneVolumeStub.java       |    5 +-
 .../ozone/s3/TestSignedChunksInputStream.java      |   24 +-
 .../ozone/s3/TestVirtualHostStyleFilter.java       |    4 +-
 .../hadoop/ozone/s3/endpoint/TestBucketGet.java    |    5 +-
 .../hadoop/ozone/s3/endpoint/TestBucketPut.java    |    2 -
 .../hadoop/ozone/s3/endpoint/TestListParts.java    |    8 +-
 .../s3/endpoint/TestMultipartUploadComplete.java   |    9 +-
 .../s3/endpoint/TestMultipartUploadWithCopy.java   |   19 +-
 .../hadoop/ozone/s3/endpoint/TestObjectPut.java    |   30 +-
 .../hadoop/ozone/s3/endpoint/TestPartUpload.java   |   12 +-
 .../ozone/s3/endpoint/TestPermissionCheck.java     |  268 ++++
 ...estOS3Exception.java => TestOS3Exceptions.java} |   18 +-
 .../ozone/s3/header/TestAuthorizationHeaderV4.java |    4 +-
 .../tools/dev-support/findbugsExcludeFile.xml      |   20 +-
 .../org/apache/hadoop/ozone/admin/om/OMAdmin.java  |   16 +-
 .../audit/parser/handler/LoadCommandHandler.java   |    1 +
 .../audit/parser/handler/QueryCommandHandler.java  |    1 +
 .../parser/handler/TemplateCommandHandler.java     |    1 +
 .../apache/hadoop/ozone/debug/ChunkKeyHandler.java |    4 -
 .../hadoop/ozone/debug/DBDefinitionFactory.java    |   38 +-
 .../org/apache/hadoop/ozone/debug/DBScanner.java   |   10 +-
 .../hadoop/ozone/freon/BaseFreonGenerator.java     |   19 +-
 .../ozone/freon/ClosedContainerReplicator.java     |  213 +++
 .../hadoop/ozone/freon/DatanodeChunkGenerator.java |  147 +-
 .../java/org/apache/hadoop/ozone/freon/Freon.java  |    3 +-
 .../hadoop/ozone/freon/HadoopFsGenerator.java      |   45 +-
 .../hadoop/ozone/freon/RandomKeyGenerator.java     |    2 +-
 .../apache/hadoop/ozone/fsck/ContainerMapper.java  |    4 +-
 .../ozone/genesis/BenchMarkMetadataStoreReads.java |    8 +-
 .../genesis/BenchMarkMetadataStoreWrites.java      |    6 +-
 .../ozone/genesis/BenchMarkRocksDbStore.java       |    8 +-
 .../apache/hadoop/ozone/genesis/BenchMarkSCM.java  |    2 +-
 .../org/apache/hadoop/ozone/scm/cli/SQLCLI.java    |    2 -
 .../hadoop/ozone/shell/ClearSpaceQuotaOptions.java |   10 +-
 .../org/apache/hadoop/ozone/shell/Handler.java     |    4 +
 .../hadoop/ozone/shell/SetSpaceQuotaOptions.java   |    8 +-
 .../java/org/apache/hadoop/ozone/shell/Shell.java  |    2 +-
 .../ozone/shell/bucket/ClearQuotaHandler.java      |    4 +-
 .../ozone/shell/bucket/CreateBucketHandler.java    |   13 +-
 .../hadoop/ozone/shell/bucket/SetQuotaHandler.java |   27 +-
 .../ozone/shell/volume/ClearQuotaHandler.java      |    4 +-
 .../ozone/shell/volume/CreateVolumeHandler.java    |   13 +-
 .../hadoop/ozone/shell/volume/SetQuotaHandler.java |   26 +-
 .../hadoop/ozone/audit/parser/TestAuditParser.java |   15 +-
 .../hadoop/ozone/conf/TestGetConfOptions.java      |   29 +-
 .../ozone/debug/TestDBDefinitionFactory.java       |   13 +-
 .../hadoop/ozone/freon/TestContentGenerator.java   |    1 -
 .../TestGenerateOzoneRequiredConfigurations.java   |   13 +-
 .../org/apache/hadoop/test/OzoneTestDriver.java    |    4 +-
 pom.xml                                            |   52 +-
 tools/fault-injection-service/README.md            |    2 +-
 828 files changed, 22843 insertions(+), 8525 deletions(-)

diff --cc hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 81470b2,a1f490b..caefb0d
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@@ -383,7 -388,8 +389,11 @@@ public final class OzoneConsts 
    // An on-disk transient marker file used when replacing DB with checkpoint
    public static final String DB_TRANSIENT_MARKER = "dbInconsistentMarker";
  
 +  // An on-disk marker file used to indicate that the OM is in prepare and
 +  // should remain prepared even after a restart.
 +  public static final String PREPARE_MARKER = "prepareMarker";
+ 
+   public static final String OM_RATIS_SNAPSHOT_DIR = "snapshot";
+ 
+   public static final long DEFAULT_OM_UPDATE_ID = -1L;  
  }
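
The PREPARE_MARKER constant added above names an on-disk marker file that
keeps the OM in the prepared state across restarts. Below is a minimal,
self-contained sketch of the check-and-create pattern such a marker enables;
the directory used here is a placeholder for the example, not the OM's
actual metadata layout.

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    public final class PrepareMarkerExample {
      // Same file name as the constant introduced above; the directory is a
      // stand-in for wherever the marker lives on disk.
      private static final String PREPARE_MARKER = "prepareMarker";

      /** True if the marker exists, i.e. the service should come back up
       *  in prepared (write-suspended) mode after a restart. */
      static boolean isPrepared(Path metadataDir) {
        return Files.exists(metadataDir.resolve(PREPARE_MARKER));
      }

      /** Creates the marker so the prepared state survives a restart. */
      static void markPrepared(Path metadataDir) throws IOException {
        Files.createDirectories(metadataDir);
        Path marker = metadataDir.resolve(PREPARE_MARKER);
        if (!Files.exists(marker)) {
          Files.createFile(marker);
        }
      }

      public static void main(String[] args) throws IOException {
        Path dir = Paths.get("/tmp/om-metadata-example");
        markPrepared(dir);
        System.out.println("prepared = " + isPrepared(dir));
      }
    }
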
diff --cc hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
index 7cd8f78,f4b7185..1992b59
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
@@@ -142,10 -130,10 +142,10 @@@ public abstract class Storage 
      return storageInfo;
    }
  
-   abstract protected Properties getNodeProperties();
+   protected abstract Properties getNodeProperties();
  
    /**
 -   * Sets the Node properties specific to OM/SCM.
 +   * Sets the Node properties specific to OM/SCM/DataNode.
     */
    private void setNodeProperties() {
      Properties nodeProperties = getNodeProperties();
diff --cc hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java
index f3d71be,f3d71be..91849c0
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java
@@@ -67,6 -67,6 +67,7 @@@ public class ConfigFileGenerator extend
      try {
  
        //load existing generated config (if exists)
++      boolean resourceExists = true;
        ConfigFileAppender appender = new ConfigFileAppender();
        try (InputStream input = filer
            .getResource(StandardLocation.CLASS_OUTPUT, "",
@@@ -74,6 -74,6 +75,7 @@@
          appender.load(input);
        } catch (FileNotFoundException | NoSuchFileException ex) {
          appender.init();
++        resourceExists = false;
        }
  
        Set<? extends Element> annotatedElements =
@@@ -100,13 -100,13 +102,16 @@@
          }
  
        }
--      FileObject resource = filer
--          .createResource(StandardLocation.CLASS_OUTPUT, "",
--              OUTPUT_FILE_NAME);
  
--      try (Writer writer = new OutputStreamWriter(
--          resource.openOutputStream(), StandardCharsets.UTF_8)) {
--        appender.write(writer);
++      if (!resourceExists) {
++        FileObject resource = filer
++            .createResource(StandardLocation.CLASS_OUTPUT, "",
++                OUTPUT_FILE_NAME);
++
++        try (Writer writer = new OutputStreamWriter(
++            resource.openOutputStream(), StandardCharsets.UTF_8)) {
++          appender.write(writer);
++        }
        }
  
      } catch (IOException e) {
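
The ConfigFileGenerator change above remembers whether the previously
generated resource could be loaded and only creates the output file when it
was missing, which avoids creating the same resource twice in one processing
round (something the annotation-processing Filer typically rejects). A rough
stand-alone sketch of the same load-if-present, create-only-on-miss pattern
using plain files; the path and skeleton content are made up for the example.

    import java.io.IOException;
    import java.io.Writer;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.NoSuchFileException;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    public final class LoadOrCreateExample {
      public static void main(String[] args) throws IOException {
        Path output = Paths.get("/tmp/generated-config-example.xml");

        // Try to load the previously generated file; remember whether it
        // existed.
        boolean resourceExists = true;
        String existing;
        try {
          existing = new String(Files.readAllBytes(output),
              StandardCharsets.UTF_8);
        } catch (NoSuchFileException ex) {
          resourceExists = false;
          existing = "<configuration/>";   // start from an empty skeleton
        }

        // ... merge newly discovered entries into 'existing' here ...

        // Only create the output when it was not already there, mirroring
        // the guard added in the hunk above.
        if (!resourceExists) {
          try (Writer writer = Files.newBufferedWriter(output,
              StandardCharsets.UTF_8)) {
            writer.write(existing);
          }
        }
      }
    }
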
diff --cc hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
index c0e57e8,55047ea..ff4c5c7
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
@@@ -49,18 -42,16 +49,20 @@@ import org.apache.hadoop.ozone.containe
  import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.CreatePipelineCommandHandler;
  import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.DeleteBlocksCommandHandler;
  import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.DeleteContainerCommandHandler;
 +import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.FinalizeNewLayoutVersionCommandHandler;
  import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.ReplicateContainerCommandHandler;
+ import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.SetNodeOperationalStateCommandHandler;
  import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker;
  import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
  import org.apache.hadoop.ozone.container.replication.ContainerReplicator;
  import org.apache.hadoop.ozone.container.replication.DownloadAndImportReplicator;
+ import org.apache.hadoop.ozone.container.replication.MeasuredReplicator;
  import org.apache.hadoop.ozone.container.replication.ReplicationSupervisor;
  import org.apache.hadoop.ozone.container.replication.SimpleContainerDownloader;
 +import org.apache.hadoop.ozone.container.upgrade.DataNodeLayoutAction;
 +import org.apache.hadoop.ozone.container.upgrade.DataNodeUpgradeFinalizer;
  import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 +import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.StatusAndMessages;
  import org.apache.hadoop.util.JvmPauseMonitor;
  import org.apache.hadoop.util.Time;
  
@@@ -177,7 -151,7 +182,8 @@@ public class DatanodeStateMachine imple
              dnConf.getContainerDeleteThreads()))
          .addHandler(new ClosePipelineCommandHandler())
          .addHandler(new CreatePipelineCommandHandler(conf))
+         .addHandler(new SetNodeOperationalStateCommandHandler(conf))
 +        .addHandler(new FinalizeNewLayoutVersionCommandHandler())
          .setConnectionManager(connectionManager)
          .setContainer(container)
          .setContext(context)
diff --cc hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
index 4a40496,4e436c4..dbf3bba
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
@@@ -53,9 -50,9 +53,10 @@@ import org.apache.hadoop.ozone.protocol
  import org.apache.hadoop.ozone.protocol.commands.CreatePipelineCommand;
  import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
  import org.apache.hadoop.ozone.protocol.commands.DeleteContainerCommand;
 +import org.apache.hadoop.ozone.protocol.commands.FinalizeNewLayoutVersionCommand;
  import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand;
  
+ import org.apache.hadoop.ozone.protocol.commands.SetNodeOperationalStateCommand;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
@@@ -367,16 -330,17 +366,27 @@@ public class HeartbeatEndpointTas
          }
          this.context.addCommand(closePipelineCommand);
          break;
+       case setNodeOperationalStateCommand:
+         SetNodeOperationalStateCommand setNodeOperationalStateCommand =
+             SetNodeOperationalStateCommand.getFromProtobuf(
+                 commandResponseProto.getSetNodeOperationalStateCommandProto());
+         if (LOG.isDebugEnabled()) {
+           LOG.debug("Received SCM set operational state command. State: {} " +
+               "Expiry: {}", setNodeOperationalStateCommand.getOpState(),
+               setNodeOperationalStateCommand.getStateExpiryEpochSeconds());
+         }
+         this.context.addCommand(setNodeOperationalStateCommand);
+         break;
 +      case finalizeNewLayoutVersionCommand:
 +        FinalizeNewLayoutVersionCommand finalizeNewLayoutVersionCommand =
 +            FinalizeNewLayoutVersionCommand.getFromProtobuf(
 +                commandResponseProto.getFinalizeNewLayoutVersionCommandProto());
 +        if (LOG.isDebugEnabled()) {
 +          LOG.debug("Received SCM finalize command {}",
 +              finalizeNewLayoutVersionCommand.getId());
 +        }
 +        this.context.addCommand(finalizeNewLayoutVersionCommand);
 +        break;
        default:
          throw new IllegalArgumentException("Unknown response : "
              + commandResponseProto.getCommandType().name());
diff --cc hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto
index 886b43c,7621cb8..dde514e
--- a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto
+++ b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto
@@@ -62,9 -63,9 +63,12 @@@ message ScmContainerLocationRequest 
    optional GetPipelineRequestProto getPipelineRequest = 24;
    optional GetContainerWithPipelineBatchRequestProto getContainerWithPipelineBatchRequest = 25;
    optional GetSafeModeRuleStatusesRequestProto getSafeModeRuleStatusesRequest = 26;
-   optional FinalizeScmUpgradeRequestProto finalizeScmUpgradeRequest = 27;
+   optional DecommissionNodesRequestProto decommissionNodesRequest = 27;
+   optional RecommissionNodesRequestProto recommissionNodesRequest = 28;
+   optional StartMaintenanceNodesRequestProto startMaintenanceNodesRequest = 29;
++  optional FinalizeScmUpgradeRequestProto finalizeScmUpgradeRequest = 30;
 +  optional QueryUpgradeFinalizationProgressRequestProto
-   queryUpgradeFinalizationProgressRequest = 28;
++  queryUpgradeFinalizationProgressRequest = 31;
  }
  
  message ScmContainerLocationResponse {
@@@ -99,9 -100,9 +103,12 @@@
    optional GetPipelineResponseProto getPipelineResponse = 24;
    optional GetContainerWithPipelineBatchResponseProto getContainerWithPipelineBatchResponse = 25;
    optional GetSafeModeRuleStatusesResponseProto getSafeModeRuleStatusesResponse = 26;
-   optional FinalizeScmUpgradeResponseProto finalizeScmUpgradeResponse = 27;
+   optional DecommissionNodesResponseProto decommissionNodesResponse = 27;
+   optional RecommissionNodesResponseProto recommissionNodesResponse = 28;
+   optional StartMaintenanceNodesResponseProto startMaintenanceNodesResponse = 29;
++  optional FinalizeScmUpgradeResponseProto finalizeScmUpgradeResponse = 30;
 +  optional QueryUpgradeFinalizationProgressResponseProto
-   queryUpgradeFinalizationProgressResponse = 28;
++  queryUpgradeFinalizationProgressResponse = 31;
    enum Status {
      OK = 1;
      CONTAINER_ALREADY_EXISTS = 2;
@@@ -132,8 -133,9 +139,11 @@@ enum Type 
    GetPipeline = 19;
    GetContainerWithPipelineBatch = 20;
    GetSafeModeRuleStatuses = 21;
-   FinalizeScmUpgrade = 22;
-   QueryUpgradeFinalizationProgress = 23;
+   DecommissionNodes = 22;
+   RecommissionNodes = 23;
+   StartMaintenanceNodes = 24;
++  FinalizeScmUpgrade = 25;
++  QueryUpgradeFinalizationProgress = 26;
  }
  
  /**
diff --cc hadoop-hdds/interface-client/src/main/proto/hdds.proto
index 3517731,0223814..b1cf85d
--- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto
+++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
@@@ -129,9 -131,14 +131,15 @@@ enum NodeState 
      HEALTHY = 1;
      STALE = 2;
      DEAD = 3;
++    HEALTHY_READONLY = 6;
+ }
+ 
+ enum NodeOperationalState {
+     IN_SERVICE = 1;
 -    DECOMMISSIONING = 2;
 -    DECOMMISSIONED = 3;
 -    ENTERING_MAINTENANCE = 4;
 -    IN_MAINTENANCE = 5;
++    ENTERING_MAINTENANCE = 2;
++    IN_MAINTENANCE = 3;
 +    DECOMMISSIONING = 4;
 +    DECOMMISSIONED = 5;
-     HEALTHY_READONLY = 6;
  }
  
  enum QueryScope {
diff --cc hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
index 9d0dbd2,505f9ca..f129c0d
--- a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
+++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
@@@ -304,7 -293,7 +304,8 @@@ message SCMCommandProto 
      replicateContainerCommand = 5;
      createPipelineCommand = 6;
      closePipelineCommand = 7;
-     finalizeNewLayoutVersionCommand = 8;
+     setNodeOperationalStateCommand = 8;
++    finalizeNewLayoutVersionCommand = 9;
    }
    // TODO: once we start using protoc 3.x, refactor this message using "oneof"
    required Type commandType = 1;
@@@ -315,8 -304,7 +316,9 @@@
    optional ReplicateContainerCommandProto replicateContainerCommandProto = 6;
    optional CreatePipelineCommandProto createPipelineCommandProto = 7;
    optional ClosePipelineCommandProto closePipelineCommandProto = 8;
+   optional SetNodeOperationalStateCommandProto setNodeOperationalStateCommandProto = 9;
 +  optional FinalizeNewLayoutVersionCommandProto
-   finalizeNewLayoutVersionCommandProto = 9;
++  finalizeNewLayoutVersionCommandProto = 10;
  }
  
  /**
@@@ -405,16 -393,13 +407,22 @@@ message ClosePipelineCommandProto 
    required int64 cmdId = 2;
  }
  
+ message SetNodeOperationalStateCommandProto {
+   required  int64 cmdId = 1;
+   required  NodeOperationalState nodeOperationalState = 2;
+   required  int64 stateExpiryEpochSeconds = 3;
+ }
+ 
  /**
 + * This command asks the DataNode to finalize a new layout version.
 + */
 +message FinalizeNewLayoutVersionCommandProto {
 +  required bool finalizeNewLayoutVersion = 1 [default = false];
 +  required LayoutVersionProto dataNodeLayoutVersion = 2;
 +  required int64 cmdId = 3;
 +}
 +
 +/**
   * Protocol used from a datanode to StorageContainerManager.
   *
   * Please see the request and response messages for details of the RPC calls.
diff --cc hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java
index 92ae43b,edd616f..29105ad
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java
@@@ -18,10 -18,9 +18,11 @@@
  
  package org.apache.hadoop.hdds.scm.node;
  
+ import com.google.common.annotations.VisibleForTesting;
  import org.apache.hadoop.hdds.protocol.DatanodeDetails;
  import org.apache.hadoop.hdds.protocol.proto
 +    .StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
 +import org.apache.hadoop.hdds.protocol.proto
      .StorageContainerDatanodeProtocolProtos.StorageReportProto;
  import org.apache.hadoop.hdds.protocol.proto
      .StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto;
@@@ -45,27 -44,20 +46,30 @@@ public class DatanodeInfo extends Datan
  
    private List<StorageReportProto> storageReports;
    private List<MetadataStorageReportProto> metadataStorageReports;
 +  private LayoutVersionProto lastKnownLayoutVersion;
  
+   private NodeStatus nodeStatus;
+ 
    /**
     * Constructs DatanodeInfo from DatanodeDetails.
     *
     * @param datanodeDetails Details about the datanode
 +   * @param layoutInfo Details about the LayoutVersionProto
     */
-   public DatanodeInfo(DatanodeDetails datanodeDetails,
-                       LayoutVersionProto layoutInfo) {
 -  public DatanodeInfo(DatanodeDetails datanodeDetails, NodeStatus nodeStatus) {
++  public DatanodeInfo(DatanodeDetails datanodeDetails, NodeStatus nodeStatus,
++        LayoutVersionProto layoutInfo) {
      super(datanodeDetails);
      this.lock = new ReentrantReadWriteLock();
      this.lastHeartbeatTime = Time.monotonicNow();
 +    lastKnownLayoutVersion =
 +        LayoutVersionProto.newBuilder()
 +            .setMetadataLayoutVersion(layoutInfo != null ?
 +                layoutInfo.getMetadataLayoutVersion() : 0)
 +            .setSoftwareLayoutVersion(layoutInfo != null ?
 +                layoutInfo.getSoftwareLayoutVersion() : 0)
 +            .build();
      this.storageReports = Collections.emptyList();
+     this.nodeStatus = nodeStatus;
      this.metadataStorageReports = Collections.emptyList();
    }
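
The reworked DatanodeInfo constructor above now takes both a NodeStatus and
a LayoutVersionProto, and falls back to version 0 for both layout fields when
no layout information was reported. A small self-contained sketch of that
null-safe defaulting, using a plain value class in place of the generated
protobuf builder.

    public final class LayoutDefaultExample {
      /** Stand-in for LayoutVersionProto; the real class is protobuf-generated. */
      static final class LayoutVersion {
        final int metadataLayoutVersion;
        final int softwareLayoutVersion;

        LayoutVersion(int mlv, int slv) {
          this.metadataLayoutVersion = mlv;
          this.softwareLayoutVersion = slv;
        }
      }

      /** Returns the supplied layout, or a 0/0 default when none was reported. */
      static LayoutVersion lastKnownLayout(LayoutVersion reported) {
        return new LayoutVersion(
            reported != null ? reported.metadataLayoutVersion : 0,
            reported != null ? reported.softwareLayoutVersion : 0);
      }

      public static void main(String[] args) {
        LayoutVersion fromHeartbeat = lastKnownLayout(new LayoutVersion(2, 3));
        LayoutVersion unknown = lastKnownLayout(null);
        System.out.println(fromHeartbeat.metadataLayoutVersion + " vs "
            + unknown.metadataLayoutVersion);   // prints "2 vs 0"
      }
    }
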
  
diff --cc hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
index 48c9e04,22a1e81..17bf6b6
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
@@@ -28,8 -27,8 +28,9 @@@ import org.apache.hadoop.hdds.scm.conta
  import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
  import org.apache.hadoop.hdds.protocol.DatanodeDetails;
  import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
+ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
  import org.apache.hadoop.hdds.server.events.EventHandler;
 +import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
  import org.apache.hadoop.ozone.protocol.StorageContainerNodeProtocol;
  import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
  import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
diff --cc hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
index a01cca73,d7af7f5..130aa48
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
@@@ -33,9 -32,9 +32,11 @@@ import java.util.function.Predicate
  
  import org.apache.hadoop.hdds.conf.ConfigurationSource;
  import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
  import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
 +import org.apache.hadoop.hdds.protocol.proto
 +    .StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
+ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
  import org.apache.hadoop.hdds.scm.container.ContainerID;
  import org.apache.hadoop.hdds.scm.events.SCMEvents;
  import org.apache.hadoop.hdds.scm.node.states.Node2PipelineMap;
@@@ -56,16 -54,9 +57,16 @@@ import org.apache.hadoop.util.concurren
  import com.google.common.annotations.VisibleForTesting;
  import com.google.common.base.Preconditions;
  import com.google.common.util.concurrent.ThreadFactoryBuilder;
 +
 +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD;
- import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DECOMMISSIONED;
- import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DECOMMISSIONING;
 +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY;
 +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY_READONLY;
 +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE;
  import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL;
  import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
  import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
++import static org.apache.hadoop.hdds.scm.events.SCMEvents.NON_HEALTHY_TO_READONLY_HEALTHY_NODE;
++
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
@@@ -89,8 -80,7 +90,7 @@@ public class NodeStateManager implement
     * Node's life cycle events.
     */
    private enum NodeLifeCycleEvent {
-     TIMEOUT, RESTORE, RESURRECT, DECOMMISSION, DECOMMISSIONED, LAYOUT_MISMATCH,
-     LAYOUT_MATCH
 -    TIMEOUT, RESTORE, RESURRECT
++    TIMEOUT, RESTORE, RESURRECT, LAYOUT_MISMATCH, LAYOUT_MATCH
    }
  
    private static final Logger LOG = LoggerFactory
@@@ -173,11 -157,8 +173,9 @@@
      this.state2EventMap = new HashMap<>();
      initialiseState2EventMap();
      Set<NodeState> finalStates = new HashSet<>();
-     finalStates.add(DECOMMISSIONED);
-     // All DataNodes should start in HealthyReadOnly state.
-     this.stateMachine = new StateMachine<>(NodeState.HEALTHY_READONLY,
 -    this.nodeHealthSM = new StateMachine<>(NodeState.HEALTHY, finalStates);
++    this.nodeHealthSM = new StateMachine<>(NodeState.HEALTHY_READONLY,
 +        finalStates);
-     initializeStateMachine();
+     initializeStateMachines();
      heartbeatCheckerIntervalMs = HddsServerUtil
          .getScmheartbeatCheckerInterval(conf);
      staleNodeIntervalMs = HddsServerUtil.getStaleNodeInterval(conf);
@@@ -201,135 -182,52 +199,74 @@@
     * Populates state2event map.
     */
    private void initialiseState2EventMap() {
 -    state2EventMap.put(NodeState.STALE, SCMEvents.STALE_NODE);
 -    state2EventMap.put(NodeState.DEAD, SCMEvents.DEAD_NODE);
 -    state2EventMap.put(NodeState.HEALTHY,
 -        SCMEvents.NON_HEALTHY_TO_HEALTHY_NODE);
 +    state2EventMap.put(STALE, SCMEvents.STALE_NODE);
 +    state2EventMap.put(DEAD, SCMEvents.DEAD_NODE);
 +    state2EventMap
 +        .put(HEALTHY, SCMEvents.READ_ONLY_HEALTHY_TO_HEALTHY_NODE);
 +    state2EventMap
-         .put(NodeState.HEALTHY_READONLY,
-             SCMEvents.NON_HEALTHY_TO_READONLY_HEALTHY_NODE);
++        .put(NodeState.HEALTHY_READONLY, NON_HEALTHY_TO_READONLY_HEALTHY_NODE);
    }
  
    /*
     *
     * Node and State Transition Mapping:
     *
 -   * State: HEALTHY         -------------------> STALE
 -   * Event:                       TIMEOUT
 +   * State: HEALTHY             -------------------> STALE
 +   * Event:                          TIMEOUT
     *
-    * State: HEALTHY             -------------------> DECOMMISSIONING
-    * Event:                        DECOMMISSION
-    *
 -   * State: STALE           -------------------> DEAD
 -   * Event:                       TIMEOUT
 +   * State: HEALTHY             -------------------> HEALTHY_READONLY
 +   * Event:                       LAYOUT_MISMATCH
 +   *
 +   * State: HEALTHY_READONLY    -------------------> HEALTHY
 +   * Event:                       LAYOUT_MATCH
     *
-    * State: HEALTHY_READONLY    -------------------> DECOMMISSIONING
-    * Event:                        DECOMMISSION
-    *
 -   * State: STALE           -------------------> HEALTHY
 +   * State: HEALTHY_READONLY    -------------------> STALE
 +   * Event:                          TIMEOUT
 +   *
 +   * State: STALE           -------------------> HEALTHY_READONLY
     * Event:                       RESTORE
     *
 -   * State: DEAD            -------------------> HEALTHY
 +   * State: DEAD            -------------------> HEALTHY_READONLY
     * Event:                       RESURRECT
     *
 +   * State: STALE           -------------------> DEAD
 +   * Event:                       TIMEOUT
 +   *
-    * State: STALE           -------------------> DECOMMISSIONING
-    * Event:                     DECOMMISSION
-    *
-    * State: DEAD            -------------------> DECOMMISSIONING
-    * Event:                     DECOMMISSION
-    *
-    * State: DECOMMISSIONING -------------------> DECOMMISSIONED
-    * Event:                     DECOMMISSIONED
-    *
     *  Node State Flow
     *
-    *                                      +->------------------->------+
-    *                                      |                            |
-    *                                      |(DECOMMISSION)              |
-    *                                      ^                            V
-    *                                      |  +-----<---------<---+     |
-    *                                      |  |    (RESURRECT)    |     |
-    *    +-->-----(LAYOUT_MISMATCH)-->--+  |  V                   |     |
-    *    |                              |  |  |                   ^     |
-    *    |                              |  ^  |                   |     |
-    *    |                              V  |  V                   |     |
-    *    |  +-----(LAYOUT_MATCH)--[HEALTHY_READONLY]              |     |
-    *    |  |                            ^  |                     |     V
-    *    |  |                            |  |                     ^     |
-    *    |  |                            |  |(TIMEOUT)            |     |
-    *    ^  |                  (RESTORE) |  |                     |     |
-    *    |  V                            |  V                     |     |
-    * [HEALTHY]---->----------------->[STALE]------->--------->[DEAD]   |
-    *    |           (TIMEOUT)         |         (TIMEOUT)       |      |
-    *    |                             |                         |      |
-    *    V                             |                         V      |
-    *    |                             |                         |      V
-    *    |                             |                         |      |
-    *    |                             |                         |      |
-    *    |(DECOMMISSION)               | (DECOMMISSION)          |(DECOMMISSION)
-    *    |                             V                         |      |
-    *    +---->---------------->[DECOMMISSIONING]<---------------+      |
-    *                                   |   ^                           |
-    *                                   |   |                           V
-    *                                   V   |                           |
-    *                                   |   +-----------<----------<----+
-    *                                   |
-    *                                   |
-    *                                   | (DECOMMISSIONED)
-    *                                   |
-    *                                   V
-    *                          [DECOMMISSIONED]
 -   *  +--------------------------------------------------------+
 -   *  |                                     (RESURRECT)        |
 -   *  |   +--------------------------+                         |
 -   *  |   |      (RESTORE)           |                         |
 -   *  |   |                          |                         |
 -   *  V   V                          |                         |
 -   * [HEALTHY]------------------->[STALE]------------------->[DEAD]
++   *                                        +-----<---------<---+
++   *                                        |    (RESURRECT)    |
++   *    +-->-----(LAYOUT_MISMATCH)-->--+    V                   |
++   *    |                              |    |                   ^
++   *    |                              |    |                   |
++   *    |                              V    V                   |
++   *    |  +-----(LAYOUT_MATCH)--[HEALTHY_READONLY]             |
++   *    |  |                            ^  |                    |
++   *    |  |                            |  |                    ^
++   *    |  |                            |  |(TIMEOUT)           |
++   *    ^  |                  (RESTORE) |  |                    |
++   *    |  V                            |  V                    |
++   * [HEALTHY]---->----------------->[STALE]------->--------->[DEAD]
++   *               (TIMEOUT)                  (TIMEOUT)
     *
     */
  
    /**
     * Initializes the lifecycle of node state machine.
     */
-   private void initializeStateMachine() {
-     stateMachine.addTransition(
-         HEALTHY_READONLY, HEALTHY,
+   private void initializeStateMachines() {
 -    nodeHealthSM.addTransition(
 -        NodeState.HEALTHY, NodeState.STALE, NodeLifeCycleEvent.TIMEOUT);
 -    nodeHealthSM.addTransition(
 -        NodeState.STALE, NodeState.DEAD, NodeLifeCycleEvent.TIMEOUT);
 -    nodeHealthSM.addTransition(
 -        NodeState.STALE, NodeState.HEALTHY, NodeLifeCycleEvent.RESTORE);
 -    nodeHealthSM.addTransition(
 -        NodeState.DEAD, NodeState.HEALTHY, NodeLifeCycleEvent.RESURRECT);
++    nodeHealthSM.addTransition(HEALTHY_READONLY, HEALTHY,
 +        NodeLifeCycleEvent.LAYOUT_MATCH);
-     stateMachine.addTransition(
-         HEALTHY_READONLY, STALE,
++    nodeHealthSM.addTransition(HEALTHY_READONLY, STALE,
 +        NodeLifeCycleEvent.TIMEOUT);
-     stateMachine.addTransition(
-         HEALTHY_READONLY, DECOMMISSIONING,
-         NodeLifeCycleEvent.DECOMMISSION);
-     stateMachine.addTransition(
-         HEALTHY, STALE, NodeLifeCycleEvent.TIMEOUT);
-     stateMachine.addTransition(
-         HEALTHY, HEALTHY_READONLY,
++    nodeHealthSM.addTransition(HEALTHY, STALE, NodeLifeCycleEvent.TIMEOUT);
++    nodeHealthSM.addTransition(HEALTHY, HEALTHY_READONLY,
 +        NodeLifeCycleEvent.LAYOUT_MISMATCH);
-     stateMachine.addTransition(
-         STALE, DEAD, NodeLifeCycleEvent.TIMEOUT);
-     stateMachine.addTransition(
-         STALE, HEALTHY_READONLY,
++    nodeHealthSM.addTransition(STALE, DEAD, NodeLifeCycleEvent.TIMEOUT);
++    nodeHealthSM.addTransition(STALE, HEALTHY_READONLY,
 +        NodeLifeCycleEvent.RESTORE);
-     stateMachine.addTransition(
-         DEAD, HEALTHY_READONLY,
++    nodeHealthSM.addTransition(DEAD, HEALTHY_READONLY,
 +        NodeLifeCycleEvent.RESURRECT);
-     stateMachine.addTransition(
-         HEALTHY, DECOMMISSIONING,
-         NodeLifeCycleEvent.DECOMMISSION);
-     stateMachine.addTransition(
-         STALE, DECOMMISSIONING,
-         NodeLifeCycleEvent.DECOMMISSION);
-     stateMachine.addTransition(
-         DEAD, DECOMMISSIONING,
-         NodeLifeCycleEvent.DECOMMISSION);
-     stateMachine.addTransition(
-         DECOMMISSIONING, DECOMMISSIONED,
-         NodeLifeCycleEvent.DECOMMISSIONED);
- 
    }
  
    /**
@@@ -340,11 -237,10 +277,11 @@@
     *
     * @throws NodeAlreadyExistsException if the node is already present
     */
 -  public void addNode(DatanodeDetails datanodeDetails)
 +  public void addNode(DatanodeDetails datanodeDetails,
 +                      LayoutVersionProto layoutInfo)
        throws NodeAlreadyExistsException {
-     nodeStateMap.addNode(datanodeDetails, stateMachine.getInitialState(),
-         layoutInfo);
+     NodeStatus newNodeStatus = newNodeStatus(datanodeDetails);
 -    nodeStateMap.addNode(datanodeDetails, newNodeStatus);
++    nodeStateMap.addNode(datanodeDetails, newNodeStatus, layoutInfo);
      eventPublisher.fireEvent(SCMEvents.NEW_NODE, datanodeDetails);
    }
  
@@@ -424,10 -328,7 +383,7 @@@
     * @return list of healthy nodes
     */
    public List<DatanodeInfo> getHealthyNodes() {
-     List<DatanodeInfo> allHealthyNodes;
-     allHealthyNodes = getNodes(HEALTHY);
-     allHealthyNodes.addAll(getNodes(NodeState.HEALTHY_READONLY));
-     return allHealthyNodes;
 -    return getNodes(null, NodeState.HEALTHY);
++    return getNodes(null, HEALTHY);
    }
  
    /**
@@@ -636,25 -584,8 +639,26 @@@
      scheduleNextHealthCheck();
    }
  
 +  public void forceNodesToHealthyReadOnly() {
 +    try {
-       List<UUID> nodes = nodeStateMap.getNodes(HEALTHY);
++      List<UUID> nodes = nodeStateMap.getNodes(null, HEALTHY);
 +      for (UUID id : nodes) {
 +        DatanodeInfo node = nodeStateMap.getNodeInfo(id);
-         nodeStateMap.updateNodeState(node.getUuid(), HEALTHY,
++        nodeStateMap.updateNodeHealthState(node.getUuid(),
 +            HEALTHY_READONLY);
 +        if (state2EventMap.containsKey(HEALTHY_READONLY)) {
 +          eventPublisher.fireEvent(state2EventMap.get(HEALTHY_READONLY),
 +              node);
 +        }
 +      }
 +    } catch (NodeNotFoundException ex) {
 +      LOG.error("Inconsistent NodeStateMap! {}", nodeStateMap);
 +      ex.printStackTrace();
 +    }
 +  }
 +
-   private void checkNodesHealth() {
+   @VisibleForTesting
+   public void checkNodesHealth() {
  
      /*
       *
@@@ -695,56 -626,33 +699,49 @@@
          (lastHbTime) -> lastHbTime < healthyNodeDeadline;
      Predicate<Long> deadNodeCondition =
          (lastHbTime) -> lastHbTime < staleNodeDeadline;
 +    Predicate<LayoutVersionProto> layoutMatchCondition =
 +        (layout) -> layout.getMetadataLayoutVersion() ==
 +            layoutVersionManager.getMetadataLayoutVersion();
 +    Predicate<LayoutVersionProto> layoutMisMatchCondition =
 +        (layout) -> layout.getMetadataLayoutVersion() !=
 +            layoutVersionManager.getMetadataLayoutVersion();
      try {
-       for (NodeState state : NodeState.values()) {
-         List<UUID> nodes = nodeStateMap.getNodes(state);
-         for (UUID id : nodes) {
-           DatanodeInfo node = nodeStateMap.getNodeInfo(id);
-           switch (state) {
-           case HEALTHY:
-               // Move the node to STALE if the last heartbeat time is less than
-             // configured stale-node interval.
-             updateNodeLayoutVersionState(node, layoutMisMatchCondition, state,
-                 NodeLifeCycleEvent.LAYOUT_MISMATCH);
-             updateNodeState(node, staleNodeCondition, state,
-                 NodeLifeCycleEvent.TIMEOUT);
-             break;
-           case HEALTHY_READONLY:
-             // Move the node to STALE if the last heartbeat time is less than
-             // configured stale-node interval.
-             updateNodeLayoutVersionState(node, layoutMatchCondition, state,
-                 NodeLifeCycleEvent.LAYOUT_MATCH);
-             updateNodeState(node, staleNodeCondition, state,
-                   NodeLifeCycleEvent.TIMEOUT);
-             break;
-           case STALE:
-             // Move the node to DEAD if the last heartbeat time is less than
-             // configured dead-node interval.
-             updateNodeState(node, deadNodeCondition, state,
-                 NodeLifeCycleEvent.TIMEOUT);
-             // Restore the node if we have received heartbeat before configured
-             // stale-node interval.
-             updateNodeState(node, healthyNodeCondition, state,
-                 NodeLifeCycleEvent.RESTORE);
-             break;
-           case DEAD:
-             // Resurrect the node if we have received heartbeat before
-             // configured stale-node interval.
-             updateNodeState(node, healthyNodeCondition, state,
-                 NodeLifeCycleEvent.RESURRECT);
-             break;
-           // We don't do anything for DECOMMISSIONING and DECOMMISSIONED in
-           // heartbeat processing.
-           case DECOMMISSIONING:
-           case DECOMMISSIONED:
-           default:
-           }
+       for(DatanodeInfo node : nodeStateMap.getAllDatanodeInfos()) {
+         NodeStatus status = nodeStateMap.getNodeStatus(node.getUuid());
+         switch (status.getHealth()) {
+         case HEALTHY:
+           // Move the node to STALE if the last heartbeat time is less than
+           // configured stale-node interval.
++          updateNodeLayoutVersionState(node, layoutMisMatchCondition, status,
++              NodeLifeCycleEvent.LAYOUT_MISMATCH);
++          updateNodeState(node, staleNodeCondition, status,
++              NodeLifeCycleEvent.TIMEOUT);
++          break;
++        case HEALTHY_READONLY:
++          // Move the node to STALE if the last heartbeat time is less than
++          // configured stale-node interval.
++          updateNodeLayoutVersionState(node, layoutMatchCondition, status,
++              NodeLifeCycleEvent.LAYOUT_MATCH);
+           updateNodeState(node, staleNodeCondition, status,
+               NodeLifeCycleEvent.TIMEOUT);
+           break;
+         case STALE:
+           // Move the node to DEAD if the last heartbeat time is less than
+           // configured dead-node interval.
+           updateNodeState(node, deadNodeCondition, status,
+               NodeLifeCycleEvent.TIMEOUT);
+           // Restore the node if we have received heartbeat before configured
+           // stale-node interval.
+           updateNodeState(node, healthyNodeCondition, status,
+               NodeLifeCycleEvent.RESTORE);
+           break;
+         case DEAD:
+           // Resurrect the node if we have received heartbeat before
+           // configured stale-node interval.
+           updateNodeState(node, healthyNodeCondition, status,
+               NodeLifeCycleEvent.RESURRECT);
+           break;
+         default:
          }
        }
      } catch (NodeNotFoundException e) {
@@@ -827,36 -743,6 +832,37 @@@
      }
    }
  
 +  /**
 +   * Updates the node state if the condition satisfies.
 +   *
 +   * @param node DatanodeInfo
 +   * @param condition condition to check
-    * @param state current state of node
++   * @param status current state of node
 +   * @param lifeCycleEvent NodeLifeCycleEvent to be applied if condition
 +   *                       matches
 +   *
 +   * @throws NodeNotFoundException if the node is not present
 +   */
 +  private void updateNodeLayoutVersionState(DatanodeInfo node,
 +                             Predicate<LayoutVersionProto> condition,
-                              NodeState state, NodeLifeCycleEvent lifeCycleEvent)
++                                            NodeStatus status,
++                                            NodeLifeCycleEvent lifeCycleEvent)
 +      throws NodeNotFoundException {
 +    try {
 +      if (condition.test(node.getLastKnownLayoutVersion())) {
-         NodeState newState = stateMachine.getNextState(state, lifeCycleEvent);
-         nodeStateMap.updateNodeState(node.getUuid(), state, newState);
-         if (state2EventMap.containsKey(newState)) {
-           eventPublisher.fireEvent(state2EventMap.get(newState), node);
-         }
++        NodeState newHealthState = nodeHealthSM.getNextState(status.getHealth(),
++            lifeCycleEvent);
++        NodeStatus newStatus =
++            nodeStateMap.updateNodeHealthState(node.getUuid(), newHealthState);
++        fireHealthStateEvent(newStatus.getHealth(), node);
 +      }
 +    } catch (InvalidStateTransitionException e) {
 +      LOG.warn("Invalid state transition of node {}." +
 +              " Current state: {}, life cycle event: {}",
-           node, state, lifeCycleEvent);
++          node, status, lifeCycleEvent);
 +    }
 +  }
 +
    @Override
    public void close() {
      executorService.shutdown();
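
The transitions registered in initializeStateMachines() above make
HEALTHY_READONLY the entry health state: LAYOUT_MATCH promotes a node to
HEALTHY, LAYOUT_MISMATCH demotes it back, TIMEOUT degrades through STALE to
DEAD, and RESTORE/RESURRECT recover into HEALTHY_READONLY until the layout is
verified again. A self-contained, table-driven sketch of the same edges
follows; it uses a plain EnumMap rather than the project's StateMachine class.

    import java.util.EnumMap;
    import java.util.Map;

    public final class NodeHealthStateMachineExample {
      enum Health { HEALTHY, HEALTHY_READONLY, STALE, DEAD }
      enum Event { TIMEOUT, RESTORE, RESURRECT, LAYOUT_MISMATCH, LAYOUT_MATCH }

      // transitions[current][event] = next state; unmapped pairs are invalid.
      private static final Map<Health, Map<Event, Health>> TRANSITIONS =
          new EnumMap<>(Health.class);

      private static void addTransition(Health from, Health to, Event event) {
        TRANSITIONS.computeIfAbsent(from, k -> new EnumMap<>(Event.class))
            .put(event, to);
      }

      static {
        // Same edges as the hunk above.
        addTransition(Health.HEALTHY_READONLY, Health.HEALTHY, Event.LAYOUT_MATCH);
        addTransition(Health.HEALTHY_READONLY, Health.STALE, Event.TIMEOUT);
        addTransition(Health.HEALTHY, Health.HEALTHY_READONLY, Event.LAYOUT_MISMATCH);
        addTransition(Health.HEALTHY, Health.STALE, Event.TIMEOUT);
        addTransition(Health.STALE, Health.DEAD, Event.TIMEOUT);
        addTransition(Health.STALE, Health.HEALTHY_READONLY, Event.RESTORE);
        addTransition(Health.DEAD, Health.HEALTHY_READONLY, Event.RESURRECT);
      }

      static Health next(Health current, Event event) {
        Map<Event, Health> row = TRANSITIONS.get(current);
        Health target = row == null ? null : row.get(event);
        if (target == null) {
          throw new IllegalStateException(
              "Invalid transition: " + current + " on " + event);
        }
        return target;
      }

      public static void main(String[] args) {
        Health h = Health.HEALTHY_READONLY;   // new nodes start read-only
        h = next(h, Event.LAYOUT_MATCH);      // -> HEALTHY
        h = next(h, Event.TIMEOUT);           // -> STALE
        h = next(h, Event.RESTORE);           // -> HEALTHY_READONLY
        System.out.println(h);
      }
    }
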
diff --cc hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java
index 0000000,72ca015..4a01607
mode 000000,100644..100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java
@@@ -1,0 -1,206 +1,211 @@@
+ /**
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  * http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hdds.scm.node;
+ 
+ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+ 
+ import java.util.Objects;
+ 
+ /**
+  * This class is used to capture the current status of a datanode. This
+  * includes its health (healthy, stale or dead) and its operation status (
+  * in_service, decommissioned and maintenance mode) along with the expiry time
+  * for the operational state (used with maintenance mode).
+  */
+ public class NodeStatus {
+ 
+   private HddsProtos.NodeOperationalState operationalState;
+   private HddsProtos.NodeState health;
+   private long opStateExpiryEpochSeconds;
+ 
+   public NodeStatus(HddsProtos.NodeOperationalState operationalState,
 -             HddsProtos.NodeState health) {
++                    HddsProtos.NodeState health) {
+     this.operationalState = operationalState;
+     this.health = health;
+     this.opStateExpiryEpochSeconds = 0;
+   }
+ 
+   public NodeStatus(HddsProtos.NodeOperationalState operationalState,
+                     HddsProtos.NodeState health,
+                     long opStateExpireEpocSeconds) {
+     this.operationalState = operationalState;
+     this.health = health;
+     this.opStateExpiryEpochSeconds = opStateExpireEpocSeconds;
+   }
+ 
+   public static NodeStatus inServiceHealthy() {
+     return new NodeStatus(HddsProtos.NodeOperationalState.IN_SERVICE,
+         HddsProtos.NodeState.HEALTHY);
+   }
+ 
++  public static NodeStatus inServiceHealthyReadOnly() {
++    return new NodeStatus(HddsProtos.NodeOperationalState.IN_SERVICE,
++        HddsProtos.NodeState.HEALTHY_READONLY);
++  }
++
+   public static NodeStatus inServiceStale() {
+     return new NodeStatus(HddsProtos.NodeOperationalState.IN_SERVICE,
+         HddsProtos.NodeState.STALE);
+   }
+ 
+   public static NodeStatus inServiceDead() {
+     return new NodeStatus(HddsProtos.NodeOperationalState.IN_SERVICE,
+         HddsProtos.NodeState.DEAD);
+   }
+ 
+   public HddsProtos.NodeState getHealth() {
+     return health;
+   }
+ 
+   public HddsProtos.NodeOperationalState getOperationalState() {
+     return operationalState;
+   }
+ 
+   public long getOpStateExpiryEpochSeconds() {
+     return opStateExpiryEpochSeconds;
+   }
+ 
+   public boolean operationalStateExpired() {
+     if (0 == opStateExpiryEpochSeconds) {
+       return false;
+     }
+     return System.currentTimeMillis() / 1000 >= opStateExpiryEpochSeconds;
+   }
+ 
+   /**
+    * Returns true if the nodeStatus indicates the node is in any decommission
+    * state.
+    *
+    * @return True if the node is in any decommission state, false otherwise
+    */
+   public boolean isDecommission() {
+     return operationalState == HddsProtos.NodeOperationalState.DECOMMISSIONING
+         || operationalState == HddsProtos.NodeOperationalState.DECOMMISSIONED;
+   }
+ 
+   /**
+    * Returns true if the node is currently decommissioning.
+    *
+    * @return True if the node is decommissioning, false otherwise
+    */
+   public boolean isDecommissioning() {
+     return operationalState == HddsProtos.NodeOperationalState.DECOMMISSIONING;
+   }
+ 
+   /**
+    * Returns true if the node is decommissioned.
+    *
+    * @return True if the node is decommissioned, false otherwise
+    */
+   public boolean isDecommissioned() {
+     return operationalState == HddsProtos.NodeOperationalState.DECOMMISSIONED;
+   }
+ 
+   /**
+    * Returns true if the node is in any maintenance state.
+    *
+    * @return True if the node is in any maintenance state, false otherwise
+    */
+   public boolean isMaintenance() {
+     return operationalState
+         == HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE
+         || operationalState == HddsProtos.NodeOperationalState.IN_MAINTENANCE;
+   }
+ 
+   /**
+    * Returns true if the node is currently entering maintenance.
+    *
+    * @return True if the node is entering maintenance, false otherwise
+    */
+   public boolean isEnteringMaintenance() {
+     return operationalState
+         == HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE;
+   }
+ 
+   /**
+    * Returns true if the node is currently in maintenance.
+    *
+    * @return True if the node is in maintenance, false otherwise.
+    */
+   public boolean isInMaintenance() {
+     return operationalState == HddsProtos.NodeOperationalState.IN_MAINTENANCE;
+   }
+ 
+   /**
+    * Returns true if the nodeStatus is healthy (ie not stale or dead) and false
+    * otherwise.
+    *
+    * @return True if the node is Healthy, false otherwise
+    */
+   public boolean isHealthy() {
+     return health == HddsProtos.NodeState.HEALTHY;
+   }
+ 
+   /**
+    * Returns true if the nodeStatus is either healthy or stale and false
+    * otherwise.
+    *
+    * @return True is the node is Healthy or Stale, false otherwise.
+    */
+   public boolean isAlive() {
+     return health == HddsProtos.NodeState.HEALTHY
+         || health == HddsProtos.NodeState.STALE;
+   }
+ 
+   /**
+    * Returns true if the nodeStatus is dead and false otherwise.
+    *
+    * @return True is the node is Dead, false otherwise.
+    */
+   public boolean isDead() {
+     return health == HddsProtos.NodeState.DEAD;
+   }
+ 
+   @Override
+   public boolean equals(Object obj) {
+     if (this == obj) {
+       return true;
+     }
+     if (obj == null) {
+       return false;
+     }
+     if (getClass() != obj.getClass()) {
+       return false;
+     }
+     NodeStatus other = (NodeStatus) obj;
+     if (this.operationalState == other.operationalState &&
+         this.health == other.health
+         && this.opStateExpiryEpochSeconds == other.opStateExpiryEpochSeconds) {
+       return true;
+     }
+     return false;
+   }
+ 
+   @Override
+   public int hashCode() {
+     return Objects.hash(health, operationalState, opStateExpiryEpochSeconds);
+   }
+ 
+   @Override
+   public String toString() {
+     return "OperationalState: "+operationalState+" Health: "+health+
+         " OperationalStateExpiry: "+opStateExpiryEpochSeconds;
+   }
+ 
 -}
++}
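
A short usage sketch of the NodeStatus API shown in full above; it assumes the
hadoop-hdds-server-scm classes from this diff are on the classpath and uses
only the calls visible in the hunk.

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.node.NodeStatus;

    public final class NodeStatusUsageExample {
      public static void main(String[] args) {
        // Factory for the in-service / healthy-read-only combination added
        // in this merge.
        NodeStatus readOnly = NodeStatus.inServiceHealthyReadOnly();
        System.out.println(readOnly.isHealthy());  // false: read-only is not HEALTHY
        System.out.println(readOnly.isAlive());    // false: isAlive() covers HEALTHY/STALE only

        // Maintenance with an expiry one hour from now.
        long expiry = System.currentTimeMillis() / 1000 + 3600;
        NodeStatus maintenance = new NodeStatus(
            HddsProtos.NodeOperationalState.IN_MAINTENANCE,
            HddsProtos.NodeState.HEALTHY,
            expiry);
        System.out.println(maintenance.isInMaintenance());          // true
        System.out.println(maintenance.operationalStateExpired());  // false until the hour passes
      }
    }
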
diff --cc hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index 51c84dc,fcd72cf..2c764c3
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@@ -17,6 -17,6 +17,9 @@@
   */
  package org.apache.hadoop.hdds.scm.node;
  
++import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY;
++import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY_READONLY;
++
  import javax.management.ObjectName;
  import java.io.IOException;
  import java.net.InetAddress;
@@@ -36,9 -36,8 +39,10 @@@ import org.apache.hadoop.hdds.DFSConfig
  import org.apache.hadoop.hdds.conf.OzoneConfiguration;
  import org.apache.hadoop.hdds.protocol.DatanodeDetails;
  import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
+ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
  import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
 +import org.apache.hadoop.hdds.protocol.proto
 +    .StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
  import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
  import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
  import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto.ErrorCode;
@@@ -66,9 -63,9 +70,10 @@@ import org.apache.hadoop.net.TableMappi
  import org.apache.hadoop.ozone.OzoneConsts;
  import org.apache.hadoop.ozone.protocol.VersionResponse;
  import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
 +import org.apache.hadoop.ozone.protocol.commands.FinalizeNewLayoutVersionCommand;
  import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
  import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
+ import org.apache.hadoop.ozone.protocol.commands.SetNodeOperationalStateCommand;
  import org.apache.hadoop.util.ReflectionUtils;
  
  import com.google.common.annotations.VisibleForTesting;
@@@ -355,9 -389,8 +417,10 @@@ public class SCMNodeManager implements 
          "DatanodeDetails.");
      try {
        nodeStateManager.updateLastHeartbeatTime(datanodeDetails);
 +      nodeStateManager.updateLastKnownLayoutVersion(datanodeDetails,
 +          layoutInfo);
        metrics.incNumHBProcessed();
+       updateDatanodeOpState(datanodeDetails);
      } catch (NodeNotFoundException e) {
        metrics.incNumHBProcessingFailed();
        LOG.error("SCM trying to process heartbeat from an " +
@@@ -491,14 -507,11 +590,14 @@@
  
      final Map<DatanodeDetails, SCMNodeStat> nodeStats = new HashMap<>();
  
 -    final List<DatanodeInfo> healthyNodes =  nodeStateManager
 -        .getHealthyNodes();
 +    final List<DatanodeInfo> healthyNodes = nodeStateManager
-         .getNodes(NodeState.HEALTHY);
++        .getNodes(null, HEALTHY);
 +    final List<DatanodeInfo> healthyReadOnlyNodes = nodeStateManager
-         .getNodes(NodeState.HEALTHY_READONLY);
++        .getNodes(null, HEALTHY_READONLY);
      final List<DatanodeInfo> staleNodes = nodeStateManager
-         .getNodes(NodeState.STALE);
+         .getStaleNodes();
      final List<DatanodeInfo> datanodes = new ArrayList<>(healthyNodes);
 +    datanodes.addAll(healthyReadOnlyNodes);
      datanodes.addAll(staleNodes);
  
      for (DatanodeInfo dnInfo : datanodes) {
diff --cc hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java
index 111c546,5ea4d91..6eb7359
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java
@@@ -32,13 -33,7 +33,8 @@@ import org.apache.hadoop.metrics2.lib.I
  import org.apache.hadoop.metrics2.lib.MetricsRegistry;
  import org.apache.hadoop.metrics2.lib.MutableCounterLong;
  import org.apache.hadoop.ozone.OzoneConsts;
 +
- import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD;
- import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DECOMMISSIONED;
- import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DECOMMISSIONING;
- import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY;
- import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY_READONLY;
- import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE;
+ import org.apache.hadoop.util.StringUtils;
  
  /**
   * This class maintains Node related metrics.
@@@ -119,48 -116,58 +117,58 @@@ public final class SCMNodeMetrics imple
    @Override
    @SuppressWarnings("SuspiciousMethodCalls")
    public void getMetrics(MetricsCollector collector, boolean all) {
-     Map<String, Integer> nodeCount = managerMXBean.getNodeCount();
+     Map<String, Map<String, Integer>> nodeCount = managerMXBean.getNodeCount();
      Map<String, Long> nodeInfo = managerMXBean.getNodeInfo();
-     registry.snapshot(
-         collector.addRecord(registry.info()) // Add annotated ones first
-             .addGauge(Interns.info(
-                 "HealthyNodes",
-                 "Number of healthy datanodes"),
-                 nodeCount.get(HEALTHY.toString()))
-             .addGauge(Interns.info(
-                 "HealthyReadOnlyNodes",
-                 "Number of healthy and read only datanodes"),
-                 nodeCount.get(HEALTHY_READONLY.toString()))
-             .addGauge(Interns.info("StaleNodes",
-                 "Number of stale datanodes"),
-                 nodeCount.get(STALE.toString()))
-             .addGauge(Interns.info("DeadNodes",
-                 "Number of dead datanodes"),
-                 nodeCount.get(DEAD.toString()))
-             .addGauge(Interns.info("DecommissioningNodes",
-                 "Number of decommissioning datanodes"),
-                 nodeCount.get(DECOMMISSIONING.toString()))
-             .addGauge(Interns.info("DecommissionedNodes",
-                 "Number of decommissioned datanodes"),
-                 nodeCount.get(DECOMMISSIONED.toString()))
-             .addGauge(Interns.info("DiskCapacity",
-                 "Total disk capacity"),
-                 nodeInfo.get("DISKCapacity"))
-             .addGauge(Interns.info("DiskUsed",
-                 "Total disk capacity used"),
-                 nodeInfo.get("DISKUsed"))
-             .addGauge(Interns.info("DiskRemaining",
-                 "Total disk capacity remaining"),
-                 nodeInfo.get("DISKRemaining"))
-             .addGauge(Interns.info("SSDCapacity",
-                 "Total ssd capacity"),
-                 nodeInfo.get("SSDCapacity"))
-             .addGauge(Interns.info("SSDUsed",
-                 "Total ssd capacity used"),
-                 nodeInfo.get("SSDUsed"))
-             .addGauge(Interns.info("SSDRemaining",
-                 "Total disk capacity remaining"),
-                 nodeInfo.get("SSDRemaining")),
-         all);
 -
+     /**
+      * Loop over the Node map and create a metric for the cross product of all
+      * operational and health states, i.e.:
+      *     InServiceHealthy
+      *     InServiceStale
+      *     ...
+      *     EnteringMaintenanceHealthy
+      *     ...
+      */
+     MetricsRecordBuilder metrics = collector.addRecord(registry.info());
+     for(Map.Entry<String, Map<String, Integer>> e : nodeCount.entrySet()) {
+       for(Map.Entry<String, Integer> h : e.getValue().entrySet()) {
+         metrics.addGauge(
+             Interns.info(
+                 StringUtils.camelize(e.getKey()+"_"+h.getKey()+"_nodes"),
+                 "Number of "+e.getKey()+" "+h.getKey()+" datanodes"),
+             h.getValue());
+       }
+     }
+ 
+     for (Map.Entry<String, Long> e : nodeInfo.entrySet()) {
+       metrics.addGauge(
+           Interns.info(e.getKey(), diskMetricDescription(e.getKey())),
+           e.getValue());
+     }
+     registry.snapshot(metrics, all);
+   }
+ 
+   private String diskMetricDescription(String metric) {
+     StringBuilder sb = new StringBuilder();
+     sb.append("Total");
+     if (metric.indexOf("Maintenance") >= 0) {
+       sb.append(" maintenance");
+     } else if (metric.indexOf("Decommissioned") >= 0) {
+       sb.append(" decommissioned");
+     }
+     if (metric.indexOf("DiskCapacity") >= 0) {
+       sb.append(" disk capacity");
+     } else if (metric.indexOf("DiskUsed") >= 0) {
+       sb.append(" disk capacity used");
+     } else if (metric.indexOf("DiskRemaining") >= 0) {
+       sb.append(" disk capacity remaining");
+     } else if (metric.indexOf("SSDCapacity") >= 0) {
+       sb.append(" SSD capacity");
+     } else if (metric.indexOf("SSDUsed") >= 0) {
+       sb.append(" SSD capacity used");
+     } else if (metric.indexOf("SSDRemaining") >= 0) {
+       sb.append(" SSD capacity remaining");
+     }
+     return sb.toString();
    }
  }
++
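
(Reviewer note, not part of the patch: a rough, self-contained sketch of the metric-name cross product the loop above produces. The camelize() helper below only approximates org.apache.hadoop.util.StringUtils.camelize (split on '_', capitalize each word), and the state keys are assumed to be the enum names exposed by the MXBean.)

import java.util.LinkedHashMap;
import java.util.Map;

public class NodeMetricNameSketch {

  // Rough stand-in for StringUtils.camelize: "IN_SERVICE" -> "InService".
  static String camelize(String s) {
    StringBuilder out = new StringBuilder();
    for (String word : s.toLowerCase().split("_")) {
      if (!word.isEmpty()) {
        out.append(Character.toUpperCase(word.charAt(0)))
            .append(word.substring(1));
      }
    }
    return out.toString();
  }

  public static void main(String[] args) {
    Map<String, Map<String, Integer>> nodeCount = new LinkedHashMap<>();
    Map<String, Integer> inService = new LinkedHashMap<>();
    inService.put("HEALTHY", 3);
    inService.put("STALE", 1);
    nodeCount.put("IN_SERVICE", inService);

    Map<String, Integer> maintenance = new LinkedHashMap<>();
    maintenance.put("HEALTHY", 1);
    nodeCount.put("ENTERING_MAINTENANCE", maintenance);

    for (Map.Entry<String, Map<String, Integer>> e : nodeCount.entrySet()) {
      for (Map.Entry<String, Integer> h : e.getValue().entrySet()) {
        // Prints e.g. "InServiceHealthyNodes = 3".
        System.out.println(
            camelize(e.getKey() + "_" + h.getKey() + "_nodes")
                + " = " + h.getValue());
      }
    }
  }
}
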
diff --cc hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java
index 3494b03,9b6e0e0..0a3e137
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java
@@@ -18,17 -18,23 +18,25 @@@
  
  package org.apache.hadoop.hdds.scm.node.states;
  
+ import java.util.ArrayList;
+ import java.util.Collections;
+ import java.util.HashSet;
+ import java.util.List;
+ import java.util.Set;
+ import java.util.UUID;
+ import java.util.concurrent.ConcurrentHashMap;
+ import java.util.concurrent.locks.ReadWriteLock;
+ import java.util.concurrent.locks.ReentrantReadWriteLock;
+ import java.util.stream.Collectors;
+ 
  import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
  import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
 +import org.apache.hadoop.hdds.protocol.proto
 +    .StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
  import org.apache.hadoop.hdds.scm.container.ContainerID;
  import org.apache.hadoop.hdds.scm.node.DatanodeInfo;
- 
- import java.util.*;
- import java.util.concurrent.ConcurrentHashMap;
- import java.util.concurrent.locks.ReadWriteLock;
- import java.util.concurrent.locks.ReentrantReadWriteLock;
+ import org.apache.hadoop.hdds.scm.node.NodeStatus;
  
  /**
   * Maintains the state of datanodes in SCM. This class should only be used by
@@@ -77,13 -67,11 +69,14 @@@ public class NodeStateMap 
     * Adds a node to NodeStateMap.
     *
     * @param datanodeDetails DatanodeDetails
-    * @param nodeState initial NodeState
+    * @param nodeStatus initial NodeStatus
 +   * @param layoutInfo initial LayoutVersionProto
     *
     * @throws NodeAlreadyExistsException if the node already exist
     */
-   public void addNode(DatanodeDetails datanodeDetails, NodeState nodeState,
 -  public void addNode(DatanodeDetails datanodeDetails, NodeStatus nodeStatus)
++  public void addNode(DatanodeDetails datanodeDetails, NodeStatus nodeStatus,
 +                      LayoutVersionProto layoutInfo)
++
        throws NodeAlreadyExistsException {
      lock.writeLock().lock();
      try {
@@@ -91,9 -79,8 +84,9 @@@
        if (nodeMap.containsKey(id)) {
          throw new NodeAlreadyExistsException("Node UUID: " + id);
        }
-       nodeMap.put(id, new DatanodeInfo(datanodeDetails, layoutInfo));
-       nodeToContainer.put(id, ConcurrentHashMap.newKeySet());
-       stateMap.get(nodeState).add(id);
 -      nodeMap.put(id, new DatanodeInfo(datanodeDetails, nodeStatus));
++      nodeMap.put(id, new DatanodeInfo(datanodeDetails, nodeStatus,
++          layoutInfo));
+       nodeToContainer.put(id, new HashSet<>());
      } finally {
        lock.writeLock().unlock();
      }
diff --cc hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
index 9285555,5dfcc3c..0442908
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
@@@ -279,21 -281,27 +289,42 @@@ public final class StorageContainerLoca
              .setGetSafeModeRuleStatusesResponse(getSafeModeRuleStatues(
                  request.getGetSafeModeRuleStatusesRequest()))
              .build();
+       case DecommissionNodes:
+         return ScmContainerLocationResponse.newBuilder()
+             .setCmdType(request.getCmdType())
+             .setStatus(Status.OK)
+             .setDecommissionNodesResponse(decommissionNodes(
+                 request.getDecommissionNodesRequest()))
+             .build();
+       case RecommissionNodes:
+         return ScmContainerLocationResponse.newBuilder()
+             .setCmdType(request.getCmdType())
+             .setStatus(Status.OK)
+             .setRecommissionNodesResponse(recommissionNodes(
+                 request.getRecommissionNodesRequest()))
+             .build();
+       case StartMaintenanceNodes:
+         return ScmContainerLocationResponse.newBuilder()
+             .setCmdType(request.getCmdType())
+             .setStatus(Status.OK)
+             .setStartMaintenanceNodesResponse(startMaintenanceNodes(
+                 request.getStartMaintenanceNodesRequest()))
+           .build();
 +      case FinalizeScmUpgrade:
 +        return ScmContainerLocationResponse.newBuilder()
 +            .setCmdType(request.getCmdType())
 +            .setStatus(Status.OK)
 +            .setFinalizeScmUpgradeResponse(getFinalizeScmUpgrade(
 +                request.getFinalizeScmUpgradeRequest()))
 +            .build();
 +      case QueryUpgradeFinalizationProgress:
 +        return ScmContainerLocationResponse.newBuilder()
 +            .setCmdType(request.getCmdType())
 +            .setStatus(Status.OK)
 +            .setQueryUpgradeFinalizationProgressResponse(
 +                getQueryUpgradeFinalizationProgress(
-                 request.getQueryUpgradeFinalizationProgressRequest()))
++                    request.getQueryUpgradeFinalizationProgressRequest()))
 +            .build();
        default:
          throw new IllegalArgumentException(
              "Unknown command type: " + request.getCmdType());
diff --cc hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
index 87a3462,c2dafcb..d397a87
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@@ -86,9 -84,9 +87,10 @@@ import static org.apache.hadoop.hdds.pr
  import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.createPipelineCommand;
  import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.deleteBlocksCommand;
  import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.deleteContainerCommand;
 +import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.finalizeNewLayoutVersionCommand;
  import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.replicateContainerCommand;
  import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.reregisterCommand;
+ import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.setNodeOperationalStateCommand;
  import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY;
  import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_DEFAULT;
  import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY;
@@@ -350,12 -346,12 +352,18 @@@ public class SCMDatanodeProtocolServer 
            .setClosePipelineCommandProto(
                ((ClosePipelineCommand)cmd).getProto())
            .build();
+     case setNodeOperationalStateCommand:
+       return builder
 -          .setCommandType(setNodeOperationalStateCommand)
 -          .setSetNodeOperationalStateCommandProto(
 -              ((SetNodeOperationalStateCommand)cmd).getProto())
 -          .build();
++            .setCommandType(setNodeOperationalStateCommand)
++            .setSetNodeOperationalStateCommandProto(
++                ((SetNodeOperationalStateCommand)cmd).getProto())
++            .build();
 +    case finalizeNewLayoutVersionCommand:
 +      return builder
 +            .setCommandType(finalizeNewLayoutVersionCommand)
 +            .setFinalizeNewLayoutVersionCommandProto(
 +                ((FinalizeNewLayoutVersionCommand)cmd).getProto())
 +            .build();
      default:
        throw new IllegalArgumentException("Scm command " +
            cmd.getType().toString() + " is not implemented");
diff --cc hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index d77524b,6e214c2..2399f85
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@@ -76,14 -73,13 +75,16 @@@ import org.apache.hadoop.hdds.scm.net.N
  import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
  import org.apache.hadoop.hdds.scm.node.DeadNodeHandler;
  import org.apache.hadoop.hdds.scm.node.NewNodeHandler;
+ import org.apache.hadoop.hdds.scm.node.StartDatanodeAdminHandler;
 -import org.apache.hadoop.hdds.scm.node.NonHealthyToHealthyNodeHandler;
  import org.apache.hadoop.hdds.scm.node.NodeManager;
  import org.apache.hadoop.hdds.scm.node.NodeReportHandler;
 +import org.apache.hadoop.hdds.scm.node.NonHealthyToReadOnlyHealthyNodeHandler;
 +import org.apache.hadoop.hdds.scm.node.ReadOnlyHealthyToHealthyNodeHandler;
  import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
  import org.apache.hadoop.hdds.scm.node.StaleNodeHandler;
 +import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
 +import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+ import org.apache.hadoop.hdds.scm.node.NodeDecommissionManager;
  import org.apache.hadoop.hdds.scm.pipeline.PipelineActionHandler;
  import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
  import org.apache.hadoop.hdds.scm.pipeline.PipelineReportHandler;
@@@ -126,9 -117,8 +127,10 @@@ import com.google.common.cache.Cache
  import com.google.common.cache.CacheBuilder;
  import com.google.common.cache.RemovalListener;
  import com.google.protobuf.BlockingService;
+ import org.apache.commons.lang3.tuple.Pair;
  import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT_DEFAULT;
 +import static org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState.CLOSED;
 +
  import org.apache.ratis.grpc.GrpcTlsConfig;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
@@@ -315,12 -301,10 +319,14 @@@ public final class StorageContainerMana
          new StaleNodeHandler(scmNodeManager, pipelineManager, conf);
      DeadNodeHandler deadNodeHandler = new DeadNodeHandler(scmNodeManager,
          pipelineManager, containerManager);
+     StartDatanodeAdminHandler datanodeStartAdminHandler =
+         new StartDatanodeAdminHandler(scmNodeManager, pipelineManager);
 -    NonHealthyToHealthyNodeHandler nonHealthyToHealthyNodeHandler =
 -        new NonHealthyToHealthyNodeHandler(pipelineManager, conf);
 +    ReadOnlyHealthyToHealthyNodeHandler readOnlyHealthyToHealthyNodeHandler =
 +        new ReadOnlyHealthyToHealthyNodeHandler(pipelineManager, conf);
 +    NonHealthyToReadOnlyHealthyNodeHandler
 +        nonHealthyToReadOnlyHealthyNodeHandler =
 +        new NonHealthyToReadOnlyHealthyNodeHandler(scmNodeManager,
 +            pipelineManager, conf);
      ContainerActionsHandler actionsHandler = new ContainerActionsHandler();
      PendingDeleteHandler pendingDeleteHandler =
          new PendingDeleteHandler(scmBlockManager.getSCMBlockDeletingService());
@@@ -358,11 -341,11 +363,13 @@@
      eventQueue.addHandler(SCMEvents.CLOSE_CONTAINER, closeContainerHandler);
      eventQueue.addHandler(SCMEvents.NEW_NODE, newNodeHandler);
      eventQueue.addHandler(SCMEvents.STALE_NODE, staleNodeHandler);
 -    eventQueue.addHandler(SCMEvents.NON_HEALTHY_TO_HEALTHY_NODE,
 -        nonHealthyToHealthyNodeHandler);
 +    eventQueue.addHandler(SCMEvents.READ_ONLY_HEALTHY_TO_HEALTHY_NODE,
 +        readOnlyHealthyToHealthyNodeHandler);
 +    eventQueue.addHandler(SCMEvents.NON_HEALTHY_TO_READONLY_HEALTHY_NODE,
 +        nonHealthyToReadOnlyHealthyNodeHandler);
      eventQueue.addHandler(SCMEvents.DEAD_NODE, deadNodeHandler);
+     eventQueue.addHandler(SCMEvents.START_ADMIN_ON_NODE,
+         datanodeStartAdminHandler);
      eventQueue.addHandler(SCMEvents.CMD_STATUS_REPORT, cmdStatusReportHandler);
      eventQueue
          .addHandler(SCMEvents.PENDING_DELETE_STATUS, pendingDeleteHandler);
diff --cc hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java
index 0000000,7f0d651..a67115a
mode 000000,100644..100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java
@@@ -1,0 -1,323 +1,337 @@@
+ /**
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  * <p>
+  * http://www.apache.org/licenses/LICENSE-2.0
+  * <p>
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hdds.scm.container;
+ 
+ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
++import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
+ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
++import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
++import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
++import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
+ import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
+ import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
+ import org.apache.hadoop.hdds.scm.net.NetworkTopology;
+ import org.apache.hadoop.hdds.scm.node.DatanodeInfo;
+ import org.apache.hadoop.hdds.scm.node.NodeManager;
+ import org.apache.hadoop.hdds.scm.node.NodeStatus;
+ import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
+ import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+ import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+ import org.apache.hadoop.hdds.server.events.EventPublisher;
+ import org.apache.hadoop.ozone.protocol.VersionResponse;
+ import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
+ import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
+ import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
+ 
+ import java.io.IOException;
+ import java.util.*;
+ import java.util.concurrent.ConcurrentHashMap;
+ 
+ /**
+  * Basic implementation of the NodeManager interface which can be used in tests.
+  *
+  * TODO - Merge the functionality with MockNodeManager, as it needs refactoring
+  *        after the introduction of decommission and maintenance states.
+  */
+ public class SimpleMockNodeManager implements NodeManager {
+ 
+   private Map<UUID, DatanodeInfo> nodeMap = new ConcurrentHashMap<>();
+   private Map<UUID, Set<PipelineID>> pipelineMap = new ConcurrentHashMap<>();
+   private Map<UUID, Set<ContainerID>> containerMap = new ConcurrentHashMap<>();
+ 
+   public void register(DatanodeDetails dd, NodeStatus status) {
+     dd.setPersistedOpState(status.getOperationalState());
+     dd.setPersistedOpStateExpiryEpochSec(status.getOpStateExpiryEpochSeconds());
 -    nodeMap.put(dd.getUuid(), new DatanodeInfo(dd, status));
++    nodeMap.put(dd.getUuid(), new DatanodeInfo(dd, status, null));
+   }
+ 
+   public void setNodeStatus(DatanodeDetails dd, NodeStatus status) {
+     dd.setPersistedOpState(status.getOperationalState());
+     dd.setPersistedOpStateExpiryEpochSec(status.getOpStateExpiryEpochSeconds());
+     DatanodeInfo dni = nodeMap.get(dd.getUuid());
+     dni.setNodeStatus(status);
+   }
+ 
+   /**
+    * Set the number of pipelines for the given node. This simply generates
+    * new PipelineID objects and places them in a set. No actual pipelines are
+    * created.
+    *
+    * Setting the count to zero effectively deletes the pipelines for the node
+    *
+    * @param dd The DatanodeDetails for which to create the pipelines
+    * @param count The number of pipelines to create or zero to delete all
+    *              pipelines
+    */
+   public void setPipelines(DatanodeDetails dd, int count) {
+     Set<PipelineID> pipelines = new HashSet<>();
+     for (int i=0; i<count; i++) {
+       pipelines.add(PipelineID.randomId());
+     }
+     pipelineMap.put(dd.getUuid(), pipelines);
+   }
+ 
+   /**
+    * If the given node was registed with the nodeManager, return the
+    * NodeStatus for the node. Otherwise return a NodeStatus of "In Service
+    * and Healthy".
+    * @param datanodeDetails DatanodeDetails
+    * @return The NodeStatus of the node if it is registered, otherwise an
+    *         Inservice and Healthy NodeStatus.
+    */
+   @Override
+   public NodeStatus getNodeStatus(DatanodeDetails datanodeDetails)
+       throws NodeNotFoundException {
+     DatanodeInfo dni = nodeMap.get(datanodeDetails.getUuid());
+     if (dni != null) {
+       return dni.getNodeStatus();
+     } else {
+       return NodeStatus.inServiceHealthy();
+     }
+   }
+ 
+   @Override
+   public void setNodeOperationalState(DatanodeDetails dn,
 -      HddsProtos.NodeOperationalState newState) throws NodeNotFoundException {
++                                      NodeOperationalState newState)
++      throws NodeNotFoundException {
+     setNodeOperationalState(dn, newState, 0);
+   }
+ 
+   @Override
+   public void setNodeOperationalState(DatanodeDetails dn,
 -      HddsProtos.NodeOperationalState newState, long opStateExpiryEpocSec)
++                                      NodeOperationalState newState,
++                                      long opStateExpiryEpocSec)
+       throws NodeNotFoundException {
+     DatanodeInfo dni = nodeMap.get(dn.getUuid());
+     if (dni == null) {
+       throw new NodeNotFoundException();
+     }
+     dni.setNodeStatus(
+         new NodeStatus(
+             newState, dni.getNodeStatus().getHealth(), opStateExpiryEpocSec));
+   }
+ 
+   /**
+    * Return the set of PipelineID associated with the given DatanodeDetails.
+    *
+    * If there are no pipelines, null is returned, to mirror the behaviour of
+    * SCMNodeManager.
+    *
+    * @param datanodeDetails The datanode for which to return the pipelines
+    * @return A set of PipelineID or null if there are none
+    */
+   @Override
+   public Set<PipelineID> getPipelines(DatanodeDetails datanodeDetails) {
+     Set<PipelineID> p = pipelineMap.get(datanodeDetails.getUuid());
+     if (p == null || p.size() == 0) {
+       return null;
+     } else {
+       return p;
+     }
+   }
+ 
+   @Override
+   public int getPipelinesCount(DatanodeDetails datanodeDetails) {
+     return 0;
+   }
+ 
+   @Override
+   public void setContainers(DatanodeDetails dn,
 -      Set<ContainerID> containerIds) throws NodeNotFoundException {
++                            Set<ContainerID> containerIds)
++      throws NodeNotFoundException {
+     containerMap.put(dn.getUuid(), containerIds);
+   }
+ 
+   /**
+    * Return the set of ContainerID associated with the datanode. If there are
+    * no containers present, an empty set is returned to mirror the behaviour
+    * of SCMNodeManager.
+    *
+    * @param dn The datanodeDetails for which to return the containers
+    * @return A Set of ContainerID or an empty Set if none are present
+    * @throws NodeNotFoundException
+    */
+   @Override
+   public Set<ContainerID> getContainers(DatanodeDetails dn)
+       throws NodeNotFoundException {
+     // The concrete implementation of this method in SCMNodeManager will return
+     // an empty set if there are no containers, and will never return null.
+     return containerMap
+         .computeIfAbsent(dn.getUuid(), key -> new HashSet<>());
+   }
+ 
+   /**
+    * Below here are auto-generated placeholder methods that implement the
+    * interface.
+    */
+ 
+   @Override
+   public List<DatanodeDetails> getNodes(NodeStatus nodeStatus) {
+     return null;
+   }
+ 
+   @Override
+   public List<DatanodeDetails> getNodes(
 -      HddsProtos.NodeOperationalState opState, HddsProtos.NodeState health) {
++      NodeOperationalState opState, HddsProtos.NodeState health) {
+     return null;
+   }
+ 
+   @Override
+   public int getNodeCount(NodeStatus nodeStatus) {
+     return 0;
+   }
+ 
+   @Override
 -  public int getNodeCount(HddsProtos.NodeOperationalState opState,
++  public int getNodeCount(NodeOperationalState opState,
+                           HddsProtos.NodeState health) {
+     return 0;
+   }
+ 
+   @Override
+   public List<DatanodeDetails> getAllNodes() {
+     return null;
+   }
+ 
+   @Override
+   public SCMNodeStat getStats() {
+     return null;
+   }
+ 
+   @Override
+   public Map<DatanodeDetails, SCMNodeStat> getNodeStats() {
+     return null;
+   }
+ 
+   @Override
+   public SCMNodeMetric getNodeStat(DatanodeDetails datanodeDetails) {
+     return null;
+   }
+ 
+   @Override
+   public void addPipeline(Pipeline pipeline) {
+   }
+ 
+   @Override
+   public void removePipeline(Pipeline pipeline) {
+   }
+ 
+   @Override
+   public void addContainer(DatanodeDetails datanodeDetails,
 -      ContainerID containerId) throws NodeNotFoundException {
++                           ContainerID containerId)
++      throws NodeNotFoundException {
+   }
+ 
+ 
+ 
+   @Override
+   public void addDatanodeCommand(UUID dnId, SCMCommand command) {
+   }
+ 
+   @Override
+   public void processNodeReport(DatanodeDetails datanodeDetails,
 -      StorageContainerDatanodeProtocolProtos.NodeReportProto nodeReport) {
++                                NodeReportProto nodeReport) {
++  }
++
++  @Override
++  public void processLayoutVersionReport(DatanodeDetails datanodeDetails,
++                                         LayoutVersionProto layoutReport) {
+   }
+ 
+   @Override
+   public List<SCMCommand> getCommandQueue(UUID dnID) {
+     return null;
+   }
+ 
+   @Override
+   public DatanodeDetails getNodeByUuid(String uuid) {
+     return null;
+   }
+ 
+   @Override
+   public List<DatanodeDetails> getNodesByAddress(String address) {
+     return null;
+   }
+ 
+   @Override
+   public NetworkTopology getClusterNetworkTopologyMap() {
+     return null;
+   }
+ 
+   @Override
+   public int minHealthyVolumeNum(List<DatanodeDetails> dnList) {
+     return 0;
+   }
+ 
+   @Override
+   public int pipelineLimit(DatanodeDetails dn) {
+     return 1;
+   }
+ 
+   @Override
+   public int minPipelineLimit(List<DatanodeDetails> dn) {
+     return 0;
+   }
+ 
+   @Override
+   public void close() throws IOException {
+ 
+   }
+ 
+   @Override
+   public Map<String, Map<String, Integer>> getNodeCount() {
+     return null;
+   }
+ 
+   @Override
+   public Map<String, Long> getNodeInfo() {
+     return null;
+   }
+ 
+   @Override
+   public void onMessage(CommandForDatanode commandForDatanode,
+                         EventPublisher publisher) {
+   }
+ 
+   @Override
+   public VersionResponse getVersion(
+       StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto
+           versionRequest) {
+     return null;
+   }
+ 
+   @Override
+   public RegisteredCommand register(DatanodeDetails datanodeDetails,
 -      StorageContainerDatanodeProtocolProtos.NodeReportProto nodeReport,
 -      StorageContainerDatanodeProtocolProtos.PipelineReportsProto
 -      pipelineReport) {
++                                    NodeReportProto nodeReport,
++                                    PipelineReportsProto pipelineReport,
++                                    LayoutVersionProto layoutreport) {
+     return null;
+   }
+ 
+   @Override
 -  public List<SCMCommand> processHeartbeat(DatanodeDetails datanodeDetails) {
++  public List<SCMCommand> processHeartbeat(DatanodeDetails datanodeDetails,
++                                           LayoutVersionProto layoutInfo) {
+     return null;
+   }
+ 
+   @Override
+   public Boolean isNodeRegistered(DatanodeDetails datanodeDetails) {
+     return false;
+   }
+ 
 -}
++}
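
(Reviewer note, not part of the patch: a brief usage sketch for the mock above, pieced together from names that appear elsewhere in this diff, such as MockDatanodeDetails and NodeStatus.inServiceHealthy(); illustrative only.)

import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
import org.apache.hadoop.hdds.scm.container.SimpleMockNodeManager;
import org.apache.hadoop.hdds.scm.node.NodeStatus;

public class SimpleMockNodeManagerSketch {
  public static void main(String[] args) throws Exception {
    SimpleMockNodeManager nodeManager = new SimpleMockNodeManager();
    DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();

    // Register with an explicit status, then attach two synthetic pipelines.
    nodeManager.register(dn, NodeStatus.inServiceHealthy());
    nodeManager.setPipelines(dn, 2);

    System.out.println(nodeManager.getNodeStatus(dn));       // in-service, healthy
    System.out.println(nodeManager.getPipelines(dn).size()); // 2
  }
}
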
diff --cc hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java
index 0000000,07ef0b7..8d4b3b1
mode 000000,100644..100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java
@@@ -1,0 -1,297 +1,297 @@@
+ /**
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  * <p>
+  * http://www.apache.org/licenses/LICENSE-2.0
+  * <p>
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hdds.scm.node;
+ 
+ import org.apache.hadoop.hdds.HddsConfigKeys;
+ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+ import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
+ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+ import org.apache.hadoop.hdds.scm.HddsTestUtils;
+ import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
+ import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+ import org.apache.hadoop.security.authentication.client.AuthenticationException;
+ import org.apache.hadoop.test.GenericTestUtils;
+ import org.junit.Before;
+ import org.junit.Test;
+ import java.io.IOException;
+ import java.util.List;
+ import java.util.UUID;
+ import java.util.Arrays;
+ import java.util.ArrayList;
+ import static junit.framework.TestCase.assertEquals;
+ import static org.assertj.core.api.Fail.fail;
+ import static org.junit.Assert.assertNotEquals;
+ 
+ /**
+  * Unit tests for the decommission manager.
+  */
+ 
+ public class TestNodeDecommissionManager {
+ 
+   private NodeDecommissionManager decom;
+   private StorageContainerManager scm;
+   private NodeManager nodeManager;
+   private OzoneConfiguration conf;
+   private String storageDir;
+ 
+   @Before
+   public void setup() throws Exception {
+     conf = new OzoneConfiguration();
+     storageDir = GenericTestUtils.getTempPath(
+         TestDeadNodeHandler.class.getSimpleName() + UUID.randomUUID());
+     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir);
+     nodeManager = createNodeManager(conf);
+     decom = new NodeDecommissionManager(
+         conf, nodeManager, null, null, null);
+   }
+ 
+   @Test
+   public void testHostStringsParseCorrectly()
+       throws InvalidHostStringException {
+     NodeDecommissionManager.HostDefinition def =
+         new NodeDecommissionManager.HostDefinition("foobar");
+     assertEquals("foobar", def.getHostname());
+     assertEquals(-1, def.getPort());
+ 
+     def = new NodeDecommissionManager.HostDefinition(" foobar ");
+     assertEquals("foobar", def.getHostname());
+     assertEquals(-1, def.getPort());
+ 
+     def = new NodeDecommissionManager.HostDefinition("foobar:1234");
+     assertEquals("foobar", def.getHostname());
+     assertEquals(1234, def.getPort());
+ 
+     def = new NodeDecommissionManager.HostDefinition(
+         "foobar.mycompany.com:1234");
+     assertEquals("foobar.mycompany.com", def.getHostname());
+     assertEquals(1234, def.getPort());
+ 
+     try {
+       new NodeDecommissionManager.HostDefinition("foobar:abcd");
+       fail("InvalidHostStringException should have been thrown");
+     } catch (InvalidHostStringException e) {
+     }
+   }
+ 
+   @Test
+   public void testAnyInvalidHostThrowsException()
+       throws InvalidHostStringException{
+     List<DatanodeDetails> dns = generateDatanodes();
+ 
+     // Try to decommission a host that does exist, but give incorrect port
+     try {
+       decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress()+":10"));
+       fail("InvalidHostStringException expected");
+     } catch (InvalidHostStringException e) {
+     }
+ 
+     // Try to decommission a host that does not exist
+     try {
+       decom.decommissionNodes(Arrays.asList("123.123.123.123"));
+       fail("InvalidHostStringException expected");
+     } catch (InvalidHostStringException e) {
+     }
+ 
+     // Try to decommission a host that does exist and a host that does not
+     try {
+       decom.decommissionNodes(Arrays.asList(
+           dns.get(1).getIpAddress(), "123,123,123,123"));
+       fail("InvalidHostStringException expected");
+     } catch (InvalidHostStringException e) {
+     }
+ 
+     // Try to decommission a host with many DNs on the address with no port
+     try {
+       decom.decommissionNodes(Arrays.asList(
+           dns.get(0).getIpAddress()));
+       fail("InvalidHostStringException expected");
+     } catch (InvalidHostStringException e) {
+     }
+ 
+     // Try to decommission a host with many DNs on the address with a port
+     // that does not exist
+     try {
+       decom.decommissionNodes(Arrays.asList(
+           dns.get(0).getIpAddress()+":10"));
+       fail("InvalidHostStringException expected");
+     } catch (InvalidHostStringException e) {
+     }
+   }
+ 
+   @Test
+   public void testNodesCanBeDecommissionedAndRecommissioned()
+       throws InvalidHostStringException, NodeNotFoundException {
+     List<DatanodeDetails> dns = generateDatanodes();
+ 
+     // Decommission 2 valid nodes
+     decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(),
+         dns.get(2).getIpAddress()));
+     assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+         nodeManager.getNodeStatus(dns.get(1)).getOperationalState());
+     assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+         nodeManager.getNodeStatus(dns.get(2)).getOperationalState());
+ 
+     // Running the command again gives no error - nodes already decommissioning
+     // are silently ignored.
+     decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(),
+         dns.get(2).getIpAddress()));
+ 
+     // Attempt to decommission dn(10) which has multiple hosts on the same IP
+     // and we hardcoded ports to 3456, 4567, 5678
+     DatanodeDetails multiDn = dns.get(10);
+     String multiAddr =
+         multiDn.getIpAddress()+":"+multiDn.getPorts().get(0).getValue();
+     decom.decommissionNodes(Arrays.asList(multiAddr));
+     assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+         nodeManager.getNodeStatus(multiDn).getOperationalState());
+ 
+     // Recommission all 3 hosts
+     decom.recommissionNodes(Arrays.asList(
+         multiAddr, dns.get(1).getIpAddress(), dns.get(2).getIpAddress()));
+     decom.getMonitor().run();
+     assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE,
+         nodeManager.getNodeStatus(dns.get(1)).getOperationalState());
+     assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE,
+         nodeManager.getNodeStatus(dns.get(2)).getOperationalState());
+     assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE,
+         nodeManager.getNodeStatus(dns.get(10)).getOperationalState());
+   }
+ 
+   @Test
+   public void testNodesCanBePutIntoMaintenanceAndRecommissioned()
+       throws InvalidHostStringException, NodeNotFoundException {
+     List<DatanodeDetails> dns = generateDatanodes();
+ 
+     // Put 2 valid nodes into maintenance
+     decom.startMaintenanceNodes(Arrays.asList(dns.get(1).getIpAddress(),
+         dns.get(2).getIpAddress()), 100);
+     assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE,
+         nodeManager.getNodeStatus(dns.get(1)).getOperationalState());
+     assertNotEquals(0, nodeManager.getNodeStatus(
+         dns.get(1)).getOpStateExpiryEpochSeconds());
+     assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE,
+         nodeManager.getNodeStatus(dns.get(2)).getOperationalState());
+     assertNotEquals(0, nodeManager.getNodeStatus(
+         dns.get(2)).getOpStateExpiryEpochSeconds());
+ 
+     // Running the command again gives no error - nodes already decommissioning
+     // are silently ignored.
+     decom.startMaintenanceNodes(Arrays.asList(dns.get(1).getIpAddress(),
+         dns.get(2).getIpAddress()), 100);
+ 
+     // Attempt to decommission dn(10) which has multiple hosts on the same IP
+     // and we hardcoded ports to 3456, 4567, 5678
+     DatanodeDetails multiDn = dns.get(10);
+     String multiAddr =
+         multiDn.getIpAddress()+":"+multiDn.getPorts().get(0).getValue();
+     decom.startMaintenanceNodes(Arrays.asList(multiAddr), 100);
+     assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE,
+         nodeManager.getNodeStatus(multiDn).getOperationalState());
+ 
+     // Recommission all 3 hosts
+     decom.recommissionNodes(Arrays.asList(
+         multiAddr, dns.get(1).getIpAddress(), dns.get(2).getIpAddress()));
+     decom.getMonitor().run();
+     assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE,
+         nodeManager.getNodeStatus(dns.get(1)).getOperationalState());
+     assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE,
+         nodeManager.getNodeStatus(dns.get(2)).getOperationalState());
+     assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE,
+         nodeManager.getNodeStatus(dns.get(10)).getOperationalState());
+   }
+ 
+   @Test
+   public void testNodesCannotTransitionFromDecomToMaint() throws Exception {
+     List<DatanodeDetails> dns = generateDatanodes();
+ 
+     // Put 1 node into maintenance and another into decom
+     decom.startMaintenance(dns.get(1), 100);
+     decom.startDecommission(dns.get(2));
+     assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE,
+         nodeManager.getNodeStatus(dns.get(1)).getOperationalState());
+     assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+         nodeManager.getNodeStatus(dns.get(2)).getOperationalState());
+ 
+     // Try to go from maint to decom:
+     try {
+       decom.startDecommission(dns.get(1));
+       fail("Expected InvalidNodeStateException");
+     } catch (InvalidNodeStateException e) {
+     }
+ 
+     // Try to go from decom to maint:
+     try {
+       decom.startMaintenance(dns.get(2), 100);
+       fail("Expected InvalidNodeStateException");
+     } catch (InvalidNodeStateException e) {
+     }
+ 
+     // Ensure the states are still as before
+     assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE,
+         nodeManager.getNodeStatus(dns.get(1)).getOperationalState());
+     assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+         nodeManager.getNodeStatus(dns.get(2)).getOperationalState());
+   }
+ 
+ 
+ 
+   private SCMNodeManager createNodeManager(OzoneConfiguration config)
+       throws IOException, AuthenticationException {
+     scm = HddsTestUtils.getScm(config);
+     return (SCMNodeManager) scm.getScmNodeManager();
+   }
+ 
+   /**
+    * Generate a list of random DNs and return the list. A total of 11 DNs will
+    * be generated and registered with the node manager. Indexes 0 and 10 will
+    * have the same IP and host and the rest will have unique IPs and Hosts.
+    * The DN at index 10, has 3 hard coded ports of 3456, 4567, 5678. All other
+    * DNs will have ports set to 0.
+    * @return The list of DatanodeDetails Generated
+    */
+   private List<DatanodeDetails> generateDatanodes() {
+     List<DatanodeDetails> dns = new ArrayList<>();
+     for (int i=0; i<10; i++) {
+       DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
+       dns.add(dn);
 -      nodeManager.register(dn, null, null);
++      nodeManager.register(dn, null, null, null);
+     }
+     // We have 10 random DNs, we want to create another one that is on the same
+     // host as some of the others.
+     DatanodeDetails multiDn = dns.get(0);
+ 
+     DatanodeDetails.Builder builder = DatanodeDetails.newBuilder();
+     builder.setUuid(UUID.randomUUID())
+         .setHostName(multiDn.getHostName())
+         .setIpAddress(multiDn.getIpAddress())
+         .addPort(DatanodeDetails.newPort(
+             DatanodeDetails.Port.Name.STANDALONE, 3456))
+         .addPort(DatanodeDetails.newPort(
+             DatanodeDetails.Port.Name.RATIS, 4567))
+         .addPort(DatanodeDetails.newPort(
+             DatanodeDetails.Port.Name.REST, 5678))
+         .setNetworkLocation(multiDn.getNetworkLocation());
+ 
+     DatanodeDetails dn = builder.build();
 -    nodeManager.register(dn, null, null);
++    nodeManager.register(dn, null, null, null);
+     dns.add(dn);
+     return dns;
+   }
+ 
 -}
++}
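
(Reviewer note, not part of the patch: a small stand-in sketch of the host-string parsing the tests above assert, i.e. trim, optional ":port" suffix, non-numeric port rejected. It is not the real NodeDecommissionManager.HostDefinition, which wraps the failure in InvalidHostStringException rather than letting NumberFormatException escape.)

public final class HostDefinitionSketch {
  private final String hostname;
  private final int port;

  HostDefinitionSketch(String raw) {
    String s = raw.trim();
    int idx = s.lastIndexOf(':');
    if (idx < 0) {
      hostname = s;
      port = -1;                                     // no port given
    } else {
      hostname = s.substring(0, idx);
      port = Integer.parseInt(s.substring(idx + 1)); // throws on "foobar:abcd"
    }
  }

  public static void main(String[] args) {
    HostDefinitionSketch def =
        new HostDefinitionSketch(" foobar.mycompany.com:1234 ");
    System.out.println(def.hostname + " " + def.port); // foobar.mycompany.com 1234
  }
}
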
diff --cc hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeStateManager.java
index 0000000,e19f626..3962521
mode 000000,100644..100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeStateManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeStateManager.java
@@@ -1,0 -1,308 +1,320 @@@
+ /**
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  * http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hdds.scm.node;
+ 
+ import org.apache.hadoop.hdds.conf.ConfigurationSource;
+ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
++import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
+ import org.apache.hadoop.hdds.utils.HddsServerUtil;
+ import org.apache.hadoop.hdds.scm.events.SCMEvents;
+ import org.apache.hadoop.hdds.scm.node.states.NodeAlreadyExistsException;
+ import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
+ import org.apache.hadoop.hdds.server.events.Event;
+ import org.apache.hadoop.hdds.server.events.EventPublisher;
++import org.apache.hadoop.ozone.upgrade.LayoutVersionManager;
+ import org.apache.hadoop.util.Time;
+ import org.junit.After;
+ import org.junit.Before;
+ import org.junit.Test;
++import org.mockito.Mockito;
+ 
+ import java.io.IOException;
+ import java.util.ArrayList;
+ import java.util.Collection;
+ import java.util.List;
+ import java.util.UUID;
+ 
+ import static junit.framework.TestCase.assertEquals;
 -import static junit.framework.TestCase.assertNull;
+ 
+ /**
+  * Class to test the NodeStateManager, which is an internal class used by
+  * the SCMNodeManager.
+  */
+ 
+ public class TestNodeStateManager {
+ 
+   private NodeStateManager nsm;
+   private ConfigurationSource conf;
+   private MockEventPublisher eventPublisher;
++  private static final int TEST_SOFTWARE_LAYOUT_VERSION = 0;
++  private static final int TEST_METADATA_LAYOUT_VERSION = 0;
+ 
+   @Before
+   public void setUp() {
+     conf = new ConfigurationSource() {
+       @Override
+       public String get(String key) {
+         return null;
+       }
+ 
+       @Override
+       public Collection<String> getConfigKeys() {
+         return null;
+       }
+ 
+       @Override
+       public char[] getPassword(String key) throws IOException {
+         return new char[0];
+       }
+     };
+     eventPublisher = new MockEventPublisher();
 -    nsm = new NodeStateManager(conf, eventPublisher);
++    LayoutVersionManager mockVersionManager =
++        Mockito.mock(HDDSLayoutVersionManager.class);
++    Mockito.when(mockVersionManager.getMetadataLayoutVersion())
++        .thenReturn(TEST_METADATA_LAYOUT_VERSION);
++    Mockito.when(mockVersionManager.getSoftwareLayoutVersion())
++        .thenReturn(TEST_SOFTWARE_LAYOUT_VERSION);
++    nsm = new NodeStateManager(conf, eventPublisher, mockVersionManager);
+   }
+ 
+   @After
+   public void tearDown() {
+   }
+ 
+   @Test
+   public void testNodeCanBeAddedAndRetrieved()
+       throws NodeAlreadyExistsException, NodeNotFoundException {
+     // Create a datanode, then add and retrieve it
+     DatanodeDetails dn = generateDatanode();
 -    nsm.addNode(dn);
++    nsm.addNode(dn, null);
+     assertEquals(dn.getUuid(), nsm.getNode(dn).getUuid());
+     // Now get the status of the newly added node and it should be
+     // IN_SERVICE and HEALTHY
 -    NodeStatus expectedState = NodeStatus.inServiceHealthy();
++    NodeStatus expectedState = NodeStatus.inServiceHealthyReadOnly();
+     assertEquals(expectedState, nsm.getNodeStatus(dn));
+   }
+ 
+   @Test
+   public void testGetAllNodesReturnsCorrectly()
+       throws NodeAlreadyExistsException {
+     DatanodeDetails dn = generateDatanode();
 -    nsm.addNode(dn);
++    nsm.addNode(dn, null);
+     dn = generateDatanode();
 -    nsm.addNode(dn);
++    nsm.addNode(dn, null);
+     assertEquals(2, nsm.getAllNodes().size());
+     assertEquals(2, nsm.getTotalNodeCount());
+   }
+ 
+   @Test
+   public void testGetNodeCountReturnsCorrectly()
+       throws NodeAlreadyExistsException {
+     DatanodeDetails dn = generateDatanode();
 -    nsm.addNode(dn);
 -    assertEquals(1, nsm.getNodes(NodeStatus.inServiceHealthy()).size());
++    nsm.addNode(dn, null);
++    assertEquals(1, nsm.getNodes(NodeStatus.inServiceHealthyReadOnly()).size());
+     assertEquals(0, nsm.getNodes(NodeStatus.inServiceStale()).size());
+   }
+ 
+   @Test
+   public void testGetNodeCount() throws NodeAlreadyExistsException {
+     DatanodeDetails dn = generateDatanode();
 -    nsm.addNode(dn);
 -    assertEquals(1, nsm.getNodeCount(NodeStatus.inServiceHealthy()));
++    nsm.addNode(dn, null);
++    assertEquals(1, nsm.getNodeCount(
++        NodeStatus.inServiceHealthyReadOnly()));
+     assertEquals(0, nsm.getNodeCount(NodeStatus.inServiceStale()));
+   }
+ 
+   @Test
+   public void testNodesMarkedDeadAndStale()
+       throws NodeAlreadyExistsException, NodeNotFoundException {
+     long now = Time.monotonicNow();
+ 
+     // Set the dead and stale limits to be 1 second larger than configured
+     long staleLimit = HddsServerUtil.getStaleNodeInterval(conf) + 1000;
+     long deadLimit = HddsServerUtil.getDeadNodeInterval(conf) + 1000;
+ 
+     DatanodeDetails staleDn = generateDatanode();
 -    nsm.addNode(staleDn);
++    nsm.addNode(staleDn, null);
+     nsm.getNode(staleDn).updateLastHeartbeatTime(now - staleLimit);
+ 
+     DatanodeDetails deadDn = generateDatanode();
 -    nsm.addNode(deadDn);
++    nsm.addNode(deadDn, null);
+     nsm.getNode(deadDn).updateLastHeartbeatTime(now - deadLimit);
+ 
+     DatanodeDetails healthyDn = generateDatanode();
 -    nsm.addNode(healthyDn);
++    nsm.addNode(healthyDn, null);
+     nsm.getNode(healthyDn).updateLastHeartbeatTime();
+ 
+     nsm.checkNodesHealth();
+     assertEquals(healthyDn, nsm.getHealthyNodes().get(0));
+     // A node cannot go directly to dead. It must be marked stale first
+     // due to the allowed state transitions. Therefore we will initially have
+     // 2 stale nodes. Check that both are reported as stale.
+     assertEquals(2, nsm.getStaleNodes().size());
+     // Now check health again and it should be in deadNodes()
+     nsm.checkNodesHealth();
+     assertEquals(staleDn, nsm.getStaleNodes().get(0));
+     assertEquals(deadDn, nsm.getDeadNodes().get(0));
+   }
+ 
+   @Test
+   public void testNodeCanTransitionThroughHealthStatesAndFiresEvents()
+       throws NodeAlreadyExistsException, NodeNotFoundException {
+     long now = Time.monotonicNow();
+ 
+     // Set the dead and stale limits to be 1 second larger than configured
+     long staleLimit = HddsServerUtil.getStaleNodeInterval(conf) + 1000;
+     long deadLimit = HddsServerUtil.getDeadNodeInterval(conf) + 1000;
+ 
+     DatanodeDetails dn = generateDatanode();
 -    nsm.addNode(dn);
++    nsm.addNode(dn, null);
+     assertEquals(SCMEvents.NEW_NODE, eventPublisher.getLastEvent());
+     DatanodeInfo dni = nsm.getNode(dn);
+     dni.updateLastHeartbeatTime();
+ 
+     // Ensure node is initially healthy
+     eventPublisher.clearEvents();
+     nsm.checkNodesHealth();
+     assertEquals(NodeState.HEALTHY, nsm.getNodeStatus(dn).getHealth());
 -    assertNull(eventPublisher.getLastEvent());
++    assertEquals(SCMEvents.READ_ONLY_HEALTHY_TO_HEALTHY_NODE,
++        eventPublisher.getLastEvent());
+ 
+     // Set the heartbeat old enough to make it stale
+     dni.updateLastHeartbeatTime(now - staleLimit);
+     nsm.checkNodesHealth();
+     assertEquals(NodeState.STALE, nsm.getNodeStatus(dn).getHealth());
+     assertEquals(SCMEvents.STALE_NODE, eventPublisher.getLastEvent());
+ 
+     // Now make it dead
+     dni.updateLastHeartbeatTime(now - deadLimit);
+     nsm.checkNodesHealth();
+     assertEquals(NodeState.DEAD, nsm.getNodeStatus(dn).getHealth());
+     assertEquals(SCMEvents.DEAD_NODE, eventPublisher.getLastEvent());
+ 
+     // Transition back to healthy from dead
+     dni.updateLastHeartbeatTime();
+     nsm.checkNodesHealth();
 -    assertEquals(NodeState.HEALTHY, nsm.getNodeStatus(dn).getHealth());
 -    assertEquals(SCMEvents.NON_HEALTHY_TO_HEALTHY_NODE,
++    assertEquals(NodeState.HEALTHY_READONLY, nsm.getNodeStatus(dn).getHealth());
++    assertEquals(SCMEvents.NON_HEALTHY_TO_READONLY_HEALTHY_NODE,
+         eventPublisher.getLastEvent());
+ 
+     // Make the node stale again, and transition to healthy.
+     dni.updateLastHeartbeatTime(now - staleLimit);
+     nsm.checkNodesHealth();
+     assertEquals(NodeState.STALE, nsm.getNodeStatus(dn).getHealth());
+     assertEquals(SCMEvents.STALE_NODE, eventPublisher.getLastEvent());
+     dni.updateLastHeartbeatTime();
+     nsm.checkNodesHealth();
 -    assertEquals(NodeState.HEALTHY, nsm.getNodeStatus(dn).getHealth());
 -    assertEquals(SCMEvents.NON_HEALTHY_TO_HEALTHY_NODE,
++    assertEquals(NodeState.HEALTHY_READONLY, nsm.getNodeStatus(dn).getHealth());
++    assertEquals(SCMEvents.NON_HEALTHY_TO_READONLY_HEALTHY_NODE,
+         eventPublisher.getLastEvent());
+   }
+ 
+   @Test
+   public void testNodeOpStateCanBeSet()
+       throws NodeAlreadyExistsException, NodeNotFoundException {
+     DatanodeDetails dn = generateDatanode();
 -    nsm.addNode(dn);
++    nsm.addNode(dn, null);
+ 
+     nsm.setNodeOperationalState(dn,
+         HddsProtos.NodeOperationalState.DECOMMISSIONED);
+ 
+     NodeStatus newStatus = nsm.getNodeStatus(dn);
+     assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONED,
+         newStatus.getOperationalState());
 -    assertEquals(NodeState.HEALTHY,
++    assertEquals(NodeState.HEALTHY_READONLY,
+         newStatus.getHealth());
+   }
+ 
+   @Test
+   public void testHealthEventsFiredWhenOpStateChanged()
+       throws NodeAlreadyExistsException, NodeNotFoundException {
+     DatanodeDetails dn = generateDatanode();
 -    nsm.addNode(dn);
++    nsm.addNode(dn, null);
+ 
+     // First set the node to decommissioned, then run through all op states in
+     // order and ensure the non_healthy_to_healthy event gets fired
+     nsm.setNodeOperationalState(dn,
+         HddsProtos.NodeOperationalState.DECOMMISSIONED);
+     for (HddsProtos.NodeOperationalState s :
+         HddsProtos.NodeOperationalState.values()) {
+       eventPublisher.clearEvents();
+       nsm.setNodeOperationalState(dn, s);
 -      assertEquals(SCMEvents.NON_HEALTHY_TO_HEALTHY_NODE,
++      assertEquals(SCMEvents.NON_HEALTHY_TO_READONLY_HEALTHY_NODE,
+           eventPublisher.getLastEvent());
+     }
+ 
+     // Now make the node stale and run through all states again ensuring the
+     // stale event gets fired
+     long now = Time.monotonicNow();
+     long staleLimit = HddsServerUtil.getStaleNodeInterval(conf) + 1000;
+     long deadLimit = HddsServerUtil.getDeadNodeInterval(conf) + 1000;
+     DatanodeInfo dni = nsm.getNode(dn);
+     dni.updateLastHeartbeatTime(now - staleLimit);
+     nsm.checkNodesHealth();
+     assertEquals(NodeState.STALE, nsm.getNodeStatus(dn).getHealth());
+     nsm.setNodeOperationalState(dn,
+         HddsProtos.NodeOperationalState.DECOMMISSIONED);
+     for (HddsProtos.NodeOperationalState s :
+         HddsProtos.NodeOperationalState.values()) {
+       eventPublisher.clearEvents();
+       nsm.setNodeOperationalState(dn, s);
+       assertEquals(SCMEvents.STALE_NODE, eventPublisher.getLastEvent());
+     }
+ 
+     // Finally make the node dead and run through all the op states again
+     dni.updateLastHeartbeatTime(now - deadLimit);
+     nsm.checkNodesHealth();
+     assertEquals(NodeState.DEAD, nsm.getNodeStatus(dn).getHealth());
+     nsm.setNodeOperationalState(dn,
+         HddsProtos.NodeOperationalState.DECOMMISSIONED);
+     for (HddsProtos.NodeOperationalState s :
+         HddsProtos.NodeOperationalState.values()) {
+       eventPublisher.clearEvents();
+       nsm.setNodeOperationalState(dn, s);
+       assertEquals(SCMEvents.DEAD_NODE, eventPublisher.getLastEvent());
+     }
+   }
+ 
+   private DatanodeDetails generateDatanode() {
+     return DatanodeDetails.newBuilder().setUuid(UUID.randomUUID()).build();
+   }
+ 
+   static class MockEventPublisher implements EventPublisher {
+ 
+     private List<Event> events = new ArrayList<>();
+     private List<Object> payloads = new ArrayList<>();
+ 
+     public void clearEvents() {
+       events.clear();
+       payloads.clear();
+     }
+ 
+     public List<Event> getEvents() {
+       return events;
+     }
+ 
+     public Event getLastEvent() {
+       if (events.size() == 0) {
+         return null;
+       } else {
+         return events.get(events.size()-1);
+       }
+     }
+ 
+     @Override
+     public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void
+         fireEvent(EVENT_TYPE event, PAYLOAD payload) {
+       events.add(event);
+       payloads.add(payload);
+     }
+   }
+ 
 -}
++}
diff --cc hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
index b3f1e8f,3c036d7..face870
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
@@@ -31,13 -31,10 +31,13 @@@ import java.util.concurrent.TimeoutExce
  import org.apache.hadoop.fs.FileUtil;
  import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
  import org.apache.hadoop.hdds.HddsConfigKeys;
+ import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
  import org.apache.hadoop.hdds.conf.OzoneConfiguration;
  import org.apache.hadoop.hdds.protocol.DatanodeDetails;
- import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
  import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 +import org.apache.hadoop.hdds.protocol.proto
 +    .StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
 +import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
  import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.NodeReportFromDatanode;
  import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
  import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
@@@ -46,17 -43,15 +46,19 @@@ import org.apache.hadoop.hdds.scm.ScmCo
  import org.apache.hadoop.hdds.scm.TestUtils;
  import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
  import org.apache.hadoop.hdds.scm.net.NetworkTopology;
+ import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
  import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 +import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
  import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
  import org.apache.hadoop.hdds.server.events.EventPublisher;
  import org.apache.hadoop.hdds.server.events.EventQueue;
 +import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
  import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
  import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
 +import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
  import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 +import org.apache.hadoop.ozone.upgrade.LayoutVersionManager;
+ import org.apache.hadoop.ozone.protocol.commands.SetNodeOperationalStateCommand;
  import org.apache.hadoop.security.authentication.client.AuthenticationException;
  import org.apache.hadoop.test.GenericTestUtils;
  import org.apache.hadoop.test.PathUtils;
@@@ -68,35 -73,17 +80,28 @@@ import static org.apache.hadoop.fs.Comm
  import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
  import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.createDatanodeDetails;
  import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
- import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD;
- import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY;
- import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY_READONLY;
 +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE;
 +import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.finalizeNewLayoutVersionCommand;
 +import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto.ErrorCode.errorNodeNotPermitted;
 +import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto.ErrorCode.success;
- import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL;
- import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
 +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT;
- import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
 +import static org.apache.hadoop.hdds.scm.TestUtils.getRandomPipelineReports;
 +import static org.apache.hadoop.hdds.scm.events.SCMEvents.*;
+ import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+     .OZONE_SCM_DEADNODE_INTERVAL;
+ import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+     .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
+ import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+     .OZONE_SCM_STALENODE_INTERVAL;
 -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE;
 -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT;
  import static org.apache.hadoop.hdds.scm.events.SCMEvents.DATANODE_COMMAND;
- import org.junit.After;
- import org.junit.Assert;
  import static org.junit.Assert.assertEquals;
  import static org.junit.Assert.assertTrue;
 +import static org.mockito.Mockito.mock;
 +import static org.mockito.Mockito.times;
 +import static org.mockito.Mockito.verify;
 +import static org.mockito.Mockito.when;
 +
- import org.junit.Before;
- import org.junit.BeforeClass;
- import org.junit.Rule;
- import org.junit.Test;
- import org.junit.rules.ExpectedException;
 +import org.mockito.ArgumentCaptor;
  import org.mockito.Mockito;
  
  /**
@@@ -336,6 -280,35 +347,35 @@@ public class TestSCMNodeManager 
    }
  
    /**
+    * Ensure that a change to the operationalState of a node fires a datanode
+    * event of type SetNodeOperationalStateCommand.
+    */
+   @Test
+   @Ignore // TODO - this test is no longer valid as the heartbeat processing
 -          //        now generates the command message.
++  //        now generates the command message.
+   public void testSetNodeOpStateAndCommandFired()
+       throws IOException, NodeNotFoundException, AuthenticationException {
+     final int interval = 100;
+ 
+     OzoneConfiguration conf = getConf();
+     conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval,
+         MILLISECONDS);
+ 
+     try (SCMNodeManager nodeManager = createNodeManager(conf)) {
+       DatanodeDetails dn = TestUtils.createRandomDatanodeAndRegister(
+           nodeManager);
+       long expiry = System.currentTimeMillis() / 1000 + 1000;
+       nodeManager.setNodeOperationalState(dn,
+           HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, expiry);
+       List<SCMCommand> commands = nodeManager.getCommandQueue(dn.getUuid());
+ 
+       Assert.assertTrue(commands.get(0).getClass().equals(
+           SetNodeOperationalStateCommand.class));
+       assertEquals(1, commands.size());
+     }
+   }
+ 
+   /**
     * Asserts that a single node moves from Healthy to stale node, then from
     * stale node to dead node if it misses enough heartbeats.
     *
@@@ -397,9 -365,14 +438,14 @@@
            .getUuid(), staleNodeList.get(0).getUuid());
        Thread.sleep(1000);
  
+       Map<String, Map<String, Integer>> nodeCounts = nodeManager.getNodeCount();
+       assertEquals(1,
+           nodeCounts.get(HddsProtos.NodeOperationalState.IN_SERVICE.name())
+               .get(HddsProtos.NodeState.STALE.name()).intValue());
+ 
        // heartbeat good nodes again.
        for (DatanodeDetails dn : nodeList) {
 -        nodeManager.processHeartbeat(dn);
 +        nodeManager.processHeartbeat(dn, layoutInfo);
        }
  
        //  6 seconds is the dead window for this test , so we wait a total of
@@@ -506,10 -481,10 +560,10 @@@
  
        // Step 4 : all nodes should still be HEALTHY
        assertEquals(2, nodeManager.getAllNodes().size());
-       assertEquals(2, nodeManager.getNodeCount(HEALTHY));
+       assertEquals(2, nodeManager.getNodeCount(NodeStatus.inServiceHealthy()));
  
        // Step 5 : heartbeat for node1
 -      nodeManager.processHeartbeat(node1);
 +      nodeManager.processHeartbeat(node1, layoutInfo);
  
        // Step 6 : wait for health check process to run
        Thread.sleep(1000);
@@@ -700,14 -609,14 +754,14 @@@
         * the 3 second windows.
         */
  
 -      nodeManager.processHeartbeat(healthyNode);
 -      nodeManager.processHeartbeat(staleNode);
 -      nodeManager.processHeartbeat(deadNode);
 +      nodeManager.processHeartbeat(healthyNode, layoutInfo);
 +      nodeManager.processHeartbeat(staleNode, layoutInfo);
 +      nodeManager.processHeartbeat(deadNode, layoutInfo);
  
        Thread.sleep(1500);
 -      nodeManager.processHeartbeat(healthyNode);
 +      nodeManager.processHeartbeat(healthyNode, layoutInfo);
        Thread.sleep(2 * 1000);
-       assertEquals(1, nodeManager.getNodeCount(HEALTHY));
+       assertEquals(1, nodeManager.getNodeCount(NodeStatus.inServiceHealthy()));
  
  
        // 3.5 seconds from last heartbeat for the stale and deadNode. So those
@@@ -820,8 -727,8 +877,8 @@@
     * @return true if we found the expected number.
     */
    private boolean findNodes(NodeManager nodeManager, int count,
--      HddsProtos.NodeState state) {
-     return count == nodeManager.getNodeCount(state);
++                            HddsProtos.NodeState state) {
+     return count == nodeManager.getNodeCount(NodeStatus.inServiceStale());
    }
  
    /**
@@@ -1028,9 -925,8 +1088,11 @@@
        //TODO: wait for EventQueue to be processed
        eventQueue.processAll(8000L);
  
-       assertEquals(nodeCount, nodeManager.getNodeCount(HEALTHY_READONLY));
+       assertEquals(nodeCount, nodeManager.getNodeCount(
++          NodeStatus.inServiceHealthyReadOnly()));
 +      Thread.sleep(3 * 1000);
-       assertEquals(nodeCount, nodeManager.getNodeCount(HEALTHY));
++      assertEquals(nodeCount, nodeManager.getNodeCount(
+           NodeStatus.inServiceHealthy()));
        assertEquals(capacity * nodeCount, (long) nodeManager.getStats()
            .getCapacity().get());
        assertEquals(used * nodeCount, (long) nodeManager.getStats()
@@@ -1052,10 -948,10 +1114,10 @@@
     */
    @Test
    public void tesVolumeInfoFromNodeReport()
--          throws IOException, InterruptedException, AuthenticationException {
++      throws IOException, InterruptedException, AuthenticationException {
      OzoneConfiguration conf = getConf();
      conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000,
--            MILLISECONDS);
++        MILLISECONDS);
      final int volumeCount = 10;
      final long capacity = 2000;
      final long used = 100;
@@@ -1071,26 -967,19 +1133,28 @@@
        for (int x = 0; x < volumeCount; x++) {
          String storagePath = testDir.getAbsolutePath() + "/" + dnId;
          reports.add(TestUtils
--                .createStorageReport(dnId, storagePath, capacity,
--                        used, free, null, failed));
++            .createStorageReport(dnId, storagePath, capacity,
++                used, free, null, failed));
          failed = !failed;
        }
 -      nodeManager.register(dn, TestUtils.createNodeReport(reports), null);
 -      nodeManager.processHeartbeat(dn);
 +      nodeManager.register(dn, TestUtils.createNodeReport(reports), null, null);
 +      LayoutVersionManager versionManager =
 +          nodeManager.getLayoutVersionManager();
 +      LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
 +          .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion())
 +          .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion())
 +          .build();
 +      nodeManager.processHeartbeat(dn, layoutInfo);
        //TODO: wait for EventQueue to be processed
        eventQueue.processAll(8000L);
  
-       assertEquals(1, nodeManager.getNodeCount(HEALTHY_READONLY));
++      assertEquals(1, nodeManager.getNodeCount(
++          NodeStatus.inServiceHealthyReadOnly()));
 +      Thread.sleep(3 * 1000);
-       assertEquals(1, nodeManager.getNodeCount(HEALTHY));
+       assertEquals(1, nodeManager
+           .getNodeCount(NodeStatus.inServiceHealthy()));
        assertEquals(volumeCount / 2,
--              nodeManager.minHealthyVolumeNum(dnList));
++          nodeManager.minHealthyVolumeNum(dnList));
        dnList.clear();
      }
    }
@@@ -1135,15 -1024,9 +1199,15 @@@
                  remaining, null);
          NodeReportProto nodeReportProto = TestUtils.createNodeReport(report);
          nodeReportHandler.onMessage(
--                new NodeReportFromDatanode(datanodeDetails, nodeReportProto),
--                publisher);
 -        nodeManager.processHeartbeat(datanodeDetails);
++            new NodeReportFromDatanode(datanodeDetails, nodeReportProto),
++            publisher);
 +        LayoutVersionManager versionManager =
 +            nodeManager.getLayoutVersionManager();
 +        LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
 +            .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion())
 +            .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion())
 +            .build();
 +        nodeManager.processHeartbeat(datanodeDetails, layoutInfo);
          Thread.sleep(100);
        }
  
@@@ -1267,7 -1143,7 +1331,7 @@@
  
        nodemanager
            .register(datanodeDetails, TestUtils.createNodeReport(report),
-                   getRandomPipelineReports(), null);
 -                  TestUtils.getRandomPipelineReports());
++              getRandomPipelineReports(), null);
        eq.fireEvent(DATANODE_COMMAND,
            new CommandForDatanode<>(datanodeDetails.getUuid(),
                new CloseContainerCommand(1L,
@@@ -1360,9 -1230,7 +1424,7 @@@
        for (int i = 0; i < nodeCount; i++) {
          DatanodeDetails node = createDatanodeDetails(
              UUID.randomUUID().toString(), hostNames[i], ipAddress[i], null);
 -        nodeManager.register(node, null, null);
 +        nodeManager.register(node, null, null, null);
-         nodes[i] = node;
        }
  
        // verify network topology cluster has all the registered nodes
@@@ -1404,9 -1273,7 +1467,7 @@@
        for (int i = 0; i < nodeCount; i++) {
          DatanodeDetails node = createDatanodeDetails(
              UUID.randomUUID().toString(), hostNames[i], ipAddress[i], null);
 -        nodeManager.register(node, null, null);
 +        nodeManager.register(node, null, null, null);
-         nodes[i] = node;
        }
  
        // verify network topology cluster has all the registered nodes
@@@ -1432,6 -1300,58 +1494,64 @@@
      }
    }
  
+   @Test
+   public void testGetNodeInfo()
+       throws IOException, InterruptedException, NodeNotFoundException,
 -        AuthenticationException {
++      AuthenticationException {
+     OzoneConfiguration conf = getConf();
+     final int nodeCount = 6;
+     SCMNodeManager nodeManager = createNodeManager(conf);
+ 
+     for (int i=0; i<nodeCount; i++) {
+       DatanodeDetails datanodeDetails =
+           MockDatanodeDetails.randomDatanodeDetails();
+       final long capacity = 2000;
+       final long used = 100;
+       final long remaining = 1900;
+       UUID dnId = datanodeDetails.getUuid();
+       String storagePath = testDir.getAbsolutePath() + "/" + dnId;
+       StorageReportProto report = TestUtils
+           .createStorageReport(dnId, storagePath, capacity, used,
+               remaining, null);
+ 
 -      nodeManager.register(datanodeDetails, TestUtils.createNodeReport(report),
 -          TestUtils.getRandomPipelineReports());
+ 
 -      nodeManager.processHeartbeat(datanodeDetails);
++      LayoutVersionManager versionManager =
++          nodeManager.getLayoutVersionManager();
++      LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
++          .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion())
++          .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion())
++          .build();
++      nodeManager.register(datanodeDetails, TestUtils.createNodeReport(report),
++          TestUtils.getRandomPipelineReports(), layoutInfo);
++      nodeManager.processHeartbeat(datanodeDetails, layoutInfo);
+       if (i == 5) {
+         nodeManager.setNodeOperationalState(datanodeDetails,
+             HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE);
+       }
+       if (i == 3 || i == 4) {
+         nodeManager.setNodeOperationalState(datanodeDetails,
+             HddsProtos.NodeOperationalState.DECOMMISSIONED);
+       }
+     }
+     Thread.sleep(100);
+ 
+     Map<String, Long> stats = nodeManager.getNodeInfo();
+     // 3 IN_SERVICE nodes:
+     assertEquals(6000, stats.get("DiskCapacity").longValue());
+     assertEquals(300, stats.get("DiskUsed").longValue());
+     assertEquals(5700, stats.get("DiskRemaining").longValue());
+ 
+     // 2 Decommissioned nodes
+     assertEquals(4000, stats.get("DecommissionedDiskCapacity").longValue());
+     assertEquals(200, stats.get("DecommissionedDiskUsed").longValue());
+     assertEquals(3800, stats.get("DecommissionedDiskRemaining").longValue());
+ 
+     // 1 Maintenance node
+     assertEquals(2000, stats.get("MaintenanceDiskCapacity").longValue());
+     assertEquals(100, stats.get("MaintenanceDiskUsed").longValue());
+     assertEquals(1900, stats.get("MaintenanceDiskRemaining").longValue());
+   }
+ 
    /**
     * Test add node into a 4-layer network topology during node register.
     */
@@@ -1473,4 -1392,4 +1592,4 @@@
      }
    }
  
--}
++}
diff --cc hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNodeStateMap.java
index 0000000,5954f08..a280778
mode 000000,100644..100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNodeStateMap.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNodeStateMap.java
@@@ -1,0 -1,189 +1,189 @@@
+ /**
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  * http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hdds.scm.node.states;
+ 
+ import java.util.List;
+ import java.util.UUID;
+ import java.util.concurrent.CountDownLatch;
+ 
+ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+ import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
+ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
+ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
+ import org.apache.hadoop.hdds.scm.container.ContainerID;
+ import org.apache.hadoop.hdds.scm.node.NodeStatus;
+ 
+ import static junit.framework.TestCase.assertEquals;
+ import org.junit.After;
+ import org.junit.Before;
+ import org.junit.Test;
+ 
+ /**
+  * Class to test the NodeStateMap class, which is an internal class used by
+  * NodeStateManager.
+  */
+ 
+ public class TestNodeStateMap {
+ 
+   private NodeStateMap map;
+ 
+   @Before
+   public void setUp() {
+     map = new NodeStateMap();
+   }
+ 
+   @After
+   public void tearDown() {
+   }
+ 
+   @Test
+   public void testNodeCanBeAddedAndRetrieved()
+       throws NodeAlreadyExistsException, NodeNotFoundException {
+     DatanodeDetails dn = generateDatanode();
+     NodeStatus status = NodeStatus.inServiceHealthy();
 -    map.addNode(dn, status);
++    map.addNode(dn, status, null);
+     assertEquals(dn, map.getNodeInfo(dn.getUuid()));
+     assertEquals(status, map.getNodeStatus(dn.getUuid()));
+   }
+ 
+   @Test
+   public void testNodeHealthStateCanBeUpdated()
+       throws NodeAlreadyExistsException, NodeNotFoundException {
+     DatanodeDetails dn = generateDatanode();
+     NodeStatus status = NodeStatus.inServiceHealthy();
 -    map.addNode(dn, status);
++    map.addNode(dn, status, null);
+ 
+     NodeStatus expectedStatus = NodeStatus.inServiceStale();
+     NodeStatus returnedStatus =
+         map.updateNodeHealthState(dn.getUuid(), expectedStatus.getHealth());
+     assertEquals(expectedStatus, returnedStatus);
+     assertEquals(returnedStatus, map.getNodeStatus(dn.getUuid()));
+   }
+ 
+   @Test
+   public void testNodeOperationalStateCanBeUpdated()
+       throws NodeAlreadyExistsException, NodeNotFoundException {
+     DatanodeDetails dn = generateDatanode();
+     NodeStatus status = NodeStatus.inServiceHealthy();
 -    map.addNode(dn, status);
++    map.addNode(dn, status, null);
+ 
+     NodeStatus expectedStatus = new NodeStatus(
+         NodeOperationalState.DECOMMISSIONING,
+         NodeState.HEALTHY, 999);
+     NodeStatus returnedStatus = map.updateNodeOperationalState(
+         dn.getUuid(), expectedStatus.getOperationalState(), 999);
+     assertEquals(expectedStatus, returnedStatus);
+     assertEquals(returnedStatus, map.getNodeStatus(dn.getUuid()));
+     assertEquals(999, returnedStatus.getOpStateExpiryEpochSeconds());
+   }
+ 
+   @Test
+   public void testGetNodeMethodsReturnCorrectCountsAndStates()
+       throws NodeAlreadyExistsException {
+     // Add one node for all possible states
+     int nodeCount = 0;
+     for(NodeOperationalState op : NodeOperationalState.values()) {
+       for(NodeState health : NodeState.values()) {
+         addRandomNodeWithState(op, health);
+         nodeCount++;
+       }
+     }
+     NodeStatus requestedState = NodeStatus.inServiceStale();
+     List<UUID> nodes = map.getNodes(requestedState);
+     assertEquals(1, nodes.size());
+     assertEquals(1, map.getNodeCount(requestedState));
+     assertEquals(nodeCount, map.getTotalNodeCount());
+     assertEquals(nodeCount, map.getAllNodes().size());
+     assertEquals(nodeCount, map.getAllDatanodeInfos().size());
+ 
+     // Checks for the getNodeCount(opstate, health) method
+     assertEquals(nodeCount, map.getNodeCount(null, null));
+     assertEquals(1,
+         map.getNodeCount(NodeOperationalState.DECOMMISSIONING,
+             NodeState.STALE));
+     assertEquals(5, map.getNodeCount(null, NodeState.HEALTHY));
 -    assertEquals(3,
++    assertEquals(4,
+         map.getNodeCount(NodeOperationalState.DECOMMISSIONING, null));
+   }
+ 
+   /**
+    * Test if the container list is iterable even if it's modified from
+    * another thread.
+    */
+   @Test
+   public void testConcurrency() throws Exception {
+     NodeStateMap nodeStateMap = new NodeStateMap();
+ 
+     final DatanodeDetails datanodeDetails =
+         MockDatanodeDetails.randomDatanodeDetails();
+ 
 -    nodeStateMap.addNode(datanodeDetails, NodeStatus.inServiceHealthy());
++    nodeStateMap.addNode(datanodeDetails, NodeStatus.inServiceHealthy(), null);
+ 
+     UUID dnUuid = datanodeDetails.getUuid();
+ 
+     nodeStateMap.addContainer(dnUuid, new ContainerID(1L));
+     nodeStateMap.addContainer(dnUuid, new ContainerID(2L));
+     nodeStateMap.addContainer(dnUuid, new ContainerID(3L));
+ 
+     CountDownLatch elementRemoved = new CountDownLatch(1);
+     CountDownLatch loopStarted = new CountDownLatch(1);
+ 
+     new Thread(() -> {
+       try {
+         loopStarted.await();
+         nodeStateMap.removeContainer(dnUuid, new ContainerID(1L));
+         elementRemoved.countDown();
+       } catch (Exception e) {
+         e.printStackTrace();
+       }
+ 
+     }).start();
+ 
+     boolean first = true;
+     for (ContainerID key : nodeStateMap.getContainers(dnUuid)) {
+       if (first) {
+         loopStarted.countDown();
+         elementRemoved.await();
+       }
+       first = false;
+       System.out.println(key);
+     }
+   }
+ 
+   private void addNodeWithState(
+       DatanodeDetails dn,
+       NodeOperationalState opState, NodeState health
+   )
+       throws NodeAlreadyExistsException {
+     NodeStatus status = new NodeStatus(opState, health);
 -    map.addNode(dn, status);
++    map.addNode(dn, status, null);
+   }
+ 
+   private void addRandomNodeWithState(
+       NodeOperationalState opState, NodeState health
+   )
+       throws NodeAlreadyExistsException {
+     DatanodeDetails dn = generateDatanode();
+     addNodeWithState(dn, opState, health);
+   }
+ 
+   private DatanodeDetails generateDatanode() {
+     return DatanodeDetails.newBuilder().setUuid(UUID.randomUUID()).build();
+   }
+ 
 -}
++}
diff --cc hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
index 945d890,8c567e9..847627e
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
@@@ -120,8 -121,8 +121,8 @@@ public class TestSCMBlockProtocolServe
              .setClient(client)
              .build();
      ScmBlockLocationProtocolProtos.SortDatanodesResponseProto resp =
-         service.sortDatanodes(request);
+         service.sortDatanodes(request, CURRENT_VERSION);
 -    Assert.assertTrue(resp.getNodeList().size() == NODE_COUNT);
 +    Assert.assertTrue(resp.getNodeList().size() == nodeCount);
      System.out.println("client = " + client);
      resp.getNodeList().stream().forEach(
          node -> System.out.println(node.getNetworkName()));
diff --cc hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index a322d41,7a71c80..963f9fd
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@@ -282,7 -275,7 +284,7 @@@ public class TestEndPoint 
                    .createNodeReport(
                        getStorageReports(nodeToRegister.getUuid())),
                TestUtils.getRandomContainerReports(10),
-                   TestUtils.getRandomPipelineReports(), layoutInfo);
 -              TestUtils.getRandomPipelineReports());
++              TestUtils.getRandomPipelineReports(), layoutInfo);
        Assert.assertNotNull(responseProto);
        Assert.assertEquals(nodeToRegister.getUuidString(),
            responseProto.getDatanodeUUID());
@@@ -298,8 -291,10 +300,9 @@@
      return TestUtils.createStorageReport(id, storagePath, 100, 10, 90, null);
    }
  
 -  private EndpointStateMachine registerTaskHelper(
 -      InetSocketAddress scmAddress,
 +  private EndpointStateMachine registerTaskHelper(InetSocketAddress scmAddress,
-       int rpcTimeout, boolean clearDatanodeDetails) throws Exception {
+       int rpcTimeout, boolean clearDatanodeDetails
+   ) throws Exception {
      OzoneConfiguration conf = SCMTestUtils.getConf();
      EndpointStateMachine rpcEndPoint =
          createEndpoint(conf,
@@@ -313,16 -308,10 +316,16 @@@
          TestUtils.getRandomContainerReports(10));
      when(ozoneContainer.getController()).thenReturn(controller);
      when(ozoneContainer.getPipelineReport()).thenReturn(
-             TestUtils.getRandomPipelineReports());
+         TestUtils.getRandomPipelineReports());
 +    HDDSLayoutVersionManager versionManager =
 +        Mockito.mock(HDDSLayoutVersionManager.class);
 +    when(versionManager.getMetadataLayoutVersion())
 +        .thenReturn(TEST_METADATA_LAYOUT_VERSION);
 +    when(versionManager.getSoftwareLayoutVersion())
 +        .thenReturn(TEST_SOFTWARE_LAYOUT_VERSION);
      RegisterEndpointTask endpointTask =
          new RegisterEndpointTask(rpcEndPoint, conf, ozoneContainer,
 -            mock(StateContext.class));
 +            mock(StateContext.class), versionManager);
      if (!clearDatanodeDetails) {
        DatanodeDetails datanodeDetails = randomDatanodeDetails();
        endpointTask.setDatanodeDetails(datanodeDetails);
diff --cc hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
index b917309,048b953..4f8c5a8
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
@@@ -17,12 -17,11 +17,12 @@@
  package org.apache.hadoop.ozone.container.testutils;
  
  import com.google.common.base.Preconditions;
 +
- import org.apache.hadoop.hdds.protocol.proto
-     .StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
- import org.apache.hadoop.hdds.protocol.proto
-         .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
++import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
+ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 -import org.apache.hadoop.hdds.protocol.proto
 -        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
++import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
  import org.apache.hadoop.hdds.scm.container.ContainerID;
+ import org.apache.hadoop.hdds.scm.node.NodeStatus;
  import org.apache.hadoop.hdds.scm.net.NetworkTopology;
  import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
  import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
diff --cc hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java
index 83b4bb0,a6bd744..309f8d5
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java
@@@ -99,20 -83,12 +99,19 @@@ public class TestSCMNodeMetrics 
    /**
     * Verifies heartbeat processing count.
     *
-    * @throws InterruptedException
     */
    @Test
 -  public void testHBProcessing() {
 +  public void testHBProcessing() throws InterruptedException {
      long hbProcessed = getCounter("NumHBProcessed");
  
 -    nodeManager.processHeartbeat(registeredDatanode);
 +    NodeReportProto nodeReport = createNodeReport();
 +
 +    LayoutVersionManager versionManager = nodeManager.getLayoutVersionManager();
 +    LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
 +        .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion())
 +        .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion())
 +        .build();
 +    nodeManager.processHeartbeat(registeredDatanode, layoutInfo);
  
      assertEquals("NumHBProcessed", hbProcessed + 1,
          getCounter("NumHBProcessed"));
@@@ -195,31 -166,73 +194,76 @@@
  
      nodeManager.processNodeReport(registeredDatanode, nodeReport);
  
 -    assertGauge("InServiceHealthyNodes", 1,
 +    MetricsRecordBuilder metricsSource = getMetrics(SCMNodeMetrics.SOURCE_NAME);
 +
-     assertGauge("HealthyReadOnlyNodes", 1, metricsSource);
-     assertGauge("StaleNodes", 0, metricsSource);
-     assertGauge("DeadNodes", 0, metricsSource);
-     assertGauge("DecommissioningNodes", 0, metricsSource);
-     assertGauge("DecommissionedNodes", 0, metricsSource);
-     assertGauge("DiskCapacity", 100L, metricsSource);
-     assertGauge("DiskUsed", 10L, metricsSource);
-     assertGauge("DiskRemaining", 90L, metricsSource);
-     assertGauge("SSDCapacity", 0L, metricsSource);
-     assertGauge("SSDUsed", 0L, metricsSource);
-     assertGauge("SSDRemaining", 0L, metricsSource);
++    assertGauge("InServiceHealthyNodes", 0,
++        getMetrics(SCMNodeMetrics.class.getSimpleName()));
++    assertGauge("InServiceHealthyReadonlyNodes", 1,
+         getMetrics(SCMNodeMetrics.class.getSimpleName()));
+     assertGauge("InServiceStaleNodes", 0,
+         getMetrics(SCMNodeMetrics.class.getSimpleName()));
+     assertGauge("InServiceDeadNodes", 0,
+         getMetrics(SCMNodeMetrics.class.getSimpleName()));
+     assertGauge("DecommissioningHealthyNodes", 0,
+         getMetrics(SCMNodeMetrics.class.getSimpleName()));
+     assertGauge("DecommissioningStaleNodes", 0,
+         getMetrics(SCMNodeMetrics.class.getSimpleName()));
+     assertGauge("DecommissioningDeadNodes", 0,
+         getMetrics(SCMNodeMetrics.class.getSimpleName()));
+     assertGauge("DecommissionedHealthyNodes", 0,
+         getMetrics(SCMNodeMetrics.class.getSimpleName()));
+     assertGauge("DecommissionedStaleNodes", 0,
+         getMetrics(SCMNodeMetrics.class.getSimpleName()));
+     assertGauge("DecommissionedDeadNodes", 0,
+         getMetrics(SCMNodeMetrics.class.getSimpleName()));
+     assertGauge("EnteringMaintenanceHealthyNodes", 0,
+         getMetrics(SCMNodeMetrics.class.getSimpleName()));
+     assertGauge("EnteringMaintenanceStaleNodes", 0,
+         getMetrics(SCMNodeMetrics.class.getSimpleName()));
+     assertGauge("EnteringMaintenanceDeadNodes", 0,
+         getMetrics(SCMNodeMetrics.class.getSimpleName()));
+     assertGauge("InMaintenanceHealthyNodes", 0,
+         getMetrics(SCMNodeMetrics.class.getSimpleName()));
+     assertGauge("InMaintenanceStaleNodes", 0,
+         getMetrics(SCMNodeMetrics.class.getSimpleName()));
+     assertGauge("InMaintenanceDeadNodes", 0,
+         getMetrics(SCMNodeMetrics.class.getSimpleName()));
 -    assertGauge("DiskCapacity", 100L,
 -        getMetrics(SCMNodeMetrics.class.getSimpleName()));
 -    assertGauge("DiskUsed", 10L,
 -        getMetrics(SCMNodeMetrics.class.getSimpleName()));
 -    assertGauge("DiskRemaining", 90L,
 -        getMetrics(SCMNodeMetrics.class.getSimpleName()));
 -    assertGauge("SSDCapacity", 0L,
 -        getMetrics(SCMNodeMetrics.class.getSimpleName()));
 -    assertGauge("SSDUsed", 0L,
 -        getMetrics(SCMNodeMetrics.class.getSimpleName()));
 -    assertGauge("SSDRemaining", 0L,
 -        getMetrics(SCMNodeMetrics.class.getSimpleName()));
+     assertGauge("MaintenanceDiskCapacity", 0L,
+         getMetrics(SCMNodeMetrics.class.getSimpleName()));
+     assertGauge("MaintenanceDiskUsed", 0L,
+         getMetrics(SCMNodeMetrics.class.getSimpleName()));
+     assertGauge("MaintenanceDiskRemaining", 0L,
+         getMetrics(SCMNodeMetrics.class.getSimpleName()));
+     assertGauge("MaintenanceSSDCapacity", 0L,
+         getMetrics(SCMNodeMetrics.class.getSimpleName()));
+     assertGauge("MaintenanceSSDUsed", 0L,
+         getMetrics(SCMNodeMetrics.class.getSimpleName()));
+     assertGauge("MaintenanceSSDRemaining", 0L,
+         getMetrics(SCMNodeMetrics.class.getSimpleName()));
+     assertGauge("DecommissionedDiskCapacity", 0L,
+         getMetrics(SCMNodeMetrics.class.getSimpleName()));
+     assertGauge("DecommissionedDiskUsed", 0L,
+         getMetrics(SCMNodeMetrics.class.getSimpleName()));
+     assertGauge("DecommissionedDiskRemaining", 0L,
+         getMetrics(SCMNodeMetrics.class.getSimpleName()));
+     assertGauge("DecommissionedSSDCapacity", 0L,
+         getMetrics(SCMNodeMetrics.class.getSimpleName()));
+     assertGauge("DecommissionedSSDUsed", 0L,
+         getMetrics(SCMNodeMetrics.class.getSimpleName()));
+     assertGauge("DecommissionedSSDRemaining", 0L,
+         getMetrics(SCMNodeMetrics.class.getSimpleName()));
  
 +    LayoutVersionManager versionManager = nodeManager.getLayoutVersionManager();
 +    LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
 +        .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion())
 +        .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion())
 +        .build();
 +    nodeManager.processHeartbeat(registeredDatanode, layoutInfo);
 +    sleep(4000);
 +    metricsSource = getMetrics(SCMNodeMetrics.SOURCE_NAME);
-     assertGauge("HealthyReadOnlyNodes", 0, metricsSource);
-     assertGauge("HealthyNodes", 1, metricsSource);
++    assertGauge("InServiceHealthyReadonlyNodes", 0, metricsSource);
++    assertGauge("InServiceHealthyNodes", 1, metricsSource);
 +
    }
  
    private long getCounter(String metricName) {
diff --cc hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
index 2c4d89d,b676bca..7dacf98
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
@@@ -233,11 -233,7 +233,13 @@@ public class OMException extends IOExce
  
      QUOTA_EXCEEDED,
  
 -    QUOTA_ERROR
 -
++    QUOTA_ERROR,
++
 +    PERSIST_UPGRADE_TO_LAYOUT_VERSION_FAILED,
 +    REMOVE_UPGRADE_TO_LAYOUT_VERSION_FAILED,
 +    UPDATE_LAYOUT_VERSION_FAILED,
 +    LAYOUT_FEATURE_FINALIZATION_FAILED,
 +    PREPARE_FAILED,
 +    NOT_SUPPORTED_OPERATION_WHEN_PREPARED
    }
  }
diff --cc hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
index a26c436,6d10e42..fc46059
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
@@@ -156,9 -149,9 +156,10 @@@ import com.google.common.base.Precondit
  import com.google.common.base.Strings;
  import com.google.protobuf.ByteString;
  
+ import static org.apache.hadoop.ozone.ClientVersions.CURRENT_VERSION;
  import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
  import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER;
 +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.*;
  import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.ACCESS_DENIED;
  import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.DIRECTORY_ALREADY_EXISTS;
  import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK;
diff --cc hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh
index 7e024ed,5913b65..715639a
--- a/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh
@@@ -27,11 -28,10 +28,14 @@@ source "$COMPOSE_DIR/../testlib.sh
  start_docker_env
  
  execute_robot_test scm basic/ozone-shell-single.robot
+ execute_robot_test scm basic/links.robot
+ execute_robot_test scm s3
+ execute_robot_test scm freon
  
 +# The prepare test should be the last test to run until a cancel prepare
 +# test is added (TODO).
 +execute_robot_test scm omha/om-prepare.robot
 +
  stop_docker_env
  
  generate_report
diff --cc hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java
index 07ba70f,0000000..dc8d1bb
mode 100644,000000..100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java
@@@ -1,282 -1,0 +1,288 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership.  The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + *
 + */
 +
 +package org.apache.hadoop.hdds.upgrade;
 +
 +import static java.lang.Thread.sleep;
 +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL;
 +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.CLOSED;
 +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.OPEN;
 +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.QUASI_CLOSED;
 +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY;
 +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY_READONLY;
 +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
 +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS;
 +import static org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.Status.FINALIZATION_DONE;
 +import static org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.Status.STARTING_FINALIZATION;
 +
 +import java.io.IOException;
 +import java.util.Iterator;
 +import java.util.concurrent.TimeUnit;
 +import java.util.concurrent.TimeoutException;
 +
 +import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 +import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 +import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
 +import org.apache.hadoop.hdds.scm.XceiverClientManager;
 +import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 +import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 +import org.apache.hadoop.hdds.scm.container.ContainerManager;
++import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
 +import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 +import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 +import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
 +import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 +import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
 +import org.apache.hadoop.ozone.HddsDatanodeService;
 +import org.apache.hadoop.ozone.MiniOzoneCluster;
 +import org.apache.hadoop.ozone.container.common.interfaces.Container;
 +import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
 +import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.StatusAndMessages;
 +import org.apache.hadoop.test.GenericTestUtils;
 +import org.junit.AfterClass;
 +import org.junit.Assert;
 +import org.junit.BeforeClass;
 +import org.junit.Rule;
 +import org.junit.Test;
 +import org.junit.rules.Timeout;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +/**
 + * Test SCM and DataNode Upgrade sequence.
 + */
 +public class TestHDDSUpgrade {
 +
 +  /**
 +   * Set a timeout for each test.
 +   */
 +  @Rule
 +  public Timeout timeout = new Timeout(300000);
 +  private static final Logger LOG =
 +      LoggerFactory.getLogger(TestHDDSUpgrade.class);
 +  private static final int NUM_DATA_NODES = 3;
 +
 +  private static MiniOzoneCluster cluster;
 +  private static OzoneConfiguration conf;
 +  private static StorageContainerManager scm;
 +  private static ContainerManager scmContainerManager;
 +  private static PipelineManager scmPipelineManager;
 +  private static Pipeline ratisPipeline1;
 +  private static final int CONTAINERS_CREATED_FOR_TESTING = 1;
 +  private static HDDSLayoutVersionManager scmVersionManager;
 +
 +  /**
 +   * Create a MiniOzoneCluster for testing.
 +   *
 +   * @throws IOException
 +   */
 +  @BeforeClass
 +  public static void init() throws Exception {
 +    conf = new OzoneConfiguration();
 +    conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 1000,
 +            TimeUnit.MILLISECONDS);
 +    int numOfNodes = NUM_DATA_NODES;
 +    cluster = MiniOzoneCluster.newBuilder(conf)
 +        .setNumDatanodes(numOfNodes)
 +        // allow only one FACTOR THREE pipeline.
 +        .setTotalPipelineNumLimit(numOfNodes + 1)
 +        .setHbInterval(1000)
 +        .setHbProcessorInterval(1000)
 +        .build();
 +    cluster.waitForClusterToBeReady();
 +    scm = cluster.getStorageContainerManager();
 +    scmContainerManager = scm.getContainerManager();
 +    scmPipelineManager = scm.getPipelineManager();
 +    scmVersionManager = scm.getLayoutVersionManager();
 +
 +    // we will create CONTAINERS_CREATED_FOR_TESTING number of containers.
 +    XceiverClientManager xceiverClientManager = new XceiverClientManager(conf);
 +    ContainerInfo ci1 = scmContainerManager.allocateContainer(
 +        RATIS, THREE, "Owner1");
 +    ratisPipeline1 = scmPipelineManager.getPipeline(ci1.getPipelineID());
 +    scmPipelineManager.openPipeline(ratisPipeline1.getId());
 +    XceiverClientSpi client1 =
 +        xceiverClientManager.acquireClient(ratisPipeline1);
 +    ContainerProtocolCalls.createContainer(client1,
 +        ci1.getContainerID(), null);
 +    // At this stage, there should be 1 pipeline with 1 open container.
 +    xceiverClientManager.releaseClient(client1, false);
 +  }
 +
 +  /**
 +   * Shutdown the MiniOzoneCluster.
 +   */
 +  @AfterClass
 +  public static void shutdown() {
 +    if (cluster != null) {
 +      cluster.shutdown();
 +    }
 +  }
 +
 +  private void testPreUpgradeConditionsSCM() {
 +    Assert.assertEquals(0, scmVersionManager.getMetadataLayoutVersion());
 +    for (ContainerInfo ci : scmContainerManager.getContainers()) {
 +      Assert.assertEquals(ci.getState(), HddsProtos.LifeCycleState.OPEN);
 +    }
 +  }
 +
 +  private void testPostUpgradeConditionsSCM() {
 +    Assert.assertEquals(scmVersionManager.getSoftwareLayoutVersion(),
 +        scmVersionManager.getMetadataLayoutVersion());
 +    Assert.assertTrue(scmVersionManager.getMetadataLayoutVersion() >= 1);
 +    int countContainers = 0;
 +    for (ContainerInfo ci : scmContainerManager.getContainers()) {
 +      HddsProtos.LifeCycleState ciState = ci.getState();
 +      Assert.assertTrue((ciState == HddsProtos.LifeCycleState.CLOSED) ||
 +          (ciState == HddsProtos.LifeCycleState.CLOSING) ||
 +          (ciState == HddsProtos.LifeCycleState.QUASI_CLOSED));
 +      countContainers++;
 +    }
 +    Assert.assertEquals(CONTAINERS_CREATED_FOR_TESTING, countContainers);
 +  }
 +
 +  private void testPreUpgradeConditionsDataNodes() {
 +    for (HddsDatanodeService dataNode : cluster.getHddsDatanodes()) {
 +      DatanodeStateMachine dsm = dataNode.getDatanodeStateMachine();
 +      HDDSLayoutVersionManager dnVersionManager =
 +          dsm.getDataNodeVersionManager();
 +      Assert.assertEquals(0, dnVersionManager.getMetadataLayoutVersion());
 +
 +    }
 +
 +    int countContainers = 0;
 +    for (HddsDatanodeService dataNode : cluster.getHddsDatanodes()) {
 +      DatanodeStateMachine dsm = dataNode.getDatanodeStateMachine();
 +      // Also verify that all the existing containers are open.
 +      for (Iterator<Container<?>> it =
 +           dsm.getContainer().getController().getContainers(); it.hasNext();) {
 +        Container container = it.next();
 +        Assert.assertTrue(container.getContainerState() == OPEN);
 +        countContainers++;
 +      }
 +    }
 +    Assert.assertTrue(countContainers >= 1);
 +  }
 +
 +
 +  private void testPostUpgradeConditionsDataNodes() {
 +    try {
 +      GenericTestUtils.waitFor(() -> {
 +        for (HddsDatanodeService dataNode : cluster.getHddsDatanodes()) {
 +          DatanodeStateMachine dsm = dataNode.getDatanodeStateMachine();
 +          HDDSLayoutVersionManager dnVersionManager =
 +              dsm.getDataNodeVersionManager();
 +          try {
 +            if (dsm.queryUpgradeStatus().status() != FINALIZATION_DONE) {
 +              return false;
 +            }
 +          } catch (IOException e) {
 +            e.printStackTrace();
 +            return false;
 +          }
 +        }
 +        return true;
 +      }, 1000, 20000);
 +    } catch (TimeoutException | InterruptedException e) {
 +      Assert.fail("Timeout waiting for Upgrade to complete on Data Nodes.");
 +    }
 +
 +    int countContainers = 0;
 +    for (HddsDatanodeService dataNode : cluster.getHddsDatanodes()) {
 +      DatanodeStateMachine dsm = dataNode.getDatanodeStateMachine();
 +      HDDSLayoutVersionManager dnVersionManager =
 +          dsm.getDataNodeVersionManager();
 +      Assert.assertEquals(dnVersionManager.getSoftwareLayoutVersion(),
 +          dnVersionManager.getMetadataLayoutVersion());
 +      Assert.assertTrue(dnVersionManager.getMetadataLayoutVersion() >= 1);
 +
 +      // Also verify that all the existing containers are closed.
 +      for (Iterator<Container<?>> it =
 +           dsm.getContainer().getController().getContainers(); it.hasNext();) {
 +        Container container = it.next();
 +        Assert.assertTrue(container.getContainerState() == CLOSED ||
 +            container.getContainerState() == QUASI_CLOSED);
 +        countContainers++;
 +      }
 +    }
 +    Assert.assertTrue(countContainers >= 1);
 +  }
 +
 +  private void testPostUpgradePipelineCreation() throws IOException {
 +    ratisPipeline1 = scmPipelineManager.createPipeline(RATIS, THREE);
 +    scmPipelineManager.openPipeline(ratisPipeline1.getId());
 +    Assert.assertEquals(0,
 +        scmPipelineManager.getNumberOfContainers(ratisPipeline1.getId()));
 +    PipelineID pid = scmContainerManager.allocateContainer(RATIS, THREE,
 +        "Owner1").getPipelineID();
 +    Assert.assertEquals(1, scmPipelineManager.getNumberOfContainers(pid));
 +    Assert.assertEquals(pid, ratisPipeline1.getId());
 +  }
 +
 +  private void testDataNodesStateOnSCM(NodeState state) {
 +    int countNodes = 0;
 +    for (DatanodeDetails dn : scm.getScmNodeManager().getAllNodes()){
-       Assert.assertEquals(state,
-           scm.getScmNodeManager().getNodeState(dn));
++      try {
++        Assert.assertEquals(state,
++            scm.getScmNodeManager().getNodeStatus(dn).getHealth());
++      } catch (NodeNotFoundException e) {
++        e.printStackTrace();
++        Assert.fail("Node not found");
++      }
 +      ++countNodes;
 +    }
 +    Assert.assertEquals(NUM_DATA_NODES, countNodes);
 +  }
 +
 +  @Test
 +  public void testLayoutUpgrade() throws IOException, InterruptedException {
 +    // Test the Pre-Upgrade conditions on SCM as well as DataNodes.
 +    testPreUpgradeConditionsSCM();
 +    testPreUpgradeConditionsDataNodes();
 +
 +    // Trigger Finalization on the SCM
 +    StatusAndMessages status = scm.finalizeUpgrade("xyz");
 +    Assert.assertEquals(STARTING_FINALIZATION, status.status());
 +
 +    // Wait for the Finalization to complete on the SCM.
 +    while (status.status() != FINALIZATION_DONE) {
 +      status = scm.queryUpgradeFinalizationProgress("xyz", false);
 +    }
 +
 +    // Verify Post-Upgrade conditions on the SCM.
 +    testPostUpgradeConditionsSCM();
 +
 +    // All datanodes on the SCM should have moved to HEALTHY-READONLY state.
 +    testDataNodesStateOnSCM(HEALTHY_READONLY);
 +
 +    // Verify the SCM has driven all the DataNodes through Layout Upgrade.
 +    sleep(5000);
 +    testPostUpgradeConditionsDataNodes();
 +
 +    // Allow some time for heartbeat exchanges.
 +    sleep(5000);
 +
 +    // All datanodes on the SCM should have moved to HEALTHY-READ-WRITE state.
 +    testDataNodesStateOnSCM(HEALTHY);
 +
 +    // Verify that new pipeline can be created with upgraded datanodes.
 +    testPostUpgradePipelineCreation();
 +  }
 +}
diff --cc hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index 035602c,7bccd8e..071f8db
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@@ -55,10 -26,7 +55,9 @@@ import org.apache.hadoop.hdds.HddsUtils
  import org.apache.hadoop.hdds.conf.OzoneConfiguration;
  import org.apache.hadoop.hdds.protocol.DatanodeDetails;
  import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
- import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
  import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
 +import org.apache.hadoop.hdds.protocol.proto
 +    .StorageContainerDatanodeProtocolProtos;
  import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
  import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
  import org.apache.hadoop.hdds.scm.ScmConfig;
@@@ -378,17 -372,8 +375,17 @@@ public class TestStorageContainerManage
        GenericTestUtils.waitFor(() -> {
          NodeManager nodeManager = cluster.getStorageContainerManager()
              .getScmNodeManager();
 +        LayoutVersionManager versionManager =
 +            nodeManager.getLayoutVersionManager();
 +        StorageContainerDatanodeProtocolProtos.LayoutVersionProto layoutInfo
 +            = StorageContainerDatanodeProtocolProtos.LayoutVersionProto
 +            .newBuilder()
 +            .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion())
 +            .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion())
 +            .build();
          List<SCMCommand> commands = nodeManager.processHeartbeat(
-             nodeManager.getNodes(NodeState.HEALTHY).get(0), layoutInfo);
- 
 -            nodeManager.getNodes(NodeStatus.inServiceHealthy()).get(0));
++            nodeManager.getNodes(NodeStatus.inServiceHealthy()).get(0),
++            layoutInfo);
          if (commands != null) {
            for (SCMCommand cmd : commands) {
              if (cmd.getType() == SCMCommandProto.Type.deleteBlocksCommand) {
diff --cc hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
index 8c18262,31acf4f..bad6054
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
@@@ -315,10 -338,52 +338,50 @@@ public final class TestHelper 
    }
  
    public static HddsDatanodeService getDatanodeService(OmKeyLocationInfo info,
 -      MiniOzoneCluster cluster)
 -      throws IOException {
 +      MiniOzoneCluster cluster) throws IOException {
      DatanodeDetails dnDetails =  info.getPipeline().
-             getFirstNode();
+         getFirstNode();
      return cluster.getHddsDatanodes().get(cluster.
-             getHddsDatanodeIndex(dnDetails));
+         getHddsDatanodeIndex(dnDetails));
+   }
+ 
+   public static Set<HddsDatanodeService> getDatanodeServices(
+       MiniOzoneCluster cluster, Pipeline pipeline) {
+     Set<HddsDatanodeService> services = new HashSet<>();
+     Set<DatanodeDetails> pipelineNodes = pipeline.getNodeSet();
+     for (HddsDatanodeService service : cluster.getHddsDatanodes()) {
+       if (pipelineNodes.contains(service.getDatanodeDetails())) {
+         services.add(service);
+       }
+     }
+     Assert.assertEquals(pipelineNodes.size(), services.size());
+     return services;
+   }
+ 
+   public static int countReplicas(long containerID, MiniOzoneCluster cluster) {
+     ContainerManager containerManager = cluster.getStorageContainerManager()
+         .getContainerManager();
+     try {
+       Set<ContainerReplica> replicas = containerManager
+           .getContainerReplicas(ContainerID.valueof(containerID));
+       LOG.info("Container {} has {} replicas on {}", containerID,
+           replicas.size(),
+           replicas.stream()
+               .map(ContainerReplica::getDatanodeDetails)
+               .map(DatanodeDetails::getUuidString)
+               .sorted()
+               .collect(toList())
+       );
+       return replicas.size();
+     } catch (ContainerNotFoundException e) {
+       LOG.warn("Container {} not found", containerID);
+       return 0;
+     }
+   }
+ 
+   public static void waitForReplicaCount(long containerID, int count,
+       MiniOzoneCluster cluster) throws TimeoutException, InterruptedException {
+     GenericTestUtils.waitFor(() -> countReplicas(containerID, cluster) == count,
+         1000, 30_000);
    }
 -
  }
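The new TestHelper methods above give integration tests a compact way to block
until a container reaches a target replica count. A minimal usage sketch,
assuming a test class with a running MiniOzoneCluster field named cluster and
an already existing container id (both assumptions, not shown in this patch):

    // Hypothetical usage sketch; containerID and cluster are assumed to exist.
    @Test
    public void waitForThreeReplicas() throws Exception {
      long containerID = 1L;  // assumed id of an existing container
      // Polls countReplicas() every second, up to 30 seconds (see above).
      TestHelper.waitForReplicaCount(containerID, 3, cluster);
      Assert.assertEquals(3, TestHelper.countReplicas(containerID, cluster));
    }
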
diff --cc hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java
index a642f5a,0000000..8dfc433
mode 100644,000000..100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java
@@@ -1,430 -1,0 +1,430 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership.  The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + * <p>
 + * http://www.apache.org/licenses/LICENSE-2.0
 + * <p>
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +
 +package org.apache.hadoop.ozone.om;
 +
 +import static java.nio.charset.StandardCharsets.UTF_8;
 +
 +import java.io.File;
 +import java.nio.file.Paths;
 +import java.util.ArrayList;
 +import java.util.HashSet;
 +import java.util.List;
 +import java.util.Set;
 +import java.util.UUID;
 +import java.util.concurrent.Callable;
 +import java.util.concurrent.ExecutionException;
 +import java.util.concurrent.ExecutorService;
 +import java.util.concurrent.Executors;
 +import java.util.concurrent.Future;
 +
 +import org.apache.hadoop.hdds.client.ReplicationFactor;
 +import org.apache.hadoop.hdds.client.ReplicationType;
 +import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
 +import org.apache.hadoop.ozone.client.ObjectStore;
 +import org.apache.hadoop.ozone.client.OzoneVolume;
 +import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 +import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
 +import org.apache.hadoop.ozone.container.ContainerTestHelper;
 +import org.apache.hadoop.ozone.container.TestHelper;
 +import org.apache.hadoop.ozone.om.exceptions.OMException;
 +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareStatusResponse.PrepareStatus;
 +import org.apache.hadoop.test.LambdaTestUtils;
 +import org.junit.Assert;
 +import org.junit.Test;
 +
 +/**
 + * Test OM prepare against actual mini cluster.
 + */
 +public class TestOzoneManagerPrepare extends TestOzoneManagerHA {
 +  private static final String BUCKET = "bucket";
 +  private static final String VOLUME = "volume";
 +  private static final String KEY_PREFIX = "key";
 +
 +  // Maximum time to wait for conditions involving Ratis logs.
 +  private static final int WAIT_TIMEOUT_MILLIS = 120000;
-   private final static long PREPARE_FLUSH_WAIT_TIMEOUT_SECONDS = 120L;
-   private final static long PREPARE_FLUSH_INTERVAL_SECONDS = 5L;
++  private static final long PREPARE_FLUSH_WAIT_TIMEOUT_SECONDS = 120L;
++  private static final long PREPARE_FLUSH_INTERVAL_SECONDS = 5L;
 +
 +  private MiniOzoneHAClusterImpl cluster;
 +  private ClientProtocol clientProtocol;
 +  private ObjectStore store;
 +
 +  public void setup() throws Exception {
 +    cluster = getCluster();
 +    store = getObjectStore();
 +    clientProtocol = store.getClientProxy();
 +
 +    store.createVolume(VOLUME);
 +    OzoneVolume volume = store.getVolume(VOLUME);
 +    volume.createBucket(BUCKET);
 +  }
 +
 +  /**
 +   * Calls prepare on all OMs when they have no transaction information.
 +   * Checks that they are brought into prepare mode successfully.
 +   */
 +  @Test
 +  public void testPrepareWithoutTransactions() throws Exception {
 +    setup();
 +    long prepareIndex = submitPrepareRequest();
 +    assertClusterPrepared(prepareIndex);
 +    assertRatisLogsCleared();
 +  }
 +
 +  /**
 +   * Writes data to the cluster via the leader OM, and then prepares it.
 +   * Checks that every OM is prepared successfully.
 +   */
 +  @Test
 +  public void testPrepareWithTransactions() throws Exception {
 +    setup();
 +    Set<String> writtenKeys = writeKeysAndWaitForLogs(50);
 +    long prepareIndex = submitPrepareRequest();
 +
 +    // Make sure all OMs are prepared and all OMs still have their data.
 +    assertClusterPrepared(prepareIndex);
 +    assertRatisLogsCleared();
 +    assertKeysWritten(writtenKeys);
 +  }
 +
 +  /**
 +   * Writes data to the cluster.
 +   * Shuts down one OM.
 +   * Writes more data to the cluster.
 +   * Submits prepare as a Ratis request.
 +   * Checks that the two live OMs are prepared.
 +   * Revives the third OM.
 +   * Checks that the third OM received all transactions and is prepared.
 +   * @throws Exception
 +   */
 +  // TODO: This test should be passing after HDDS-4610 and RATIS-1241
 +  // @Test
 +  public void testPrepareDownedOM() throws Exception {
 +    setup();
 +    // Index of the OM that will be shut down during this test.
 +    final int shutdownOMIndex = 2;
 +    List<OzoneManager> runningOms = cluster.getOzoneManagersList();
 +
 +    // Create keys with all 3 OMs up.
 +    Set<String> writtenKeys = writeKeysAndWaitForLogs(10, runningOms);
 +
 +    // Shut down one OM.
 +    cluster.stopOzoneManager(shutdownOMIndex);
 +    OzoneManager downedOM = cluster.getOzoneManager(shutdownOMIndex);
 +    Assert.assertFalse(downedOM.isRunning());
 +    Assert.assertEquals(runningOms.remove(shutdownOMIndex), downedOM);
 +
 +    // Write keys with the remaining OMs up.
 +    writtenKeys.addAll(
 +        writeKeysAndWaitForLogs(10, runningOms));
 +
 +    long prepareIndex = submitPrepareRequest();
 +
 +    // Check that the two live OMs are prepared.
 +    assertClusterPrepared(prepareIndex, runningOms);
 +
 +    // Restart the downed OM and wait for it to catch up.
 +    // Since prepare was the last Ratis transaction, it should have all data
 +    // it missed once it receives the prepare transaction.
 +    cluster.restartOzoneManager(downedOM, true);
 +    runningOms.add(shutdownOMIndex, downedOM);
 +
 +    // Make sure all OMs are prepared and still have data.
 +    assertClusterPrepared(prepareIndex, runningOms);
 +    assertKeysWritten(writtenKeys, runningOms);
 +  }
 +
 +  // TODO: This test should be passing after HDDS-4610 and RATIS-1241
 +  // @Test
 +  public void testPrepareWithRestart() throws Exception {
 +    setup();
 +    writeKeysAndWaitForLogs(10);
 +    long prepareIndex = submitPrepareRequest();
 +    assertClusterPrepared(prepareIndex);
 +
 +    // Restart all ozone managers.
 +    cluster.restartOzoneManager();
 +
 +    // No check for cleared logs, since Ratis meta transactions may slip in
 +    // on restart.
 +    assertClusterPrepared(prepareIndex);
 +  }
 +
 +  /**
 +   * Issues requests on ten different threads, of which one is a prepare and
 +   * the rest are create volume. We cannot be sure of the exact order in which
 +   * the requests will execute, so this test checks that the cluster ends in
 +   * a prepared state, and that create volume requests either succeed, or fail
 +   * indicating the cluster was prepared before they were encountered.
 +   * @throws Exception
 +   */
 +  @Test
 +  public void testPrepareWithMultipleThreads() throws Exception {
 +    setup();
 +    final int numThreads = 10;
 +    final int prepareTaskIndex = 5;
 +
 +    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
 +    // For the prepare task, the future will return a log index.
 +    // For the create volume tasks, 0 (dummy value) will be returned.
 +    List<Future<Long>> tasks = new ArrayList<>();
 +
 +    for (int i = 0; i < numThreads; i++) {
 +      Callable<Long> task;
 +      if (i == prepareTaskIndex) {
 +        task = this::submitPrepareRequest;
 +      } else {
 +        String volumeName = VOLUME + i;
 +        task = () -> {
 +          clientProtocol.createVolume(volumeName);
 +          return 0L;
 +        };
 +      }
 +      tasks.add(executorService.submit(task));
 +    }
 +
 +    // For each task, wait for it to complete and check its result.
 +    for (int i = 0; i < numThreads; i++) {
 +      Future<Long> future = tasks.get(i);
 +
 +      if (i == prepareTaskIndex) {
 +        assertClusterPrepared(future.get());
 +        assertRatisLogsCleared();
 +      } else {
 +        try {
 +          // If this throws an exception, it should be an OMException
 +          // indicating failure because the cluster was already prepared.
 +          // If no exception is thrown, the volume should be created.
 +          future.get();
 +          String volumeName = VOLUME + i;
 +          Assert.assertTrue(clientProtocol.listVolumes(volumeName, "", 1)
 +              .stream()
 +              .anyMatch((vol) -> vol.getName().equals(volumeName)));
 +        } catch (ExecutionException ex) {
 +          Throwable cause = ex.getCause();
 +          Assert.assertTrue(cause instanceof OMException);
 +          Assert.assertEquals(
 +              OMException.ResultCodes.NOT_SUPPORTED_OPERATION_WHEN_PREPARED,
 +              ((OMException) cause).getResult());
 +        }
 +      }
 +    }
 +
 +    // In the above loop, we have waited for all threads to terminate.
 +    executorService.shutdown();
 +  }
 +
 +  @Test
 +  public void testCancelPrepare() throws Exception {
 +    setup();
 +    Set<String> writtenKeys = writeKeysAndWaitForLogs(10);
 +    long prepareIndex = submitPrepareRequest();
 +
 +    // Make sure all OMs are prepared and all OMs still have their data.
 +    assertClusterPrepared(prepareIndex);
 +    assertRatisLogsCleared();
 +    assertKeysWritten(writtenKeys);
 +
 +    // Cancel prepare and check that data is still present.
 +    submitCancelPrepareRequest();
 +    assertClusterNotPrepared();
 +    assertKeysWritten(writtenKeys);
 +
 +    // Cancelling prepare again should have no effect.
 +    submitCancelPrepareRequest();
 +    assertClusterNotPrepared();
 +
 +    // Write more data after cancelling prepare.
 +    writtenKeys.addAll(writeKeysAndWaitForLogs(10));
 +
 +    // Cancelling prepare again should have no effect and new data should be
 +    // preserved.
 +    submitCancelPrepareRequest();
 +    assertClusterNotPrepared();
 +    assertKeysWritten(writtenKeys);
 +  }
 +
 +  private boolean logFilesPresentInRatisPeer(OzoneManager om) {
 +    String ratisDir = om.getOmRatisServer().getServer().getProperties()
 +        .get("raft.server.storage.dir");
 +    String groupIdDirName =
 +        om.getOmRatisServer().getServer().getGroupIds().iterator()
 +            .next().getUuid().toString();
 +    File logDir = Paths.get(ratisDir, groupIdDirName, "current")
 +        .toFile();
 +
 +    for (File file : logDir.listFiles()) {
 +      if (file.getName().startsWith("log")) {
 +        return true;
 +      }
 +    }
 +    return false;
 +  }
 +
 +  private Set<String> writeKeysAndWaitForLogs(int numKeys) throws Exception {
 +    return writeKeysAndWaitForLogs(numKeys, cluster.getOzoneManagersList());
 +  }
 +
 +  private Set<String> writeKeysAndWaitForLogs(int numKeys,
 +      List<OzoneManager> ozoneManagers) throws Exception {
 +
 +    Set<String> writtenKeys = new HashSet<>();
 +    for (int i = 1; i <= numKeys; i++) {
 +      String keyName = KEY_PREFIX + i;
 +      writeTestData(VOLUME, BUCKET, keyName);
 +      writtenKeys.add(keyName);
 +    }
 +
 +    // Make sure all OMs have logs from writing data, so we can check that
 +    // they are purged after prepare.
 +    for (OzoneManager om: ozoneManagers) {
 +      LambdaTestUtils.await(WAIT_TIMEOUT_MILLIS, 1000,
 +          () -> logFilesPresentInRatisPeer(om));
 +    }
 +
 +    return writtenKeys;
 +  }
 +
 +  private void writeTestData(String volumeName,
 +      String bucketName, String keyName) throws Exception {
 +
 +    String keyString = UUID.randomUUID().toString();
 +    byte[] data = ContainerTestHelper.getFixedLengthString(
 +        keyString, 100).getBytes(UTF_8);
 +    OzoneOutputStream keyStream = TestHelper.createKey(
 +        keyName, ReplicationType.STAND_ALONE, ReplicationFactor.ONE,
 +        100, store, volumeName, bucketName);
 +    keyStream.write(data);
 +    keyStream.close();
 +  }
 +
 +  private void assertKeysWritten(Set<String> expectedKeys) throws Exception {
 +    assertKeysWritten(expectedKeys, cluster.getOzoneManagersList());
 +  }
 +
 +  private void assertKeysWritten(Set<String> expectedKeys,
 +      List<OzoneManager> ozoneManagers) throws Exception {
 +    for (OzoneManager om: ozoneManagers) {
 +      List<OmKeyInfo> keys = om.getMetadataManager().listKeys(VOLUME,
 +          BUCKET, null, KEY_PREFIX, 100);
 +
 +      Assert.assertEquals(expectedKeys.size(), keys.size());
 +      for (OmKeyInfo keyInfo: keys) {
 +        Assert.assertTrue(expectedKeys.contains(keyInfo.getKeyName()));
 +      }
 +    }
 +  }
 +
 +  private long submitPrepareRequest() throws Exception {
 +    return clientProtocol.getOzoneManagerClient()
 +        .prepareOzoneManager(PREPARE_FLUSH_WAIT_TIMEOUT_SECONDS,
 +            PREPARE_FLUSH_INTERVAL_SECONDS);
 +  }
 +
 +  private void submitCancelPrepareRequest() throws Exception {
 +    clientProtocol.getOzoneManagerClient().cancelOzoneManagerPrepare();
 +  }
 +
 +  private void assertClusterPrepared(long preparedIndex) throws Exception {
 +    assertClusterPrepared(preparedIndex, cluster.getOzoneManagersList());
 +  }
 +
 +  private void assertClusterPrepared(long preparedIndex,
 +      List<OzoneManager> ozoneManagers) throws Exception {
 +
 +    for (OzoneManager om : ozoneManagers) {
 +      // Wait for each OM to be running and transaction info to match to know
 +      // it is prepared.
 +      LambdaTestUtils.await(WAIT_TIMEOUT_MILLIS,
 +          1000, () -> {
 +          if (!om.isRunning()) {
 +            return false;
 +          } else {
 +            boolean preparedAtIndex = false;
 +            OzoneManagerPrepareState.State state =
 +                om.getPrepareState().getState();
 +
 +            if (state.getStatus() == PrepareStatus.PREPARE_COMPLETED) {
 +              if (state.getIndex() == preparedIndex) {
 +                preparedAtIndex = true;
 +              } else {
 +                // State will not change if we are prepared at the wrong index.
 +                // Break out of wait.
 +                throw new Exception("OM " + om.getOMNodeId() + " prepared " +
 +                    "but prepare index " + state.getIndex() + " does not " +
 +                    "match expected prepare index " + preparedIndex);
 +              }
 +            }
 +            return preparedAtIndex;
 +          }
 +        });
 +    }
 +
 +    // Submitting a read request should pass.
 +    clientProtocol.listVolumes(VOLUME, "", 100);
 +
 +    // Submitting write request should fail.
 +    try {
 +      clientProtocol.createVolume("vol");
 +      Assert.fail("Write request should fail when OM is in prepare mode.");
 +    } catch (OMException ex) {
 +      Assert.assertEquals(
 +          OMException.ResultCodes.NOT_SUPPORTED_OPERATION_WHEN_PREPARED,
 +          ex.getResult());
 +    }
 +  }
 +
 +  private void assertClusterNotPrepared() throws Exception {
 +    assertClusterNotPrepared(cluster.getOzoneManagersList());
 +  }
 +
 +  private void assertClusterNotPrepared(List<OzoneManager> ozoneManagers)
 +      throws Exception {
 +    for (OzoneManager om : ozoneManagers) {
 +      LambdaTestUtils.await(WAIT_TIMEOUT_MILLIS,
 +          1000, () -> {
 +            if (!om.isRunning()) {
 +              return false;
 +            } else {
 +              return om.getPrepareState().getState().getStatus() ==
 +                  PrepareStatus.PREPARE_NOT_STARTED;
 +            }
 +          });
 +    }
 +
 +    // Submitting a read request should pass.
 +    clientProtocol.listVolumes(VOLUME, "", 100);
 +
 +    // Submitting write request should also pass.
 +    clientProtocol.createVolume("vol");
 +    clientProtocol.deleteVolume("vol");
 +  }
 +
 +  private void assertRatisLogsCleared() throws Exception {
 +    assertRatisLogsCleared(cluster.getOzoneManagersList());
 +  }
 +
 +  private void assertRatisLogsCleared(List<OzoneManager> ozoneManagers)
 +      throws Exception {
 +    for (OzoneManager om: ozoneManagers) {
 +      LambdaTestUtils.await(WAIT_TIMEOUT_MILLIS, 1000,
 +          () -> !logFilesPresentInRatisPeer(om));
 +    }
 +  }
 +}
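The test above drives the whole prepare lifecycle through the client API. A
minimal admin-side sketch of the same calls, assuming an existing
ClientProtocol instance named clientProtocol (the timeout and interval values
are simply the ones the test uses):

    // Hypothetical sketch mirroring the calls exercised by the test above.
    long prepareIndex = clientProtocol.getOzoneManagerClient()
        .prepareOzoneManager(120L, 5L);  // flush wait timeout / flush check interval, seconds
    // While prepared, reads succeed and writes fail with
    // NOT_SUPPORTED_OPERATION_WHEN_PREPARED; perform the upgrade here.
    clientProtocol.getOzoneManagerClient().cancelOzoneManagerPrepare();
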
diff --cc hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index 490e228,38b8c50..1672796
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@@ -110,8 -105,8 +110,9 @@@ message OMRequest 
    required string clientId = 3;
  
    optional UserInfo userInfo = 4;
+   optional uint32 version = 5;
  
-   optional LayoutVersion layoutVersion = 5;
++  optional LayoutVersion layoutVersion = 6;
  
    optional CreateVolumeRequest              createVolumeRequest            = 11;
    optional SetVolumePropertyRequest         setVolumePropertyRequest       = 12;
@@@ -334,12 -319,8 +335,14 @@@ enum Status 
  
      QUOTA_EXCEEDED = 66;
  
-     PERSIST_UPGRADE_TO_LAYOUT_VERSION_FAILED = 67;
-     REMOVE_UPGRADE_TO_LAYOUT_VERSION_FAILED = 68;
-     UPDATE_LAYOUT_VERSION_FAILED = 69;
-     LAYOUT_FEATURE_FINALIZATION_FAILED = 70;
-     PREPARE_FAILED = 71;
-     NOT_SUPPORTED_OPERATION_WHEN_PREPARED = 72;
+     QUOTA_ERROR = 67;
+ 
++    PERSIST_UPGRADE_TO_LAYOUT_VERSION_FAILED = 68;
++    REMOVE_UPGRADE_TO_LAYOUT_VERSION_FAILED = 69;
++    UPDATE_LAYOUT_VERSION_FAILED = 70;
++    LAYOUT_FEATURE_FINALIZATION_FAILED = 71;
++    PREPARE_FAILED = 72;
++    NOT_SUPPORTED_OPERATION_WHEN_PREPARED = 73;
  }
  
  /**
diff --cc hadoop-ozone/ozone-manager/pom.xml
index dab1d40,7b51ff3..995baf5
--- a/hadoop-ozone/ozone-manager/pom.xml
+++ b/hadoop-ozone/ozone-manager/pom.xml
@@@ -174,28 -158,12 +174,35 @@@ https://maven.apache.org/xsd/maven-4.0.
          </executions>
        </plugin>
        <plugin>
 +        <!--
 +        https://github.com/mojohaus/aspectj-maven-plugin/issues/24#issuecomment-419077658
 +        <groupId>org.codehaus.mojo</groupId>
 +        -->
 +        <groupId>com.github.m50d</groupId>
 +        <artifactId>aspectj-maven-plugin</artifactId>
 +        <version>1.11.1</version>
 +        <configuration>
 +          <source>1.8</source>
 +          <target>1.8</target>
 +        </configuration>
 +        <executions>
 +          <execution>
 +            <goals>
 +              <goal>compile</goal>
 +            </goals>
 +            <configuration>
 +              <complianceLevel>1.8</complianceLevel>
 +            </configuration>
 +          </execution>
 +        </executions>
 +      </plugin>
++      <plugin>
+         <groupId>com.github.spotbugs</groupId>
+         <artifactId>spotbugs-maven-plugin</artifactId>
+         <configuration>
+           <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml</excludeFilterFile>
+         </configuration>
+       </plugin>
      </plugins>
      <testResources>
        <testResource>
diff --cc hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index df29e0e,5b71fbc..5dde7c3
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@@ -216,10 -213,11 +217,12 @@@ import static org.apache.hadoop.ozone.O
  import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE;
  import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT;
  import static org.apache.hadoop.ozone.OzoneConsts.DB_TRANSIENT_MARKER;
+ import static org.apache.hadoop.ozone.OzoneConsts.DEFAULT_OM_UPDATE_ID;
  import static org.apache.hadoop.ozone.OzoneConsts.OM_METRICS_FILE;
  import static org.apache.hadoop.ozone.OzoneConsts.OM_METRICS_TEMP_FILE;
+ import static org.apache.hadoop.ozone.OzoneConsts.OM_RATIS_SNAPSHOT_DIR;
  import static org.apache.hadoop.ozone.OzoneConsts.RPC_PORT;
 +import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY;
  import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
  import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS;
  import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT;
@@@ -239,10 -237,8 +242,10 @@@ import static org.apache.hadoop.ozone.o
  import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
  import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER;
  import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK;
+ import static org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer.RaftServerStatus.LEADER_AND_READY;
  import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneManagerService.newReflectiveBlockingService;
 +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareStatusResponse.PrepareStatus;
 +
- import org.apache.hadoop.util.Time;
  import org.apache.ratis.proto.RaftProtos.RaftPeerRole;
  import org.apache.ratis.server.protocol.TermIndex;
  import org.apache.ratis.util.ExitUtils;
@@@ -369,18 -362,6 +372,9 @@@ public final class OzoneManager extend
      omStorage = new OMStorage(conf);
      omId = omStorage.getOmId();
  
 +    versionManager = new OMLayoutVersionManager(omStorage);
 +    upgradeFinalizer = new OMUpgradeFinalizer(versionManager);
 +
-     // In case of single OM Node Service there will be no OM Node ID
-     // specified, set it to value from om storage
-     if (this.omNodeDetails.getOMNodeId() == null) {
-       this.omNodeDetails = OMHANodeDetails.getOMNodeDetails(conf,
-           omNodeDetails.getOMServiceId(),
-           omStorage.getOmId(), omNodeDetails.getRpcAddress(),
-           omNodeDetails.getRatisPort());
-     }
- 
      loginOMUserIfSecurityEnabled(conf);
  
      this.allowListAllVolumes = conf.getBoolean(OZONE_OM_VOLUME_LISTALL_ALLOWED,
@@@ -1202,12 -1192,8 +1211,10 @@@
        // Allow OM to start as Http Server failure is not fatal.
        LOG.error("OM HttpServer failed to start.", ex);
      }
 +
      omRpcServer.start();
      isOmRpcServerRunning = true;
 +
-     // TODO: Start this thread only on the leader node.
-     //  Should be fixed after HDDS-4451.
      startTrashEmptier(configuration);
  
      registerMXBean();
diff --cc hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java
index 0000000,d7606d2..b3eec22
mode 000000,100644..100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java
@@@ -1,0 -1,534 +1,534 @@@
+ /**
+  * Licensed to the Apache Software Foundation (ASF) under one or more
+  * contributor license agreements.  See the NOTICE file distributed with this
+  * work for additional information regarding copyright ownership.  The ASF
+  * licenses this file to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  * <p>
+  * http://www.apache.org/licenses/LICENSE-2.0
+  * <p>
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+  * License for the specific language governing permissions and limitations under
+  * the License.
+  */
+ package org.apache.hadoop.ozone.om;
+ 
+ import com.google.common.base.Preconditions;
+ import com.google.protobuf.RpcController;
+ import org.apache.hadoop.ozone.om.exceptions.OMException;
+ import org.apache.hadoop.fs.FSDataInputStream;
+ import org.apache.hadoop.fs.permission.FsPermission;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.fs.FSDataOutputStream;
+ import org.apache.hadoop.fs.FileStatus;
+ import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
+ import org.apache.hadoop.hdds.utils.db.Table;
+ import org.apache.hadoop.hdds.utils.db.TableIterator;
+ import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+ import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+ import org.apache.hadoop.ozone.OFSPath;
+ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+ import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+ import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
+ import org.apache.hadoop.ozone.om.request.OMClientRequest;
+ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+ import org.apache.hadoop.ozone.om.helpers.OMRatisHelper;
+ import org.apache.hadoop.security.UserGroupInformation;
+ import org.apache.hadoop.util.Progressable;
+ import org.apache.ratis.protocol.ClientId;
+ import org.apache.ratis.protocol.Message;
+ import org.apache.ratis.protocol.RaftClientRequest;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import java.io.IOException;
+ import java.net.InetAddress;
+ import java.net.URI;
+ import java.util.ArrayList;
+ import java.util.Collection;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Iterator;
+ import java.util.concurrent.atomic.AtomicLong;
+ 
+ import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
+ import static org.apache.hadoop.ozone.om.helpers.OzoneFSUtils.addTrailingSlashIfNeeded;
+ import static org.apache.hadoop.ozone.om.helpers.OzoneFSUtils.pathToKey;
+ 
+ /**
+  * FileSystem to be used by the Trash Emptier.
+  * Only the APIs used by the trash emptier are implemented.
+  */
+ public class TrashOzoneFileSystem extends FileSystem {
+ 
+   private static final RpcController NULL_RPC_CONTROLLER = null;
+ 
+   private static final int OZONE_FS_ITERATE_BATCH_SIZE = 100;
+ 
+   private final OzoneManager ozoneManager;
+ 
+   private final String userName;
+ 
+   private String ofsPathPrefix;
+ 
+   private final AtomicLong runCount;
+ 
+   private static final ClientId CLIENT_ID = ClientId.randomId();
+ 
+   private static final Logger LOG =
+       LoggerFactory.getLogger(TrashOzoneFileSystem.class);
+ 
+   public TrashOzoneFileSystem(OzoneManager ozoneManager) throws IOException {
+     this.ozoneManager = ozoneManager;
+     this.userName =
+           UserGroupInformation.getCurrentUser().getShortUserName();
+     this.runCount = new AtomicLong(0);
+   }
+ 
+   private RaftClientRequest getRatisRequest(
+       OzoneManagerProtocolProtos.OMRequest omRequest) {
+     return RaftClientRequest.newBuilder()
+         .setClientId(CLIENT_ID)
+         .setServerId(ozoneManager.getOmRatisServer().getRaftPeerId())
+         .setGroupId(ozoneManager.getOmRatisServer().getRaftGroupId())
+         .setCallId(runCount.getAndIncrement())
+         .setMessage(
+             Message.valueOf(
+                 OMRatisHelper.convertRequestToByteString(omRequest)))
+         .setType(RaftClientRequest.writeRequestType())
+         .build();
+ 
+   }
+ 
+   private void submitRequest(OzoneManagerProtocolProtos.OMRequest omRequest)
+       throws Exception {
+     ozoneManager.getMetrics().incNumTrashWriteRequests();
+     if (ozoneManager.isRatisEnabled()) {
+       OMClientRequest omClientRequest =
 -          OzoneManagerRatisUtils.createClientRequest(omRequest);
++          OzoneManagerRatisUtils.getRequest(ozoneManager, omRequest);
+       omRequest = omClientRequest.preExecute(ozoneManager);
+       RaftClientRequest req = getRatisRequest(omRequest);
+       ozoneManager.getOmRatisServer().submitRequest(omRequest, req);
+     } else {
+       ozoneManager.getOmServerProtocol().
+           submitRequest(NULL_RPC_CONTROLLER, omRequest);
+     }
+   }
+ 
+   @Override
+   public URI getUri() {
+     throw new UnsupportedOperationException(
+         "fs.getUri() not implemented in TrashOzoneFileSystem");
+   }
+ 
+   @Override
+   public FSDataInputStream open(Path path, int i) {
+     throw new UnsupportedOperationException(
+         "fs.open() not implemented in TrashOzoneFileSystem");
+   }
+ 
+   @Override
+   public FSDataOutputStream create(Path path,
+       FsPermission fsPermission,
+       boolean b, int i, short i1,
+       long l, Progressable progressable){
+     throw new UnsupportedOperationException(
+         "fs.create() not implemented in TrashOzoneFileSystem");
+   }
+ 
+   @Override
+   public FSDataOutputStream append(Path path, int i,
+       Progressable progressable) {
+     throw new UnsupportedOperationException(
+         "fs.append() not implemented in TrashOzoneFileSystem");
+   }
+ 
+   @Override
+   public boolean rename(Path src, Path dst) throws IOException {
+     ozoneManager.getMetrics().incNumTrashRenames();
+     LOG.trace("Src: {} Dst: {}", src, dst);
+     // check whether the src and dst belong to the same bucket & trashroot.
+     OFSPath srcPath = new OFSPath(src);
+     OFSPath dstPath = new OFSPath(dst);
+     Preconditions.checkArgument(srcPath.getBucketName().
+         equals(dstPath.getBucketName()));
+     Preconditions.checkArgument(srcPath.getTrashRoot().
+         toString().equals(dstPath.getTrashRoot().toString()));
+     RenameIterator iterator = new RenameIterator(src, dst);
+     iterator.iterate();
+     return true;
+   }
+ 
+   @Override
+   public boolean delete(Path path, boolean b) throws IOException {
+     ozoneManager.getMetrics().incNumTrashDeletes();
+     DeleteIterator iterator = new DeleteIterator(path, true);
+     iterator.iterate();
+     return true;
+   }
+ 
+   @Override
+   public FileStatus[] listStatus(Path path) throws  IOException {
+     ozoneManager.getMetrics().incNumTrashListStatus();
+     List<FileStatus> fileStatuses = new ArrayList<>();
+     OmKeyArgs keyArgs = constructOmKeyArgs(path);
+     List<OzoneFileStatus> list = ozoneManager.
+         listStatus(keyArgs, false, null, Integer.MAX_VALUE);
+     for (OzoneFileStatus status : list) {
+       FileStatus fileStatus = convertToFileStatus(status);
+       fileStatuses.add(fileStatus);
+     }
+     return fileStatuses.toArray(new FileStatus[0]);
+   }
+ 
+ 
+   /**
+    * converts OzoneFileStatus object to FileStatus.
+    */
+   private FileStatus convertToFileStatus(OzoneFileStatus status) {
+     Path temp = new Path(ofsPathPrefix +
+         OZONE_URI_DELIMITER + status.getKeyInfo().getKeyName());
+     return new FileStatus(
+         status.getKeyInfo().getDataSize(),
+         status.isDirectory(),
+         status.getKeyInfo().getFactor().getNumber(),
+         status.getBlockSize(),
+         status.getKeyInfo().getModificationTime(),
+         temp
+     );
+   }
+ 
+   @Override
+   public void setWorkingDirectory(Path path) {
+     throw new UnsupportedOperationException(
+         "fs.setWorkingDirectory() not implemented in TrashOzoneFileSystem");
+   }
+ 
+   @Override
+   public Path getWorkingDirectory() {
+     throw new UnsupportedOperationException(
+         "fs.getWorkingDirectory() not implemented in TrashOzoneFileSystem");
+   }
+ 
+   @Override
+   public boolean mkdirs(Path path,
+       FsPermission fsPermission) {
+     throw new UnsupportedOperationException(
+         "fs.mkdirs() not implemented in TrashOzoneFileSystem");
+   }
+ 
+ 
+   @Override
+   public FileStatus getFileStatus(Path path) throws IOException {
+     ozoneManager.getMetrics().incNumGetFileStatus();
+     OmKeyArgs keyArgs = constructOmKeyArgs(path);
+     OzoneFileStatus ofs = ozoneManager.getKeyManager().getFileStatus(keyArgs);
+     FileStatus fileStatus = convertToFileStatus(ofs);
+     return fileStatus;
+   }
+ 
+   private OmKeyArgs constructOmKeyArgs(Path path) {
+     OFSPath ofsPath = new OFSPath(path);
+     String volume = ofsPath.getVolumeName();
+     String bucket = ofsPath.getBucketName();
+     String key = ofsPath.getKeyName();
+     OmKeyArgs keyArgs = new OmKeyArgs.Builder()
+         .setVolumeName(volume)
+         .setBucketName(bucket)
+         .setKeyName(key)
+         .build();
+     this.ofsPathPrefix = OZONE_URI_DELIMITER +
+         volume + OZONE_URI_DELIMITER + bucket;
+     return keyArgs;
+   }
+ 
+   @Override
+   public Collection<FileStatus> getTrashRoots(boolean allUsers) {
+     Preconditions.checkArgument(allUsers);
+     ozoneManager.getMetrics().incNumTrashGetTrashRoots();
+     Iterator<Map.Entry<CacheKey<String>,
+         CacheValue<OmBucketInfo>>> bucketIterator =
+         ozoneManager.getMetadataManager().getBucketIterator();
+     List<FileStatus> ret = new ArrayList<>();
+     while (bucketIterator.hasNext()){
+       Map.Entry<CacheKey<String>, CacheValue<OmBucketInfo>> entry =
+           bucketIterator.next();
+       OmBucketInfo omBucketInfo = entry.getValue().getCacheValue();
+       Path volumePath = new Path(OZONE_URI_DELIMITER,
+           omBucketInfo.getVolumeName());
+       Path bucketPath = new Path(volumePath, omBucketInfo.getBucketName());
+       Path trashRoot = new Path(bucketPath, FileSystem.TRASH_PREFIX);
+       try {
+         if (exists(trashRoot)) {
+           FileStatus[] list = this.listStatus(trashRoot);
+           for (FileStatus candidate : list) {
+             if (exists(candidate.getPath()) && candidate.isDirectory()) {
+               ret.add(candidate);
+             }
+           }
+         }
+       } catch (Exception e){
+         LOG.error("Couldn't perform fs operation " +
+             "fs.listStatus()/fs.exists()", e);
+       }
+     }
+     return ret;
+   }
+ 
+   @Override
+   public boolean exists(Path f) throws IOException {
+     ozoneManager.getMetrics().incNumTrashExists();
+     try {
+       this.getFileStatus(f);
+       return true;
+     } catch (OMException e) {
+       if (e.getResult() == OMException.ResultCodes.FILE_NOT_FOUND) {
+         LOG.trace("Couldn't execute getFileStatus()", e);
+         return false;
+       } else {
+         throw e;
+       }
+     }
+   }
+ 
+   private abstract class OzoneListingIterator {
+     private final Path path;
+     private final FileStatus status;
+     private String pathKey;
+     private TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+           keyIterator;
+ 
+     OzoneListingIterator(Path path)
+           throws IOException {
+       this.path = path;
+       this.status = getFileStatus(path);
+       this.pathKey = pathToKey(path);
+       if (status.isDirectory()) {
+         this.pathKey = addTrailingSlashIfNeeded(pathKey);
+       }
+       keyIterator = ozoneManager.getMetadataManager().getKeyIterator();
+     }
+ 
+       /**
+        * The output of processKeyPath determines whether further iteration
+        * through the keys should continue.
+        *
+        * @return true if we should continue iteration of keys, false otherwise.
+        * @throws IOException
+        */
+     abstract boolean processKeyPath(List<String> keyPathList)
+           throws IOException;
+ 
+       /**
+        * Iterates through all the keys prefixed with the input path's key and
+        * processes them through processKeyPath().
+        * If processKeyPath() returns false for any batch, the iteration is
+        * stopped and false is returned, indicating that not all keys could
+        * be processed successfully.
+        *
+        * @return true if all keys are processed successfully, false otherwise.
+        * @throws IOException
+        */
+     boolean iterate() throws IOException {
+       LOG.trace("Iterating path: {}", path);
+       List<String> keyPathList = new ArrayList<>();
+       if (status.isDirectory()) {
+         LOG.trace("Iterating directory: {}", pathKey);
+         OFSPath ofsPath = new OFSPath(pathKey);
+         String ofsPathprefix =
+             ofsPath.getNonKeyPathNoPrefixDelim() + OZONE_URI_DELIMITER;
+         while (keyIterator.hasNext()) {
+           Table.KeyValue<String, OmKeyInfo> kv = keyIterator.next();
+           String keyPath = ofsPathprefix + kv.getValue().getKeyName();
+           LOG.trace("iterating key path: {}", keyPath);
+           if (!kv.getValue().getKeyName().equals("")
+               && kv.getKey().startsWith("/" + pathKey)) {
+             keyPathList.add(keyPath);
+           }
+           if (keyPathList.size() >= OZONE_FS_ITERATE_BATCH_SIZE) {
+             if (!processKeyPath(keyPathList)) {
+               return false;
+             } else {
+               keyPathList.clear();
+             }
+           }
+         }
+         if (keyPathList.size() > 0) {
+           if (!processKeyPath(keyPathList)) {
+             return false;
+           }
+         }
+         return true;
+       } else {
+         LOG.trace("iterating file: {}", path);
+         keyPathList.add(pathKey);
+         return processKeyPath(keyPathList);
+       }
+     }
+ 
+     FileStatus getStatus() {
+       return status;
+     }
+   }
+ 
+   private class RenameIterator extends OzoneListingIterator {
+     private final String srcPath;
+     private final String dstPath;
+ 
+     RenameIterator(Path srcPath, Path dstPath)
+         throws IOException {
+       super(srcPath);
+       this.srcPath = pathToKey(srcPath);
+       this.dstPath = pathToKey(dstPath);
+       LOG.trace("rename from:{} to:{}", this.srcPath, this.dstPath);
+     }
+ 
+     @Override
+     boolean processKeyPath(List<String> keyPathList) {
+       for (String keyPath : keyPathList) {
+         String newPath = dstPath.concat(keyPath.substring(srcPath.length()));
+         OFSPath src = new OFSPath(keyPath);
+         OFSPath dst = new OFSPath(newPath);
+ 
+         OzoneManagerProtocolProtos.OMRequest omRequest =
+             getRenameKeyRequest(src, dst);
+         try {
+           ozoneManager.getMetrics().incNumTrashFilesRenames();
+           submitRequest(omRequest);
+         } catch (Throwable e) {
+           LOG.error("Couldn't send rename request.", e);
+         }
+ 
+       }
+       return true;
+     }
+ 
+     private OzoneManagerProtocolProtos.OMRequest
+         getRenameKeyRequest(
+         OFSPath src, OFSPath dst) {
+       String volumeName = src.getVolumeName();
+       String bucketName = src.getBucketName();
+       String keyName = src.getKeyName();
+ 
+       OzoneManagerProtocolProtos.KeyArgs keyArgs =
+           OzoneManagerProtocolProtos.KeyArgs.newBuilder()
+               .setKeyName(keyName)
+               .setVolumeName(volumeName)
+               .setBucketName(bucketName)
+               .build();
+       String toKeyName = dst.getKeyName();
+       OzoneManagerProtocolProtos.RenameKeyRequest renameKeyRequest =
+           OzoneManagerProtocolProtos.RenameKeyRequest.newBuilder()
+               .setKeyArgs(keyArgs)
+               .setToKeyName(toKeyName)
+               .build();
+       OzoneManagerProtocolProtos.OMRequest omRequest =
+           null;
+       try {
+         omRequest = OzoneManagerProtocolProtos.OMRequest.newBuilder()
+             .setClientId(CLIENT_ID.toString())
+             .setUserInfo(getUserInfo())
+             .setRenameKeyRequest(renameKeyRequest)
+             .setCmdType(OzoneManagerProtocolProtos.Type.RenameKey)
+             .build();
+       } catch (IOException e) {
+         LOG.error("Couldn't get userinfo", e);
+       }
+       return omRequest;
+     }
+   }
+ 
+   private class DeleteIterator extends OzoneListingIterator {
+     private final boolean recursive;
+     private List<String> keysList;
+ 
+ 
+     DeleteIterator(Path f, boolean recursive)
+         throws IOException {
+       super(f);
+       this.recursive = recursive;
+       keysList = new ArrayList<>();
+       if (getStatus().isDirectory()
+           && !this.recursive
+           && listStatus(f).length != 0) {
+         throw new PathIsNotEmptyDirectoryException(f.toString());
+       }
+     }
+ 
+     @Override
+     boolean processKeyPath(List<String> keyPathList) {
+       LOG.trace("Deleting keys: {}", keyPathList);
+       for (String keyPath : keyPathList) {
+         OFSPath path = new OFSPath(keyPath);
+         OzoneManagerProtocolProtos.OMRequest omRequest =
+             getDeleteKeyRequest(path);
+         try {
+           ozoneManager.getMetrics().incNumTrashFilesDeletes();
+           submitRequest(omRequest);
+         } catch (Throwable e) {
+           LOG.error("Couldn't send delete request.", e);
+         }
+       }
+       return true;
+     }
+ 
+     private OzoneManagerProtocolProtos.OMRequest
+         getDeleteKeyRequest(
+         OFSPath keyPath) {
+       String volumeName = keyPath.getVolumeName();
+       String bucketName = keyPath.getBucketName();
+       String keyName = keyPath.getKeyName();
+       keysList.clear();
+       // Keys List will have only 1 entry.
+       keysList.add(keyName);
+       OzoneManagerProtocolProtos.DeleteKeyArgs.Builder deleteKeyArgs =
+           OzoneManagerProtocolProtos.DeleteKeyArgs.newBuilder()
+               .setBucketName(bucketName)
+               .setVolumeName(volumeName);
+       deleteKeyArgs.addAllKeys(keysList);
+       OzoneManagerProtocolProtos.DeleteKeysRequest deleteKeysRequest =
+           OzoneManagerProtocolProtos.DeleteKeysRequest.newBuilder()
+               .setDeleteKeys(deleteKeyArgs)
+               .build();
+       OzoneManagerProtocolProtos.OMRequest omRequest =
+           null;
+       try {
+         omRequest = OzoneManagerProtocolProtos.OMRequest.newBuilder()
+             .setClientId(CLIENT_ID.toString())
+             .setUserInfo(getUserInfo())
+             .setDeleteKeysRequest(deleteKeysRequest)
+             .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKeys)
+             .build();
+       } catch (IOException e) {
+         LOG.error("Couldn't get userinfo", e);
+       }
+       return omRequest;
+     }
+   }
+ 
+   OzoneManagerProtocolProtos.UserInfo getUserInfo() throws IOException {
+     UserGroupInformation user = UserGroupInformation.getCurrentUser();
+     InetAddress remoteAddress = ozoneManager.getOmRpcServerAddr().getAddress();
+     OzoneManagerProtocolProtos.UserInfo.Builder userInfo =
+         OzoneManagerProtocolProtos.UserInfo.newBuilder();
+     if (user != null) {
+       userInfo.setUserName(user.getUserName());
+     }
+ 
+     if (remoteAddress != null) {
+       userInfo.setHostName(remoteAddress.getHostName());
+       userInfo.setRemoteAddress(remoteAddress.getHostAddress());
+     }
+ 
+     return userInfo.build();
+   }
+ 
+ 
+ 
+ }
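TrashOzoneFileSystem above only implements what the Hadoop trash emptier
needs (listStatus, getFileStatus, exists, rename, delete, getTrashRoots). A
rough sketch of how an emptier-style loop could walk the trash roots with it,
assuming an OzoneManager instance named om and leaving out the
checkpoint-expiry bookkeeping a real emptier performs:

    // Hypothetical sketch; om is an assumed OzoneManager instance.
    FileSystem trashFs = new TrashOzoneFileSystem(om);
    // getTrashRoots(true) returns one directory per user under each
    // bucket's .Trash prefix (see the implementation above).
    for (FileStatus userTrashRoot : trashFs.getTrashRoots(true)) {
      for (FileStatus checkpoint : trashFs.listStatus(userTrashRoot.getPath())) {
        // A real emptier would compare the checkpoint timestamp against
        // fs.trash.interval before deleting; shown unconditionally here.
        trashFs.delete(checkpoint.getPath(), true);
      }
    }
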
diff --cc hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
index 43f01bf,828c9e9..e8652a1
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
@@@ -91,12 -90,8 +91,12 @@@ public abstract class OMClientRequest i
     */
    public OMRequest preExecute(OzoneManager ozoneManager)
        throws IOException {
 +    LayoutVersion layoutVersion = LayoutVersion.newBuilder()
 +        .setVersion(ozoneManager.getVersionManager().getMetadataLayoutVersion())
 +        .build();
-     omRequest =
-         getOmRequest().toBuilder()
-             .setUserInfo(getUserInfo()).setLayoutVersion(layoutVersion).build();
+     omRequest = getOmRequest().toBuilder()
 -        .setUserInfo(getUserIfNotExists(ozoneManager)).build();
++        .setUserInfo(getUserIfNotExists(ozoneManager))
++        .setLayoutVersion(layoutVersion).build();
      return omRequest;
    }
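With the merged preExecute() above, every OMRequest that reaches the Ratis
pipeline carries both the caller's user info and the server's current metadata
layout version. A small sketch of the observable effect, assuming an
OzoneManager instance named om and some concrete OMClientRequest instance
named clientRequest (both assumptions for illustration):

    // Hypothetical sketch; om and clientRequest are assumed instances.
    OMRequest stamped = clientRequest.preExecute(om);
    // The stamped request reports the server's metadata layout version.
    assert stamped.getLayoutVersion().getVersion()
        == om.getVersionManager().getMetadataLayoutVersion();
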
  
diff --cc hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAddAclRequest.java
index 565871e,296751a..be575fc
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAddAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAddAclRequest.java
@@@ -18,17 -18,19 +18,22 @@@
  
  package org.apache.hadoop.ozone.om.request.bucket.acl;
  
 +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.AddAcl;
 +
  import java.io.IOException;
  import java.util.List;
+ import java.util.Map;
  
  import com.google.common.collect.Lists;
+ import org.apache.hadoop.ozone.audit.AuditLogger;
+ import org.apache.hadoop.ozone.audit.OMAction;
  import org.apache.hadoop.ozone.om.OMMetrics;
  import org.apache.hadoop.ozone.om.OzoneManager;
  import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
  import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType;
+ import org.apache.hadoop.ozone.security.acl.OzoneObj;
+ import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
  import org.apache.hadoop.ozone.util.BooleanBiFunction;
  import org.apache.hadoop.util.Time;
  import org.slf4j.Logger;
diff --cc hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketRemoveAclRequest.java
index 3932c0f,5e32295..6f58886
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketRemoveAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketRemoveAclRequest.java
@@@ -18,15 -18,17 +18,20 @@@
  
  package org.apache.hadoop.ozone.om.request.bucket.acl;
  
 +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.RemoveAcl;
 +
  import java.io.IOException;
  import java.util.List;
+ import java.util.Map;
  
+ import org.apache.hadoop.ozone.audit.AuditLogger;
+ import org.apache.hadoop.ozone.audit.OMAction;
  import org.apache.hadoop.ozone.om.OzoneManager;
  import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
  import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType;
+ import org.apache.hadoop.ozone.security.acl.OzoneObj;
+ import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
  import org.apache.hadoop.util.Time;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
diff --cc hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java
index e4e64ba,8cfe29c..c7d1e8f
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java
@@@ -27,7 -28,8 +30,9 @@@ import org.apache.hadoop.ozone.audit.OM
  import org.apache.hadoop.ozone.om.OzoneManager;
  import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
  import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType;
+ import org.apache.hadoop.ozone.security.acl.OzoneObj;
+ import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
  import org.apache.hadoop.util.Time;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
diff --cc hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
index 8d3e700,0619062..ed3cc4f
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
@@@ -300,16 -300,18 +300,18 @@@ public class OMFileCreateRequest extend
            trxnLogIndex);
  
        omBucketInfo.incrUsedBytes(preAllocatedSpace);
+       // Update namespace quota
+       omBucketInfo.incrUsedNamespace(1L);
  
+       numMissingParents = missingParentInfos.size();
        // Prepare response
        omResponse.setCreateFileResponse(CreateFileResponse.newBuilder()
-           .setKeyInfo(omKeyInfo.getProtobuf())
+           .setKeyInfo(omKeyInfo.getProtobuf(getOmRequest().getVersion()))
            .setID(clientID)
            .setOpenVersion(openVersion).build())
 -          .setCmdType(Type.CreateFile);
 +          .setCmdType(CreateFile);
        omClientResponse = new OMFileCreateResponse(omResponse.build(),
-           omKeyInfo, missingParentInfos, clientID, omVolumeArgs,
-           omBucketInfo.copyObject());
+           omKeyInfo, missingParentInfos, clientID, omBucketInfo.copyObject());
  
        result = Result.SUCCESS;
      } catch (IOException ex) {
diff --cc hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java
index 6a4922c,76a3a6a..abbe80c
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java
@@@ -18,10 -18,9 +18,11 @@@
  
  package org.apache.hadoop.ozone.om.request.key.acl;
  
 +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.AddAcl;
 +
  import java.io.IOException;
  import java.util.List;
+ import java.util.Map;
  
  import com.google.common.collect.Lists;
  import org.apache.hadoop.ozone.OzoneAcl;
@@@ -31,7 -33,8 +35,9 @@@ import org.apache.hadoop.ozone.om.ratis
  import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
  import org.apache.hadoop.ozone.om.response.key.acl.OMKeyAclResponse;
  import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType;
+ import org.apache.hadoop.ozone.security.acl.OzoneObj;
+ import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
  import org.apache.hadoop.util.Time;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
diff --cc hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java
index 2484958,13190df..8608f6f
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java
@@@ -18,10 -18,9 +18,11 @@@
  
  package org.apache.hadoop.ozone.om.request.key.acl;
  
 +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.RemoveAcl;
 +
  import java.io.IOException;
  import java.util.List;
+ import java.util.Map;
  
  import com.google.common.collect.Lists;
  import org.apache.hadoop.ozone.OzoneAcl;
@@@ -31,7 -33,8 +35,9 @@@ import org.apache.hadoop.ozone.om.ratis
  import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
  import org.apache.hadoop.ozone.om.response.key.acl.OMKeyAclResponse;
  import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType;
+ import org.apache.hadoop.ozone.security.acl.OzoneObj;
+ import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
  import org.apache.hadoop.util.Time;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
diff --cc hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java
index a5d736f,90a2fe6..31c165e
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java
@@@ -18,10 -18,9 +18,11 @@@
  
  package org.apache.hadoop.ozone.om.request.key.acl;
  
 +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.SetAcl;
 +
  import java.io.IOException;
  import java.util.List;
+ import java.util.Map;
  
  import com.google.common.collect.Lists;
  import org.apache.hadoop.ozone.OzoneAcl;
@@@ -32,7 -34,8 +36,9 @@@ import org.apache.hadoop.ozone.om.ratis
  import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
  import org.apache.hadoop.ozone.om.response.key.acl.OMKeyAclResponse;
  import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType;
+ import org.apache.hadoop.ozone.security.acl.OzoneObj;
+ import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
  import org.apache.hadoop.util.Time;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
diff --cc hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java
index e4dcea6,0c67b70..ada76ab
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java
@@@ -18,12 -18,14 +18,16 @@@
  
  package org.apache.hadoop.ozone.om.request.key.acl.prefix;
  
 +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.AddAcl;
 +
  import java.io.IOException;
  import java.util.List;
+ import java.util.Map;
  
  import com.google.common.collect.Lists;
+ import org.apache.hadoop.ozone.OzoneConsts;
+ import org.apache.hadoop.ozone.audit.AuditLogger;
+ import org.apache.hadoop.ozone.audit.OMAction;
  import org.apache.hadoop.ozone.om.OMMetrics;
  import org.apache.hadoop.ozone.om.PrefixManagerImpl;
  import org.apache.hadoop.ozone.om.PrefixManagerImpl.OMPrefixAclOpResult;
diff --cc hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java
index 7af93ae,0c33866..fd26e77
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java
@@@ -18,12 -18,14 +18,16 @@@
  
  package org.apache.hadoop.ozone.om.request.key.acl.prefix;
  
 +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.RemoveAcl;
 +
  import java.io.IOException;
  import java.util.List;
+ import java.util.Map;
  
  import com.google.common.collect.Lists;
+ import org.apache.hadoop.ozone.OzoneConsts;
+ import org.apache.hadoop.ozone.audit.AuditLogger;
+ import org.apache.hadoop.ozone.audit.OMAction;
  import org.apache.hadoop.ozone.om.OMMetrics;
  import org.apache.hadoop.ozone.om.PrefixManagerImpl;
  import org.apache.hadoop.ozone.om.PrefixManagerImpl.OMPrefixAclOpResult;
diff --cc hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMPrepareRequest.java
index 6b57bd8,0000000..66d0d5f
mode 100644,000000..100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMPrepareRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMPrepareRequest.java
@@@ -1,245 -1,0 +1,245 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership.  The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + * <p>
 + * http://www.apache.org/licenses/LICENSE-2.0
 + * <p>
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +
 +package org.apache.hadoop.ozone.om.request.upgrade;
 +
 +import org.apache.hadoop.ozone.om.OMMetadataManager;
 +import org.apache.hadoop.ozone.om.OzoneManager;
 +import org.apache.hadoop.ozone.om.exceptions.OMException;
 +import org.apache.hadoop.ozone.om.ratis.OMTransactionInfo;
 +import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer;
 +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 +import org.apache.hadoop.ozone.om.request.OMClientRequest;
 +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 +import org.apache.hadoop.ozone.om.response.OMClientResponse;
 +import org.apache.hadoop.ozone.om.response.upgrade.OMPrepareResponse;
 +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareResponse;
 +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 +
 +import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY;
 +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
 +
 +import org.apache.ratis.server.RaftServer;
 +import org.apache.ratis.server.raftlog.RaftLog;
 +import org.apache.ratis.statemachine.StateMachine;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +import java.io.IOException;
 +import java.time.Duration;
 +import java.time.temporal.ChronoUnit;
 +import java.util.concurrent.CompletableFuture;
 +
 +/**
 + * OM Request used to flush all transactions to disk, take a DB snapshot, and
 + * purge the logs, leaving Ratis in a clean state without unapplied log
 + * entries. This prepares the OM for upgrades/downgrades so that no request
 + * in the log is applied to the database by the old version of the code on one
 + * OM and by the new version of the code on another OM.
 + */
 +public class OMPrepareRequest extends OMClientRequest {
 +  private static final Logger LOG =
 +      LoggerFactory.getLogger(OMPrepareRequest.class);
 +
 +  public OMPrepareRequest(OMRequest omRequest) {
 +    super(omRequest);
 +  }
 +
 +  @Override
 +  public OMClientResponse validateAndUpdateCache(
 +      OzoneManager ozoneManager, long transactionLogIndex,
 +      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
 +
 +    LOG.info("OM {} Received prepare request with log index {}",
 +        ozoneManager.getOMNodeId(), transactionLogIndex);
 +
 +    OMRequest omRequest = getOmRequest();
 +    OzoneManagerProtocolProtos.PrepareRequestArgs args =
 +        omRequest.getPrepareRequest().getArgs();
 +    OMResponse.Builder responseBuilder =
 +        OmResponseUtil.getOMResponseBuilder(omRequest);
 +    responseBuilder.setCmdType(Type.Prepare);
 +    OMClientResponse response = null;
 +
 +    // Allow double buffer this many seconds to flush all transactions before
 +    // returning an error to the caller.
 +    Duration flushTimeout =
 +        Duration.of(args.getTxnApplyWaitTimeoutSeconds(), ChronoUnit.SECONDS);
 +    // Time between checks to see if double buffer finished flushing.
 +    Duration flushCheckInterval =
 +        Duration.of(args.getTxnApplyCheckIntervalSeconds(), ChronoUnit.SECONDS);
 +
 +    try {
 +      // Create response.
 +      // DB snapshot for prepare will include the transaction to commit it,
 +      // making the prepare index one more than this txn's log index.
 +      long prepareIndex = transactionLogIndex + 1;
 +      PrepareResponse omResponse = PrepareResponse.newBuilder()
 +              .setTxnID(prepareIndex)
 +              .build();
 +      responseBuilder.setPrepareResponse(omResponse);
 +      response = new OMPrepareResponse(responseBuilder.build());
 +
 +      // Add response to double buffer before clearing logs.
 +      // This guarantees the log index of this request will be the same as
 +      // the snapshot index in the prepared state.
 +      ozoneManagerDoubleBufferHelper.add(response, transactionLogIndex);
 +
 +      OzoneManagerRatisServer omRatisServer = ozoneManager.getOmRatisServer();
 +      RaftServer.Division division =
 +          omRatisServer.getServer()
 +              .getDivision(omRatisServer.getRaftGroup().getGroupId());
 +
 +      // Wait for outstanding double buffer entries to flush to disk,
 +      // so they will not be purged from the log before being persisted to
 +      // the DB.
 +      // Since the response for this request was added to the double buffer
 +      // already, once this index reaches the state machine, we know all
 +      // transactions have been flushed.
 +      waitForLogIndex(transactionLogIndex,
 +          ozoneManager.getMetadataManager(), division,
 +          flushTimeout, flushCheckInterval);
 +
 +      long snapshotIndex = takeSnapshotAndPurgeLogs(division);
 +      if (snapshotIndex != prepareIndex) {
 +        LOG.warn("Snapshot index {} does not " +
 +            "match expected prepare index {}.", snapshotIndex, prepareIndex);
 +      }
 +
 +      // Save transaction log index to a marker file, so if the OM restarts,
 +      // it will remain in prepare mode on that index as long as the file
 +      // exists.
 +      ozoneManager.getPrepareState().finishPrepare(prepareIndex);
 +
 +      LOG.info("OM {} prepared at log index {}. Returning response {} with " +
 +          "log index {}", ozoneManager.getOMNodeId(), prepareIndex, omResponse,
 +          omResponse.getTxnID());
 +    } catch (OMException e) {
 +      LOG.error("Prepare Request Apply failed in {}. ",
 +          ozoneManager.getOMNodeId(), e);
 +      response = new OMPrepareResponse(
 +          createErrorOMResponse(responseBuilder, e));
 +    } catch (InterruptedException | IOException e) {
 +      // Set error code so that prepare failure does not cause the OM to
 +      // terminate.
 +      LOG.error("Prepare Request Apply failed in {}. ",
 +          ozoneManager.getOMNodeId(), e);
 +      response = new OMPrepareResponse(
 +          createErrorOMResponse(responseBuilder, new OMException(e,
 +              OMException.ResultCodes.PREPARE_FAILED)));
 +    }
 +
 +    return response;
 +  }
 +
 +  /**
 +   * Waits for the specified index to be flushed to the state machine on
 +   * disk, and to be updated in memory in Ratis.
 +   */
 +  private static void waitForLogIndex(long indexToWaitFor,
 +      OMMetadataManager metadataManager, RaftServer.Division division,
 +      Duration flushTimeout, Duration flushCheckInterval)
 +      throws InterruptedException, IOException {
 +
 +    long endTime = System.currentTimeMillis() + flushTimeout.toMillis();
 +    boolean success = false;
 +
 +    while (!success && System.currentTimeMillis() < endTime) {
 +      // If no transactions have been persisted to the DB, transaction info
 +      // will be null, not zero, causing a null pointer exception within
 +      // ozoneManager#getRatisSnapshotIndex.
 +      // Get the transaction directly instead to handle the case when it is
 +      // null.
 +      OMTransactionInfo dbTxnInfo = metadataManager
 +          .getTransactionInfoTable().get(TRANSACTION_INFO_KEY);
 +      long ratisTxnIndex =
 +          division.getStateMachine().getLastAppliedTermIndex().getIndex();
 +
 +      // Ratis may apply meta transactions after the prepare request, causing
 +      // its in memory index to always be greater than the DB index.
 +      if (dbTxnInfo == null) {
 +        // If there are no transactions in the DB, we are prepared to log
 +        // index 0 only.
 +        success = (indexToWaitFor == 0)
 +            && (ratisTxnIndex >= indexToWaitFor);
 +      } else {
 +        success = (dbTxnInfo.getTransactionIndex() == indexToWaitFor)
 +            && (ratisTxnIndex >= indexToWaitFor);
 +      }
 +
 +      if (!success) {
 +        Thread.sleep(flushCheckInterval.toMillis());
 +      }
 +    }
 +
 +    // If the timeout waiting for all transactions to reach the state machine
 +    // is exceeded, the exception is propagated, resulting in an error response
 +    // to the client. They can retry the prepare request.
 +    if (!success) {
 +      throw new IOException(String.format("After waiting for %d seconds, " +
 +              "State Machine has not applied  all the transactions.",
 +          flushTimeout.getSeconds()));
 +    }
 +  }
 +
 +  /**
 +   * Take a snapshot of the state machine at the last index, and purge ALL logs.
 +   * @param division Raft server division.
 +   * @return The index the snapshot was taken on.
 +   * @throws IOException on Error.
 +   */
 +  public static long takeSnapshotAndPurgeLogs(RaftServer.Division division)
 +      throws IOException {
 +
 +    StateMachine stateMachine = division.getStateMachine();
 +    long snapshotIndex = stateMachine.takeSnapshot();
 +    RaftLog raftLog = division.getRaftLog();
 +    long raftLogIndex = raftLog.getLastEntryTermIndex().getIndex();
 +
 +    // We can have a case where the log has a meta transaction after the
 +    // prepare request or another prepare request. If there is another
 +    // prepare request, this one will end up purging that request.
 +    // This means that an OM cannot support 2 prepare requests in the
 +    // transaction pipeline (un-applied) at the same time.
 +    if (raftLogIndex > snapshotIndex) {
 +      LOG.warn("Snapshot index {} does not " +
 +          "match last log index {}.", snapshotIndex, raftLogIndex);
 +      snapshotIndex = raftLogIndex;
 +    }
 +
 +    CompletableFuture<Long> purgeFuture =
-         raftLog.syncWithSnapshot(snapshotIndex);
++        raftLog.onSnapshotInstalled(snapshotIndex);
 +
 +    try {
 +      Long purgeIndex = purgeFuture.get();
 +      if (purgeIndex != snapshotIndex) {
 +        throw new IOException("Purge index " + purgeIndex +
 +            " does not match last index " + snapshotIndex);
 +      }
 +    } catch (Exception e) {
 +      throw new IOException("Unable to purge logs.", e);
 +    }
 +
 +    return snapshotIndex;
 +  }
 +
 +  public static String getRequestType() {
 +    return Type.Prepare.name();
 +  }
 +}
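
For readers following the prepare flow above: the core of waitForLogIndex is a bounded polling loop. The fragment below is an illustrative sketch only (it is not part of this commit and uses hypothetical names), showing the same time-boxed wait-then-fail pattern in isolation.

import java.io.IOException;
import java.time.Duration;
import java.util.function.LongSupplier;

public final class BoundedIndexWait {

  private BoundedIndexWait() {
  }

  // Polls the supplier until the applied index reaches indexToWaitFor,
  // sleeping checkInterval between attempts, and fails once timeout elapses.
  static void await(long indexToWaitFor, LongSupplier appliedIndex,
      Duration timeout, Duration checkInterval)
      throws IOException, InterruptedException {
    long endTime = System.currentTimeMillis() + timeout.toMillis();
    while (System.currentTimeMillis() < endTime) {
      if (appliedIndex.getAsLong() >= indexToWaitFor) {
        return;
      }
      Thread.sleep(checkInterval.toMillis());
    }
    throw new IOException(String.format(
        "Index %d was not applied within %d seconds.",
        indexToWaitFor, timeout.getSeconds()));
  }
}

In OMPrepareRequest the success condition is stricter (the DB transaction index must equal the prepare index while the Ratis applied index may run ahead), but the timeout and retry behavior follow the same shape.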
diff --cc hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java
index 50afa1a,8df7792..03956a2
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java
@@@ -32,7 -32,8 +34,9 @@@ import org.apache.hadoop.ozone.om.respo
  import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
  import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
  import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType;
+ import org.apache.hadoop.ozone.security.acl.OzoneObj;
+ import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
  import org.apache.hadoop.util.Time;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
@@@ -131,12 -140,11 +143,15 @@@ public class OMVolumeAddAclRequest exte
        LOG.error("Unrecognized Result for OMVolumeAddAclRequest: {}",
            getOmRequest());
      }
+ 
+     auditLog(auditLogger, buildAuditMessage(OMAction.ADD_ACL, auditMap,
+         ex, getOmRequest().getUserInfo()));
    }
  
 +  public static String getRequestType() {
 +    return AddAcl.name() + "-" + ObjectType.VOLUME;
 +  }
 +
    @Override
    public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
        long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
diff --cc hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java
index cc5ac72,4ab55f3..9277c04
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java
@@@ -32,7 -32,8 +34,9 @@@ import org.apache.hadoop.ozone.om.respo
  import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
  import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
  import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType;
+ import org.apache.hadoop.ozone.security.acl.OzoneObj;
+ import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
  import org.apache.hadoop.util.Time;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
@@@ -130,12 -140,10 +143,14 @@@ public class OMVolumeRemoveAclRequest e
        LOG.error("Unrecognized Result for OMVolumeRemoveAclRequest: {}",
            getOmRequest());
      }
+     auditLog(auditLogger, buildAuditMessage(OMAction.REMOVE_ACL, auditMap,
+         ex, getOmRequest().getUserInfo()));
    }
  
 +  public static String getRequestType() {
 +    return RemoveAcl.name() + "-" + ObjectType.VOLUME;
 +  }
 +
    @Override
    public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
        long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
diff --cc hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java
index 0c56af5,7102509..d6a054c
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java
@@@ -31,7 -31,8 +33,9 @@@ import org.apache.hadoop.ozone.om.respo
  import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
  import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
  import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType;
+ import org.apache.hadoop.ozone.security.acl.OzoneObj;
+ import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
  import org.apache.hadoop.util.Time;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
@@@ -127,12 -137,11 +140,15 @@@ public class OMVolumeSetAclRequest exte
        LOG.error("Unrecognized Result for OMVolumeSetAclRequest: {}",
            getOmRequest());
      }
+ 
+     auditLog(auditLogger, buildAuditMessage(OMAction.SET_ACL, auditMap,
+         ex, getOmRequest().getUserInfo()));
    }
  
 +  public static String getRequestType() {
 +    return SetAcl.name() + "-" + ObjectType.VOLUME;
 +  }
 +
    @Override
    public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
        long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
diff --cc hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
index 9d77e50,c118307..bdf11da
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
@@@ -16,11 -16,7 +16,10 @@@
   */
  package org.apache.hadoop.ozone.protocolPB;
  
 +import static org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils.getRequest;
 +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.PrepareStatus;
 +
  import java.io.IOException;
- import java.util.Optional;
  import java.util.concurrent.ExecutionException;
  import java.util.concurrent.atomic.AtomicLong;
  
@@@ -129,9 -131,11 +133,10 @@@ public class OzoneManagerProtocolServer
        if (OmUtils.isReadOnly(request)) {
          return submitReadRequestToOM(request);
        } else {
-         if (omRatisServer.isLeader()) {
+         raftServerStatus = omRatisServer.checkLeaderStatus();
+         if (raftServerStatus == LEADER_AND_READY) {
            try {
 -            OMClientRequest omClientRequest =
 -                OzoneManagerRatisUtils.createClientRequest(request);
 +            OMClientRequest omClientRequest = getRequest(ozoneManager, request);
              request = omClientRequest.preExecute(ozoneManager);
            } catch (IOException ex) {
              // As some of the preExecute returns error. So handle here.
@@@ -186,11 -185,11 +186,12 @@@
    private OMResponse submitReadRequestToOM(OMRequest request)
        throws ServiceException {
      // Check if this OM is the leader.
-     if (omRatisServer.isLeader() ||
+     RaftServerStatus raftServerStatus = omRatisServer.checkLeaderStatus();
 -    if (raftServerStatus == LEADER_AND_READY) {
++    if (raftServerStatus == LEADER_AND_READY ||
 +        request.getCmdType().equals(PrepareStatus)) {
        return handler.handleReadRequest(request);
      } else {
-       throw createNotLeaderException();
+       throw createLeaderErrorException(raftServerStatus);
      }
    }
  
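As a rough illustration of the routing rule these hunks introduce (sketch only, not from the commit; all names except PrepareStatus are hypothetical): read requests are normally served only by a ready leader, but a PrepareStatus query is answered by any OM so callers can check each node's prepare state.

public final class ReadRoutingSketch {

  enum LeaderStatus { LEADER_AND_READY, NOT_LEADER, LEADER_NOT_READY }

  enum CmdType { PREPARE_STATUS, OTHER }

  private ReadRoutingSketch() {
  }

  // A read may be served locally when this OM is the ready leader, or when
  // the request only asks for this node's own prepare status.
  static boolean canServeRead(LeaderStatus status, CmdType cmdType) {
    return status == LeaderStatus.LEADER_AND_READY
        || cmdType == CmdType.PREPARE_STATUS;
  }
}
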
diff --cc hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerStarter.java
index 2e21b7e,06767fe..b64edce
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerStarter.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerStarter.java
@@@ -115,27 -119,11 +119,28 @@@ public class TestOzoneManagerStarter 
    }
  
    @Test
 +  public void testCallsStartAndCancelPrepareWithUpgradeFlag() {
 +    executeCommand("--upgrade");
 +    assertTrue(mock.startAndCancelPrepareCalled);
 +  }
 +
 +  @Test
 +  public void testUnsuccessfulUpgradeThrowsException() {
 +    mock.throwOnStartAndCancelPrepare = true;
 +    try {
 +      executeCommand("--upgrade");
 +      fail("Exception show have been thrown");
 +    } catch (Exception e) {
 +      assertTrue(true);
 +    }
 +  }
 +
 +  @Test
-   public void testUsagePrintedOnInvalidInput() {
+   public void testUsagePrintedOnInvalidInput()
+       throws UnsupportedEncodingException {
      executeCommand("--invalid");
      Pattern p = Pattern.compile("^Unknown option:.*--invalid.*\nUsage");
-     Matcher m = p.matcher(errContent.toString());
+     Matcher m = p.matcher(errContent.toString(DEFAULT_ENCODING));
      assertTrue(m.find());
    }
  
diff --cc hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java
index ec931f4,3b94b0b..aa90fbf
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java
@@@ -81,8 -81,7 +81,8 @@@ public class ClusterStateEndpoint 
      int containers = this.containerManager.getContainerIDs().size();
      int pipelines = this.pipelineManager.getPipelines().size();
      int healthyDatanodes =
-         nodeManager.getNodeCount(NodeState.HEALTHY) +
-             nodeManager.getNodeCount(NodeState.HEALTHY_READONLY);
 -        nodeManager.getNodeCount(NodeStatus.inServiceHealthy());
++        nodeManager.getNodeCount(NodeStatus.inServiceHealthy()) +
++            nodeManager.getNodeCount(NodeStatus.inServiceHealthyReadOnly());
      SCMNodeStat stats = nodeManager.getStats();
      DatanodeStorageReport storageReport =
          new DatanodeStorageReport(stats.getCapacity().get(),
diff --cc hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java
index 55b1d90,1979550..871e039
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java
@@@ -132,10 -128,13 +134,13 @@@ public class ReconNodeManager extends S
     * @return SCMheartbeat response.
     */
    @Override
 -  public List<SCMCommand> processHeartbeat(DatanodeDetails datanodeDetails) {
 +  public List<SCMCommand> processHeartbeat(DatanodeDetails datanodeDetails,
 +                                           LayoutVersionProto layoutInfo) {
      // Update heartbeat map with current time
      datanodeHeartbeatMap.put(datanodeDetails.getUuid(), Time.now());
-     return super.processHeartbeat(datanodeDetails, layoutInfo);
 -
 -    List<SCMCommand> cmds = super.processHeartbeat(datanodeDetails);
++    List<SCMCommand> cmds = super.processHeartbeat(datanodeDetails, layoutInfo);
+     return cmds.stream()
+         .filter(c -> ALLOWED_COMMANDS.contains(c.getType()))
+         .collect(toList());
    }
  }
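
The heartbeat change above boils down to an allow-list filter over the commands queued for a datanode. Below is a standalone sketch (not from the commit, with hypothetical command types) of the same stream-based filtering.

import java.util.EnumSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

public final class HeartbeatCommandFilter {

  enum CommandType { REREGISTER, SET_NODE_OPERATIONAL_STATE, CLOSE_CONTAINER }

  interface Command {
    CommandType getType();
  }

  // Only command types the read-only service is allowed to send back.
  private static final Set<CommandType> ALLOWED =
      EnumSet.of(CommandType.REREGISTER);

  private HeartbeatCommandFilter() {
  }

  // Drops every queued command whose type is not on the allow-list.
  static List<Command> filter(List<Command> queued) {
    return queued.stream()
        .filter(c -> ALLOWED.contains(c.getType()))
        .collect(Collectors.toList());
  }
}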
diff --cc hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
index acca61d,f5cdbdf..de86d61
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
@@@ -135,19 -131,14 +133,16 @@@ public class TestEndpoints extends Abst
    private Pipeline pipeline;
    private FileCountBySizeDao fileCountBySizeDao;
    private DSLContext dslContext;
-   private final String host1 = "host1.datanode";
-   private final String host2 = "host2.datanode";
-   private final String ip1 = "1.1.1.1";
-   private final String ip2 = "2.2.2.2";
-   private final String prometheusTestResponseFile =
+   private static final String HOST1 = "host1.datanode";
+   private static final String HOST2 = "host2.datanode";
+   private static final String IP1 = "1.1.1.1";
+   private static final String IP2 = "2.2.2.2";
+   private static final String PROMETHEUS_TEST_RESPONSE_FILE =
        "prometheus-test-response.txt";
    private ReconUtils reconUtilsMock;
 +  private static final int TEST_SOFTWARE_LAYOUT_VERSION = 0;
 +  private static final int TEST_METADATA_LAYOUT_VERSION = 0;
  
-   @Rule
-   public TemporaryFolder temporaryFolder = new TemporaryFolder();
- 
    private void initializeInjector() throws Exception {
      reconOMMetadataManager = getTestReconOmMetadataManager(
          initializeNewOmMetadataManager(temporaryFolder.newFolder()),
diff --cc hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java
index c39b714,ee7f8ca..b1e7aa5
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java
@@@ -30,6 -31,8 +31,9 @@@ import java.util.UUID
  
  import org.apache.hadoop.hdds.conf.OzoneConfiguration;
  import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
++import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
+ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
  import org.apache.hadoop.hdds.scm.net.NetworkTopology;
  import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
  import org.apache.hadoop.hdds.server.events.EventQueue;
@@@ -96,6 -97,25 +103,26 @@@ public class TestReconNodeManager 
      assertEquals(1, reconNodeManager.getAllNodes().size());
      assertNotNull(reconNodeManager.getNodeByUuid(uuidString));
  
+     // If any commands are added to the eventQueue without using the onMessage
+     // interface, then they should be filtered out and not returned to the DN
+     // when it heartbeats.
+     // This command should never be returned by Recon
+     reconNodeManager.addDatanodeCommand(datanodeDetails.getUuid(),
+         new SetNodeOperationalStateCommand(1234,
+         HddsProtos.NodeOperationalState.DECOMMISSIONING, 0));
+ 
+     // This one should be returned
+     reconNodeManager.addDatanodeCommand(datanodeDetails.getUuid(),
+         new ReregisterCommand());
+ 
+     // Upon processing the heartbeat, the illegal command should be filtered out
+     List<SCMCommand> returnedCmds =
 -        reconNodeManager.processHeartbeat(datanodeDetails);
++        reconNodeManager.processHeartbeat(datanodeDetails,
++            LayoutVersionProto.newBuilder().build());
+     assertEquals(1, returnedCmds.size());
+     assertEquals(SCMCommandProto.Type.reregisterCommand,
+         returnedCmds.get(0).getType());
+ 
      // Close the DB, and recreate the instance of Recon Node Manager.
      eventQueue.close();
      reconNodeManager.close();




[ozone] 03/04: trigger new CI check

Posted by av...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

avijayan pushed a commit to branch HDDS-3698-nonrolling-upgrade
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit ba4aeca5ae6d063a964cab7127508faf45896064
Author: Aravindan Vijayan <av...@cloudera.com>
AuthorDate: Mon Feb 8 14:18:12 2021 -0800

    trigger new CI check

