Posted to commits@ozone.apache.org by av...@apache.org on 2021/02/26 16:45:03 UTC

[ozone] branch HDDS-3698-nonrolling-upgrade updated: HDDS-4817. Fresh deploy of Ozone must use the highest layout version by default. (#1933)

This is an automated email from the ASF dual-hosted git repository.

avijayan pushed a commit to branch HDDS-3698-nonrolling-upgrade
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/HDDS-3698-nonrolling-upgrade by this push:
     new 447024b  HDDS-4817. Fresh deploy of Ozone must use the highest layout version by default. (#1933)
447024b is described below

commit 447024be296908f9b7b0be9e6ad5da1f748a8682
Author: avijayanhwx <14...@users.noreply.github.com>
AuthorDate: Fri Feb 26 08:44:42 2021 -0800

    HDDS-4817. Fresh deploy of Ozone must use the highest layout version by default. (#1933)
---
 .../hadoop/hdds/upgrade/HDDSLayoutFeature.java     |   3 +-
 .../hdds/upgrade/HDDSLayoutVersionManager.java     |  10 +-
 .../java/org/apache/hadoop/ozone/OzoneConsts.java  |   5 +-
 .../org/apache/hadoop/ozone/common/Storage.java    |  14 ++-
 .../apache/hadoop/ozone/common/StorageInfo.java    |  10 +-
 .../upgrade/AbstractLayoutVersionManager.java      |  50 ++++----
 .../upgrade/TestAbstractLayoutVersionManager.java  |   2 +-
 ...orageConfig.java => DatanodeLayoutStorage.java} |  39 ++----
 ...utVersion.java => HDDSVolumeLayoutVersion.java} |  16 +--
 .../common/statemachine/DatanodeStateMachine.java  |  41 +++---
 .../FinalizeNewLayoutVersionCommandHandler.java    |   2 +-
 .../states/endpoint/HeartbeatEndpointTask.java     |  15 +--
 .../states/endpoint/RegisterEndpointTask.java      |   4 +-
 .../container/common/utils/HddsVolumeUtil.java     |   6 +-
 .../ozone/container/common/volume/HddsVolume.java  |   7 +-
 .../container/upgrade/DataNodeLayoutAction.java    |   5 +-
 .../upgrade/DataNodeUpgradeFinalizer.java          |  15 ++-
 .../upgrade/DatanodeMetadataFeatures.java          |   2 +-
 ...on.java => DatanodeSchemaV2FinalizeAction.java} |   9 +-
 ...nFirstUpgradeVersion.java => UpgradeUtils.java} |  39 +++---
 ...inerDatanodeProtocolServerSideTranslatorPB.java |   8 ++
 .../common/TestDatanodeLayOutVersion.java          |  10 +-
 .../common/helpers/TestDatanodeVersionFile.java    |   4 +-
 .../states/endpoint/TestHeartbeatEndpointTask.java |   8 +-
 .../upgrade/TestDataNodeStartupSlvLessThanMlv.java |  13 +-
 .../apache/hadoop/hdds/scm/node/DatanodeInfo.java  |  19 ++-
 .../apache/hadoop/hdds/scm/node/NodeManager.java   |  21 ++++
 .../hadoop/hdds/scm/node/NodeStateManager.java     |  10 +-
 .../node/ReadOnlyHealthyToHealthyNodeHandler.java  |   5 +-
 .../hadoop/hdds/scm/node/SCMNodeManager.java       |  33 +++--
 .../scm/server/SCMDatanodeHeartbeatDispatcher.java |  20 ++-
 .../hadoop/hdds/scm/server/SCMStorageConfig.java   |  12 +-
 .../hdds/scm/server/StorageContainerManager.java   |  14 +--
 .../hdds/scm/server/upgrade/SCMLayoutAction.java   |   7 +-
 .../scm/server/upgrade/SCMUpgradeFinalizer.java    |  10 ++
 ...=> ScmOnFinalizeActionForDatanodeSchemaV2.java} |  13 +-
 .../java/org/apache/hadoop/hdds/scm/TestUtils.java |   2 +-
 .../hadoop/hdds/scm/container/MockNodeManager.java |   4 +-
 .../TestIncrementalContainerReportHandler.java     |  31 +++--
 .../hdds/scm/node/TestContainerPlacement.java      |  16 +--
 .../hadoop/hdds/scm/node/TestDeadNodeHandler.java  |  18 +--
 .../hdds/scm/node/TestNodeDecommissionManager.java |   4 +-
 .../hdds/scm/node/TestNodeReportHandler.java       |  11 +-
 .../hadoop/hdds/scm/node/TestNodeStateManager.java |  16 +--
 .../hadoop/hdds/scm/node/TestSCMNodeManager.java   | 138 ++++++++++-----------
 .../hadoop/hdds/scm/node/TestStatisticsUpdate.java |   4 +-
 .../scm/server/TestSCMBlockProtocolServer.java     |   2 +-
 .../ozone/container/common/TestEndPoint.java       |  19 ++-
 .../hadoop/ozone/scm/node/TestSCMNodeMetrics.java  |   9 +-
 .../hadoop/hdds/upgrade/TestHDDSUpgrade.java       |  42 ++++---
 .../org/apache/hadoop/ozone/MiniOzoneCluster.java  |  20 +++
 .../apache/hadoop/ozone/MiniOzoneClusterImpl.java  |  41 +++++-
 .../hadoop/ozone/MiniOzoneHAClusterImpl.java       |   2 +-
 .../java/org/apache/hadoop/ozone/om/OMStorage.java |   8 +-
 .../om/upgrade/ECFeatureOnFinalizeAction.java      |  32 -----
 .../hadoop/ozone/om/upgrade/OMLayoutFeature.java   |   4 +-
 .../ozone/om/upgrade/OMLayoutVersionManager.java   |  11 +-
 .../om/request/UnsupportedMockNewOMRequest.java    |  59 ---------
 .../om/request/key/OMMockECKeyCreateRequest.java   |  54 --------
 .../ozone/om/upgrade/OMLayoutFeatureUtil.java      |   7 +-
 .../om/upgrade/TestOMLayoutFeatureAspect.java      |   4 +-
 .../ozone/om/upgrade/TestOMVersionManager.java     |  34 -----
 .../TestOmVersionManagerRequestFactory.java        |  13 --
 .../hadoop/ozone/recon/scm/ReconNodeManager.java   |  15 ++-
 .../scm/ReconStorageContainerManagerFacade.java    |   2 +-
 .../hadoop/ozone/recon/api/TestEndpoints.java      |  11 +-
 .../scm/AbstractReconContainerManagerTest.java     |   7 +-
 ...TestReconIncrementalContainerReportHandler.java |   7 +-
 .../ozone/recon/scm/TestReconNodeManager.java      |  10 +-
 .../ozone/recon/scm/TestReconPipelineManager.java  |  12 +-
 70 files changed, 549 insertions(+), 621 deletions(-)
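
For context, the core of this change is that freshly initialized storage
(SCM, OM, and the new datanode layout storage) now defaults to the highest
layout version known to the software instead of a hard-coded 0, so a fresh
deploy starts out already finalized. A minimal, self-contained sketch of that
idea follows; it is illustrative only and uses a hypothetical Feature enum
rather than the actual HDDSLayoutFeature type:

    // Sketch only: shows how the highest layout version can be derived from
    // an ordered feature enum, as the new maxLayoutVersion() helper does.
    public final class LayoutVersionSketch {

      // Hypothetical stand-in for HDDSLayoutFeature; each feature carries a
      // layout version, and the enum is declared in ascending version order.
      enum Feature {
        INITIAL_VERSION(0),
        DATANODE_SCHEMA_V2(1);

        private final int version;
        Feature(int version) { this.version = version; }
        int layoutVersion() { return version; }
      }

      // The last enum constant holds the highest version, so fresh installs
      // can be initialized at this value instead of version 0.
      static int maxLayoutVersion() {
        Feature[] features = Feature.values();
        return features[features.length - 1].layoutVersion();
      }

      public static void main(String[] args) {
        // With the two features above this prints 1, i.e. a fresh deploy
        // would start at the latest layout version, already finalized.
        System.out.println("Fresh deploy layout version: " + maxLayoutVersion());
      }
    }

Upgraded clusters, by contrast, keep whatever (lower) metadata layout version
is persisted in their VERSION file until finalization runs.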

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeature.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeature.java
index 672c3ff..91a663e 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeature.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeature.java
@@ -29,7 +29,8 @@ import org.apache.hadoop.ozone.upgrade.LayoutFeature;
 public enum HDDSLayoutFeature implements LayoutFeature {
   //////////////////////////////  //////////////////////////////
   INITIAL_VERSION(0, "Initial Layout Version"),
-  FIRST_UPGRADE_VERSION(1, "First Layout Version After Upgrade");
+  DATANODE_SCHEMA_V2(1, "Datanode RocksDB Schema Version 2 (with column " +
+      "families)");
 
   //////////////////////////////  //////////////////////////////
 
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutVersionManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutVersionManager.java
index 92f230e..7d04913 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutVersionManager.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutVersionManager.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hdds.upgrade;
 
 import java.io.IOException;
 
-import org.apache.hadoop.ozone.common.Storage;
 import org.apache.hadoop.ozone.upgrade.AbstractLayoutVersionManager;
 
 /**
@@ -32,7 +31,12 @@ import org.apache.hadoop.ozone.upgrade.AbstractLayoutVersionManager;
 public class HDDSLayoutVersionManager extends
     AbstractLayoutVersionManager<HDDSLayoutFeature> {
 
-  public HDDSLayoutVersionManager(Storage hddsStorage) throws IOException {
-    init(hddsStorage.getLayoutVersion(), HDDSLayoutFeature.values());
+  public HDDSLayoutVersionManager(int layoutVersion) throws IOException {
+    init(layoutVersion, HDDSLayoutFeature.values());
+  }
+
+  public static int maxLayoutVersion() {
+    HDDSLayoutFeature[] features = HDDSLayoutFeature.values();
+    return features[features.length - 1].layoutVersion();
   }
 }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index b21ef02..fdbb3a0 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -44,7 +44,7 @@ public final class OzoneConsts {
 
   public static final String STORAGE_ID = "storageID";
   public static final String DATANODE_UUID = "datanodeUuid";
-  public static final String DATANODE_STORAGE_DIR = "datanodeStorageConfig";
+  public static final String DATANODE_LAYOUT_VERSION_DIR = "dnlayoutversion";
   public static final String CLUSTER_ID = "clusterID";
   public static final String LAYOUTVERSION = "layOutVersion";
   public static final String CTIME = "ctime";
@@ -397,5 +397,6 @@ public final class OzoneConsts {
 
   public static final String OM_RATIS_SNAPSHOT_DIR = "snapshot";
 
-  public static final long DEFAULT_OM_UPDATE_ID = -1L;  
+  public static final long DEFAULT_OM_UPDATE_ID = -1L;
+
 }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
index 1992b59..b8ad10e 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
@@ -48,7 +48,6 @@ public abstract class Storage {
   public static final String STORAGE_DIR_CURRENT = "current";
   protected static final String STORAGE_FILE_VERSION = "VERSION";
   public static final String CONTAINER_DIR = "containerDir";
-  private static final int LAYOUT_VERSION = 0;
 
   private final NodeType nodeType;
   private final File root;
@@ -65,7 +64,14 @@ public abstract class Storage {
     NON_EXISTENT, NOT_INITIALIZED, INITIALIZED
   }
 
-  public Storage(NodeType type, File root, String sdName)
+  public Storage(NodeType type, File root, String sdName,
+                 int defaultLayoutVersion)
+      throws IOException {
+    this(type, root, sdName, StorageInfo.newClusterID(), defaultLayoutVersion);
+  }
+
+  public Storage(NodeType type, File root, String sdName,
+                 String id, int defaultLayoutVersion)
       throws IOException {
     this.nodeType = type;
     this.root = root;
@@ -74,8 +80,8 @@ public abstract class Storage {
     if (state == StorageState.INITIALIZED) {
       this.storageInfo = new StorageInfo(type, getVersionFile());
     } else {
-      this.storageInfo = new StorageInfo(
-          nodeType, StorageInfo.newClusterID(), Time.now(), LAYOUT_VERSION);
+      this.storageInfo = new StorageInfo(nodeType, id, Time.now(),
+          defaultLayoutVersion);
       setNodeProperties();
     }
   }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
index 18871a5..87d912c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
@@ -17,9 +17,13 @@
  */
 package org.apache.hadoop.ozone.common;
 
+import static org.apache.hadoop.ozone.common.Storage.STORAGE_FILE_VERSION;
+
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.FileInputStream;
@@ -38,6 +42,8 @@ import java.util.UUID;
 @InterfaceAudience.Private
 public class StorageInfo {
 
+  public static final Logger LOG = LoggerFactory.getLogger(StorageInfo.class);
+
   private Properties properties = new Properties();
 
   /**
@@ -128,7 +134,9 @@ public class StorageInfo {
   private void verifyLayoutVersion() {
     String layout = getProperty(LAYOUT_VERSION);
     if (layout == null) {
-      // For now, default it to "0"
+      LOG.warn("Found " + STORAGE_FILE_VERSION + " file without any layout " +
+          "version. Defaulting to 0. This should happen only if a cluster is " +
+          "being upgraded from 0.5.0.");
       setProperty(LAYOUT_VERSION, "0");
     }
   }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/AbstractLayoutVersionManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/AbstractLayoutVersionManager.java
index c3ecb54..ef74ec7 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/AbstractLayoutVersionManager.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/AbstractLayoutVersionManager.java
@@ -32,6 +32,8 @@ import java.util.stream.Collectors;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.Status;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Layout Version Manager containing generic method implementations.
@@ -40,33 +42,38 @@ import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.Status;
 public abstract class AbstractLayoutVersionManager<T extends LayoutFeature>
     implements LayoutVersionManager, LayoutVersionManagerMXBean {
 
+  private static final Logger LOG =
+      LoggerFactory.getLogger(AbstractLayoutVersionManager.class);
+
   protected int metadataLayoutVersion; // MLV.
   protected int softwareLayoutVersion; // SLV.
   protected TreeMap<Integer, T> features = new TreeMap<>();
   protected Map<String, T> featureMap = new HashMap<>();
-  protected volatile boolean isInitialized = false;
-  protected volatile Status currentUpgradeState =
-      FINALIZATION_REQUIRED;
+  protected volatile Status currentUpgradeState = FINALIZATION_REQUIRED;
 
   protected void init(int version, T[] lfs) throws IOException {
 
-    if (!isInitialized) {
-      metadataLayoutVersion = version;
-      initializeFeatures(lfs);
-      softwareLayoutVersion = features.lastKey();
-      isInitialized = true;
-      if (softwareIsBehindMetaData()) {
-        throw new IOException(
-            String.format("Cannot initialize VersionManager. Metadata " +
-                    "layout version (%d) > software layout version (%d)",
-                metadataLayoutVersion, softwareLayoutVersion));
-      } else if (metadataLayoutVersion == softwareLayoutVersion) {
-        currentUpgradeState = ALREADY_FINALIZED;
-      }
+    metadataLayoutVersion = version;
+    initializeFeatures(lfs);
+    softwareLayoutVersion = features.lastKey();
+    if (softwareIsBehindMetaData()) {
+      throw new IOException(
+          String.format("Cannot initialize VersionManager. Metadata " +
+                  "layout version (%d) > software layout version (%d)",
+              metadataLayoutVersion, softwareLayoutVersion));
+    } else if (metadataLayoutVersion == softwareLayoutVersion) {
+      currentUpgradeState = ALREADY_FINALIZED;
     }
 
+    LayoutFeature mlvFeature = features.get(metadataLayoutVersion);
+    LayoutFeature slvFeature = features.get(softwareLayoutVersion);
+    LOG.info("Initializing Layout version manager with metadata layout" +
+        " = {} (version = {}), software layout = {} (version = {})",
+        mlvFeature, mlvFeature.layoutVersion(),
+        slvFeature, slvFeature.layoutVersion());
+
     MBeans.register("LayoutVersionManager",
-        "AbstractLayoutVersionManager", this);
+        getClass().getSimpleName(), this);
   }
 
   public Status getUpgradeState() {
@@ -86,15 +93,6 @@ public abstract class AbstractLayoutVersionManager<T extends LayoutFeature>
     });
   }
 
-  protected void reset() {
-    metadataLayoutVersion = 0;
-    softwareLayoutVersion = 0;
-    featureMap.clear();
-    features.clear();
-    isInitialized = false;
-    currentUpgradeState = ALREADY_FINALIZED;
-  }
-
   public void finalized(T layoutFeature) {
     if (layoutFeature.layoutVersion() == metadataLayoutVersion + 1) {
       metadataLayoutVersion = layoutFeature.layoutVersion();
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestAbstractLayoutVersionManager.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestAbstractLayoutVersionManager.java
index e5c707f..0fcb257 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestAbstractLayoutVersionManager.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestAbstractLayoutVersionManager.java
@@ -167,7 +167,7 @@ public class TestAbstractLayoutVersionManager {
     MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
     ObjectName bean = new ObjectName(
         "Hadoop:service=LayoutVersionManager," +
-            "name=AbstractLayoutVersionManager");
+            "name=" + versionManager.getClass().getSimpleName());
 
     Object mlv = mbs.getAttribute(bean, "MetadataLayoutVersion");
     assertEquals(metadataLayoutVersion, mlv);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeStorageConfig.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DatanodeLayoutStorage.java
similarity index 60%
rename from hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeStorageConfig.java
rename to hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DatanodeLayoutStorage.java
index 03c5e70..d0009aa 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeStorageConfig.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DatanodeLayoutStorage.java
@@ -17,13 +17,12 @@
  */
 package org.apache.hadoop.ozone.container.common;
 
-import static org.apache.hadoop.ozone.OzoneConsts.DATANODE_STORAGE_DIR;
-import static org.apache.hadoop.ozone.OzoneConsts.DATANODE_UUID;
+import static org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager.maxLayoutVersion;
+import static org.apache.hadoop.ozone.OzoneConsts.DATANODE_LAYOUT_VERSION_DIR;
 
 import java.io.File;
 import java.io.IOException;
 import java.util.Properties;
-import java.util.UUID;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
@@ -34,44 +33,32 @@ import org.apache.hadoop.ozone.common.Storage;
  * DataNodeStorageConfig is responsible for management of the
  * StorageDirectories used by the DataNode.
  */
-public class DataNodeStorageConfig extends Storage {
+public class DatanodeLayoutStorage extends Storage {
 
   /**
    * Construct DataNodeStorageConfig.
    * @throws IOException if any directories are inaccessible.
    */
-  public DataNodeStorageConfig(OzoneConfiguration conf, String dataNodeId)
+  public DatanodeLayoutStorage(OzoneConfiguration conf, String dataNodeId)
       throws IOException {
     super(NodeType.DATANODE, ServerUtils.getOzoneMetaDirPath(conf),
-        DATANODE_STORAGE_DIR);
-    setDataNodeId(dataNodeId);
+        DATANODE_LAYOUT_VERSION_DIR, dataNodeId, maxLayoutVersion());
   }
 
-  public DataNodeStorageConfig(NodeType type, File root, String sdName)
+  public DatanodeLayoutStorage(OzoneConfiguration conf, String dataNodeId,
+                               int layoutVersion)
       throws IOException {
-    super(type, root, sdName);
-  }
-
-  public void setDataNodeId(String dataNodeId) throws IOException {
-    getStorageInfo().setProperty(DATANODE_UUID, dataNodeId);
+    super(NodeType.DATANODE, ServerUtils.getOzoneMetaDirPath(conf),
+        DATANODE_LAYOUT_VERSION_DIR, dataNodeId, layoutVersion);
   }
 
-  /**
-   * Retrieves the DataNode ID from the version file.
-   * @return DataNodeId
-   */
-  public String getDataNodeId() {
-    return getStorageInfo().getProperty(DATANODE_UUID);
+  @Override
+  public File getCurrentDir() {
+    return new File(getStorageDir());
   }
 
   @Override
   protected Properties getNodeProperties() {
-    String dataNodeId = getDataNodeId();
-    if (dataNodeId == null) {
-      dataNodeId = UUID.randomUUID().toString();
-    }
-    Properties datanodeProperties = new Properties();
-    datanodeProperties.setProperty(DATANODE_UUID, dataNodeId);
-    return datanodeProperties;
+    return new Properties();
   }
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeLayoutVersion.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/HDDSVolumeLayoutVersion.java
similarity index 78%
rename from hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeLayoutVersion.java
rename to hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/HDDSVolumeLayoutVersion.java
index b914659..711d2ba 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeLayoutVersion.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/HDDSVolumeLayoutVersion.java
@@ -18,14 +18,14 @@
 package org.apache.hadoop.ozone.container.common;
 
 /**
- * Datanode layout version which describes information about the layout version
- * on the datanode.
+ * Layout version which describes information about the layout version
+ * on the datanode volume.
  */
-public final class DataNodeLayoutVersion {
+public final class HDDSVolumeLayoutVersion {
 
   // We will just be normal and use positive counting numbers for versions.
-  private static final DataNodeLayoutVersion[] VERSION_INFOS =
-      {new DataNodeLayoutVersion(1, "HDDS Datanode LayOut Version 1")};
+  private static final HDDSVolumeLayoutVersion[] VERSION_INFOS =
+      {new HDDSVolumeLayoutVersion(1, "HDDS Datanode LayOut Version 1")};
 
   private final String description;
   private final int version;
@@ -36,7 +36,7 @@ public final class DataNodeLayoutVersion {
    * @param description -- description
    * @param version     -- version number
    */
-  private DataNodeLayoutVersion(int version, String description) {
+  private HDDSVolumeLayoutVersion(int version, String description) {
     this.description = description;
     this.version = version;
   }
@@ -46,7 +46,7 @@ public final class DataNodeLayoutVersion {
    *
    * @return Version info array.
    */
-  public static DataNodeLayoutVersion[] getAllVersions() {
+  public static HDDSVolumeLayoutVersion[] getAllVersions() {
     return VERSION_INFOS.clone();
   }
 
@@ -55,7 +55,7 @@ public final class DataNodeLayoutVersion {
    *
    * @return versionInfo
    */
-  public static DataNodeLayoutVersion getLatestVersion() {
+  public static HDDSVolumeLayoutVersion getLatestVersion() {
     return VERSION_INFOS[VERSION_INFOS.length - 1];
   }
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
index acaa8f8..4834a68 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
@@ -40,7 +40,7 @@ import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient
 import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
 import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
 import org.apache.hadoop.ozone.HddsDatanodeStopService;
-import org.apache.hadoop.ozone.container.common.DataNodeStorageConfig;
+import org.apache.hadoop.ozone.container.common.DatanodeLayoutStorage;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.report.ReportManager;
 import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.CloseContainerCommandHandler;
@@ -59,7 +59,6 @@ import org.apache.hadoop.ozone.container.replication.DownloadAndImportReplicator
 import org.apache.hadoop.ozone.container.replication.MeasuredReplicator;
 import org.apache.hadoop.ozone.container.replication.ReplicationSupervisor;
 import org.apache.hadoop.ozone.container.replication.SimpleContainerDownloader;
-import org.apache.hadoop.ozone.container.upgrade.DataNodeLayoutAction;
 import org.apache.hadoop.ozone.container.upgrade.DataNodeUpgradeFinalizer;
 import org.apache.hadoop.ozone.container.upgrade.DatanodeMetadataFeatures;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
@@ -98,8 +97,8 @@ public class DatanodeStateMachine implements Closeable {
   private CertificateClient dnCertClient;
   private final HddsDatanodeStopService hddsDatanodeStopService;
 
-  private HDDSLayoutVersionManager dataNodeVersionManager;
-  private DataNodeStorageConfig dataNodeStorageConfig;
+  private HDDSLayoutVersionManager layoutVersionManager;
+  private DatanodeLayoutStorage layoutStorage;
   private DataNodeUpgradeFinalizer upgradeFinalizer;
 
   /**
@@ -128,19 +127,16 @@ public class DatanodeStateMachine implements Closeable {
     this.conf = conf;
     this.datanodeDetails = datanodeDetails;
 
-    loadDataNodeUpgradeActions();
-    dataNodeStorageConfig = new DataNodeStorageConfig(conf,
+    layoutStorage = new DatanodeLayoutStorage(conf,
         datanodeDetails.getUuidString());
-    if (dataNodeStorageConfig.getState() != INITIALIZED) {
-      dataNodeStorageConfig.initialize();
+    if (layoutStorage.getState() != INITIALIZED) {
+      layoutStorage.initialize();
     }
-
-    dataNodeVersionManager =
-        new HDDSLayoutVersionManager(dataNodeStorageConfig);
-    upgradeFinalizer = new DataNodeUpgradeFinalizer(dataNodeVersionManager,
+    layoutVersionManager = new HDDSLayoutVersionManager(
+        layoutStorage.getLayoutVersion());
+    upgradeFinalizer = new DataNodeUpgradeFinalizer(layoutVersionManager,
         datanodeDetails.getUuidString());
-    DatanodeMetadataFeatures.
-        initialize(dataNodeVersionManager);
+    DatanodeMetadataFeatures.initialize(layoutVersionManager);
 
     executorService = Executors.newFixedThreadPool(
         getEndPointTaskThreadPoolSize(),
@@ -598,13 +594,13 @@ public class DatanodeStateMachine implements Closeable {
   }
 
   @VisibleForTesting
-  public HDDSLayoutVersionManager getDataNodeVersionManager() {
-    return dataNodeVersionManager;
+  public HDDSLayoutVersionManager getLayoutVersionManager() {
+    return layoutVersionManager;
   }
 
   @VisibleForTesting
-  public DataNodeStorageConfig getDataNodeStorageConfig() {
-    return dataNodeStorageConfig;
+  public DatanodeLayoutStorage getLayoutStorage() {
+    return layoutStorage;
   }
 
   @VisibleForTesting
@@ -643,15 +639,6 @@ public class DatanodeStateMachine implements Closeable {
     return upgradeFinalizer.finalize(datanodeDetails.getUuidString(), this);
   }
 
-  private void loadDataNodeUpgradeActions() {
-    // we just need to iterate through the enum list to load
-    // the actions.
-    for (DataNodeLayoutAction action : DataNodeLayoutAction.values()) {
-      LOG.info("Loading datanode action for {}",
-          action.getHddsFeature().description());
-    }
-  }
-
   public StatusAndMessages queryUpgradeStatus()
       throws IOException{
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/FinalizeNewLayoutVersionCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/FinalizeNewLayoutVersionCommandHandler.java
index d74f867..3cbc09e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/FinalizeNewLayoutVersionCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/FinalizeNewLayoutVersionCommandHandler.java
@@ -70,7 +70,7 @@ public class FinalizeNewLayoutVersionCommandHandler implements CommandHandler {
     try {
       if (finalizeCommand.getFinalizeNewLayoutVersion()) {
         // SCM is asking datanode to finalize
-        if (dsm.getDataNodeVersionManager().getUpgradeState() ==
+        if (dsm.getLayoutVersionManager().getUpgradeState() ==
             FINALIZATION_REQUIRED) {
           // SCM will keep sending Finalize command until datanode mlv == slv
           // we need to avoid multiple invocations of finalizeUpgrade.
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
index dbf3bba..dc8078b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
@@ -74,6 +74,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys
     .HDDS_PIPELINE_ACTION_MAX_LIMIT;
 import static org.apache.hadoop.hdds.HddsConfigKeys
     .HDDS_PIPELINE_ACTION_MAX_LIMIT_DEFAULT;
+import static org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.toLayoutVersionProto;
 
 /**
  * Heartbeat class for SCMs.
@@ -100,7 +101,7 @@ public class HeartbeatEndpointTask
   public HeartbeatEndpointTask(EndpointStateMachine rpcEndpoint,
                                ConfigurationSource conf, StateContext context) {
     this(rpcEndpoint, conf, context,
-        context.getParent().getDataNodeVersionManager());
+        context.getParent().getLayoutVersionManager());
   }
 
   /**
@@ -124,8 +125,7 @@ public class HeartbeatEndpointTask
     if (versionManager != null) {
       this.layoutVersionManager = versionManager;
     } else {
-      this.layoutVersionManager =
-         context.getParent().getDataNodeVersionManager();
+      this.layoutVersionManager = context.getParent().getLayoutVersionManager();
     }
   }
 
@@ -161,12 +161,9 @@ public class HeartbeatEndpointTask
     try {
       Preconditions.checkState(this.datanodeDetailsProto != null);
 
-      LayoutVersionProto layoutinfo = LayoutVersionProto.newBuilder()
-          .setSoftwareLayoutVersion(
-              layoutVersionManager.getSoftwareLayoutVersion())
-          .setMetadataLayoutVersion(
-              layoutVersionManager.getMetadataLayoutVersion())
-          .build();
+      LayoutVersionProto layoutinfo = toLayoutVersionProto(
+          layoutVersionManager.getMetadataLayoutVersion(),
+          layoutVersionManager.getSoftwareLayoutVersion());
 
       requestBuilder = SCMHeartbeatRequestProto.newBuilder()
           .setDatanodeDetails(datanodeDetailsProto)
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
index 331ef0f..ee2b7c0 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
@@ -75,7 +75,7 @@ public final class RegisterEndpointTask implements
                               OzoneContainer ozoneContainer,
                               StateContext context) {
     this(rpcEndPoint, conf, ozoneContainer, context,
-        context.getParent().getDataNodeVersionManager());
+        context.getParent().getLayoutVersionManager());
   }
 
   /**
@@ -99,7 +99,7 @@ public final class RegisterEndpointTask implements
       this.layoutVersionManager = versionManager;
     } else {
       this.layoutVersionManager =
-          context.getParent().getDataNodeVersionManager();
+          context.getParent().getLayoutVersionManager();
     }
   }
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
index 3672b51..60c9627 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
@@ -22,7 +22,7 @@ import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
-import org.apache.hadoop.ozone.container.common.DataNodeLayoutVersion;
+import org.apache.hadoop.ozone.container.common.HDDSVolumeLayoutVersion;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
@@ -143,11 +143,11 @@ public final class HddsVolumeUtil {
     String lvStr = getProperty(props, OzoneConsts.LAYOUTVERSION, versionFile);
 
     int lv = Integer.parseInt(lvStr);
-    if(DataNodeLayoutVersion.getLatestVersion().getVersion() != lv) {
+    if(HDDSVolumeLayoutVersion.getLatestVersion().getVersion() != lv) {
       throw new InconsistentStorageStateException("Invalid layOutVersion. " +
           "Version file has layOutVersion as " + lv + " and latest Datanode " +
           "layOutVersion is " +
-          DataNodeLayoutVersion.getLatestVersion().getVersion());
+          HDDSVolumeLayoutVersion.getLatestVersion().getVersion());
     }
     return lv;
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
index 1dee1ba..36c04e1 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.ozone.container.common.volume;
 
+import static org.apache.hadoop.ozone.container.common.HDDSVolumeLayoutVersion.getLatestVersion;
+
 import javax.annotation.Nullable;
 import java.io.File;
 import java.io.IOException;
@@ -35,7 +37,6 @@ import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.checker.Checkable;
 import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
 import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
-import org.apache.hadoop.ozone.container.common.DataNodeLayoutVersion;
 import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile;
 import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
 import org.apache.hadoop.util.DiskChecker;
@@ -271,7 +272,7 @@ public class HddsVolume
   private void createVersionFile() throws IOException {
     this.storageID = HddsVolumeUtil.generateUuid();
     this.cTime = Time.now();
-    this.layoutVersion = DataNodeLayoutVersion.getLatestVersion().getVersion();
+    this.layoutVersion = getLatestVersion().getVersion();
 
     if (this.clusterID == null || datanodeUuid == null) {
       // HddsDatanodeService does not have the cluster information yet. Wait
@@ -296,7 +297,7 @@ public class HddsVolume
     Preconditions.checkArgument(this.cTime > 0,
         "Creation Time should be positive");
     Preconditions.checkArgument(this.layoutVersion ==
-            DataNodeLayoutVersion.getLatestVersion().getVersion(),
+            getLatestVersion().getVersion(),
         "Version File should have the latest LayOutVersion");
 
     File versionFile = getVersionFile();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeLayoutAction.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeLayoutAction.java
index f46b434..a6e6465 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeLayoutAction.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeLayoutAction.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.ozone.container.upgrade;
 
-import static org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature.FIRST_UPGRADE_VERSION;
+import static org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature.DATANODE_SCHEMA_V2;
 
 import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
 import org.apache.hadoop.hdds.upgrade.HDDSUpgradeAction;
@@ -30,8 +30,7 @@ import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachin
  * any specific DataNodeActions.
  */
 public enum DataNodeLayoutAction {
-  DataNodeUpgradeFirstAction(FIRST_UPGRADE_VERSION,
-      new DataNodeUpgradeActionFirstUpgradeVersion());
+  DatanodeSchemaV2(DATANODE_SCHEMA_V2, new DatanodeSchemaV2FinalizeAction());
 
   //////////////////////////////  //////////////////////////////
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeFinalizer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeFinalizer.java
index ec10e44..01b2206 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeFinalizer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeFinalizer.java
@@ -42,6 +42,7 @@ public class DataNodeUpgradeFinalizer extends
                                   String optionalClientID) {
     super(versionManager);
     clientID = optionalClientID;
+    loadDataNodeUpgradeActions();
   }
 
   @Override
@@ -88,10 +89,10 @@ public class DataNodeUpgradeFinalizer extends
         for (HDDSLayoutFeature f : versionManager.unfinalizedFeatures()) {
           Optional<? extends LayoutFeature.UpgradeAction> action =
               f.onFinalizeDataNodeAction();
-          finalizeFeature(f, datanodeStateMachine.getDataNodeStorageConfig(),
+          finalizeFeature(f, datanodeStateMachine.getLayoutStorage(),
               action);
           updateLayoutVersionInVersionFile(f,
-              datanodeStateMachine.getDataNodeStorageConfig());
+              datanodeStateMachine.getLayoutStorage());
           versionManager.finalized(f);
         }
         versionManager.completeFinalization();
@@ -104,4 +105,14 @@ public class DataNodeUpgradeFinalizer extends
       }
     }
   }
+
+  private void loadDataNodeUpgradeActions() {
+    // we just need to iterate through the enum list to load
+    // the actions.
+    for (DataNodeLayoutAction action : DataNodeLayoutAction.values()) {
+      LOG.debug("Loading datanode action for {}",
+          action.getHddsFeature().description());
+    }
+  }
+
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DatanodeMetadataFeatures.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DatanodeMetadataFeatures.java
index a61bd76..dc71a6e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DatanodeMetadataFeatures.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DatanodeMetadataFeatures.java
@@ -44,7 +44,7 @@ public final class DatanodeMetadataFeatures {
       // this case.
       return OzoneConsts.SCHEMA_V2;
     } else if (versionManager.getMetadataLayoutVersion() <
-        HDDSLayoutFeature.FIRST_UPGRADE_VERSION.layoutVersion()) {
+        HDDSLayoutFeature.DATANODE_SCHEMA_V2.layoutVersion()) {
       return OzoneConsts.SCHEMA_V1;
     } else {
       return OzoneConsts.SCHEMA_V2;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeActionFirstUpgradeVersion.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DatanodeSchemaV2FinalizeAction.java
similarity index 81%
copy from hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeActionFirstUpgradeVersion.java
copy to hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DatanodeSchemaV2FinalizeAction.java
index 7e601f2..fa7707e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeActionFirstUpgradeVersion.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DatanodeSchemaV2FinalizeAction.java
@@ -26,14 +26,15 @@ import org.slf4j.LoggerFactory;
 /**
  * Upgrade Action for DataNode for the very first first Upgrade Version.
  */
-public class DataNodeUpgradeActionFirstUpgradeVersion
+public class DatanodeSchemaV2FinalizeAction
     implements HDDSUpgradeAction<DatanodeStateMachine> {
 
   public static final Logger LOG =
-      LoggerFactory.getLogger(DataNodeUpgradeActionFirstUpgradeVersion.class);
+      LoggerFactory.getLogger(DatanodeSchemaV2FinalizeAction.class);
   @Override
   public void executeAction(DatanodeStateMachine arg) throws Exception {
-    LOG.info("Executing datanode upgrade action for the very First Upgrade " +
-        "Version.");
+    LOG.info("Executing datanode 'onFinalize' action for the first " +
+        "version with upgrade support. New containers will be " +
+        "created with Schema Version 2 henceforth.");
   }
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeActionFirstUpgradeVersion.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/UpgradeUtils.java
similarity index 50%
rename from hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeActionFirstUpgradeVersion.java
rename to hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/UpgradeUtils.java
index 7e601f2..11d725b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeActionFirstUpgradeVersion.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/UpgradeUtils.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -18,22 +18,27 @@
 
 package org.apache.hadoop.ozone.container.upgrade;
 
-import org.apache.hadoop.hdds.upgrade.HDDSUpgradeAction;
-import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import static org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager.maxLayoutVersion;
+
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
 
 /**
- * Upgrade Action for DataNode for the very first first Upgrade Version.
+ * Util methods for upgrade.
  */
-public class DataNodeUpgradeActionFirstUpgradeVersion
-    implements HDDSUpgradeAction<DatanodeStateMachine> {
+public final class UpgradeUtils {
+
+  private UpgradeUtils() {
+  }
+
+  public static LayoutVersionProto defaultLayoutVersionProto() {
+    return LayoutVersionProto.newBuilder()
+        .setMetadataLayoutVersion(maxLayoutVersion())
+        .setSoftwareLayoutVersion(maxLayoutVersion()).build();
+  }
 
-  public static final Logger LOG =
-      LoggerFactory.getLogger(DataNodeUpgradeActionFirstUpgradeVersion.class);
-  @Override
-  public void executeAction(DatanodeStateMachine arg) throws Exception {
-    LOG.info("Executing datanode upgrade action for the very First Upgrade " +
-        "Version.");
+  public static LayoutVersionProto toLayoutVersionProto(int mLv, int sLv) {
+    return LayoutVersionProto.newBuilder()
+        .setMetadataLayoutVersion(mLv)
+        .setSoftwareLayoutVersion(sLv).build();
   }
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
index 8a45914..d465ddf 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
@@ -16,6 +16,9 @@
  */
 package org.apache.hadoop.ozone.protocolPB;
 
+import static org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature.INITIAL_VERSION;
+import static org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.toLayoutVersionProto;
+
 import java.io.IOException;
 
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
@@ -72,6 +75,11 @@ public class StorageContainerDatanodeProtocolServerSideTranslatorPB
     LayoutVersionProto layoutInfo = null;
     if (request.hasDataNodeLayoutVersion()) {
       layoutInfo = request.getDataNodeLayoutVersion();
+    } else {
+      // Backward compatibility to make sure old Datanodes can still talk to
+      // SCM.
+      layoutInfo = toLayoutVersionProto(INITIAL_VERSION.layoutVersion(),
+          INITIAL_VERSION.layoutVersion());
     }
     return impl.register(request.getExtendedDatanodeDetails(), dnNodeReport,
         containerRequestProto, pipelineReport, layoutInfo);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeLayOutVersion.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeLayOutVersion.java
index 5cabef2..7f58b94 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeLayOutVersion.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeLayOutVersion.java
@@ -28,11 +28,11 @@ public class TestDatanodeLayOutVersion {
   @Test
   public void testDatanodeLayOutVersion() {
     // Check Latest Version and description
-    Assert.assertEquals(1, DataNodeLayoutVersion.getLatestVersion()
+    Assert.assertEquals(1, HDDSVolumeLayoutVersion.getLatestVersion()
         .getVersion());
-    Assert.assertEquals("HDDS Datanode LayOut Version 1", DataNodeLayoutVersion
-        .getLatestVersion().getDescription());
-    Assert.assertEquals(DataNodeLayoutVersion.getAllVersions().length,
-        DataNodeLayoutVersion.getAllVersions().length);
+    Assert.assertEquals("HDDS Datanode LayOut Version 1",
+        HDDSVolumeLayoutVersion.getLatestVersion().getDescription());
+    Assert.assertEquals(HDDSVolumeLayoutVersion.getAllVersions().length,
+        HDDSVolumeLayoutVersion.getAllVersions().length);
   }
 }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java
index 5889222..769c380 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.container.common.helpers;
 
 import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
-import org.apache.hadoop.ozone.container.common.DataNodeLayoutVersion;
+import org.apache.hadoop.ozone.container.common.HDDSVolumeLayoutVersion;
 import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
@@ -59,7 +59,7 @@ public class TestDatanodeVersionFile {
     clusterID = UUID.randomUUID().toString();
     datanodeUUID = UUID.randomUUID().toString();
     cTime = Time.now();
-    lv = DataNodeLayoutVersion.getLatestVersion().getVersion();
+    lv = HDDSVolumeLayoutVersion.getLatestVersion().getVersion();
 
     dnVersionFile = new DatanodeVersionFile(
         storageID, clusterID, datanodeUUID, cTime, lv);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
index 2058c8e..bd13715 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.ozone.container.common.states.endpoint;
 
+import static org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager.maxLayoutVersion;
+
 import java.net.InetSocketAddress;
 import java.util.UUID;
 
@@ -49,8 +51,6 @@ public class TestHeartbeatEndpointTask {
 
   private static final InetSocketAddress TEST_SCM_ENDPOINT =
       new InetSocketAddress("test-scm-1", 9861);
-  private static final int TEST_SOFTWARE_LAYOUT_VERSION = 0;
-  private static final int TEST_METADATA_LAYOUT_VERSION = 0;
 
   @Test
   public void testheartbeatWithoutReports() throws Exception {
@@ -283,9 +283,9 @@ public class TestHeartbeatEndpointTask {
     HDDSLayoutVersionManager layoutVersionManager =
         Mockito.mock(HDDSLayoutVersionManager.class);
     Mockito.when(layoutVersionManager.getSoftwareLayoutVersion())
-        .thenReturn(TEST_SOFTWARE_LAYOUT_VERSION);
+        .thenReturn(maxLayoutVersion());
     Mockito.when(layoutVersionManager.getMetadataLayoutVersion())
-        .thenReturn(TEST_METADATA_LAYOUT_VERSION);
+        .thenReturn(maxLayoutVersion());
     return HeartbeatEndpointTask.newBuilder()
         .setConfig(conf)
         .setDatanodeDetails(datanodeDetails)
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java
index 50001aa..5875e4d 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.ozone.container.upgrade;
 
+import static org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager.maxLayoutVersion;
+import static org.apache.hadoop.ozone.OzoneConsts.DATANODE_LAYOUT_VERSION_DIR;
+
 import java.io.File;
 import java.io.IOException;
 import java.util.UUID;
@@ -25,9 +28,7 @@ import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
-import org.apache.hadoop.ozone.upgrade.LayoutFeature;
 import org.apache.hadoop.ozone.upgrade.TestUpgradeUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
@@ -48,18 +49,14 @@ public class TestDataNodeStartupSlvLessThanMlv {
   public void testStartupSlvLessThanMlv() throws Exception {
     // Add subdirectories under the temporary folder where the version file
     // will be placed.
-    File datanodeSubdir = tempFolder.newFolder("datanodeStorageConfig",
-        "current");
+    File datanodeSubdir = tempFolder.newFolder(DATANODE_LAYOUT_VERSION_DIR);
 
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
         tempFolder.getRoot().getAbsolutePath());
 
     // Set metadata layout version larger then software layout version.
-    int largestSlv = 0;
-    for (LayoutFeature f : HDDSLayoutFeature.values()) {
-      largestSlv = Math.max(largestSlv, f.layoutVersion());
-    }
+    int largestSlv = maxLayoutVersion();
     int mlv = largestSlv + 1;
 
     // Create version file with MLV > SLV, which should fail the
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java
index 29105ad..333724c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.hdds.scm.node;
 
+import static org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.toLayoutVersionProto;
+
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto
@@ -61,13 +63,9 @@ public class DatanodeInfo extends DatanodeDetails {
     super(datanodeDetails);
     this.lock = new ReentrantReadWriteLock();
     this.lastHeartbeatTime = Time.monotonicNow();
-    lastKnownLayoutVersion =
-        LayoutVersionProto.newBuilder()
-            .setMetadataLayoutVersion(layoutInfo != null ?
-                layoutInfo.getMetadataLayoutVersion() : 0)
-            .setSoftwareLayoutVersion(layoutInfo != null ?
-                layoutInfo.getSoftwareLayoutVersion() : 0)
-            .build();
+    lastKnownLayoutVersion = toLayoutVersionProto(
+        layoutInfo != null ? layoutInfo.getMetadataLayoutVersion() : 0,
+        layoutInfo != null ? layoutInfo.getSoftwareLayoutVersion() : 0);
     this.storageReports = Collections.emptyList();
     this.nodeStatus = nodeStatus;
     this.metadataStorageReports = Collections.emptyList();
@@ -105,10 +103,9 @@ public class DatanodeInfo extends DatanodeDetails {
     }
     try {
       lock.writeLock().lock();
-      lastKnownLayoutVersion = LayoutVersionProto.newBuilder()
-          .setMetadataLayoutVersion(version.getMetadataLayoutVersion())
-          .setSoftwareLayoutVersion(version.getSoftwareLayoutVersion())
-          .build();
+      lastKnownLayoutVersion = toLayoutVersionProto(
+          version.getMetadataLayoutVersion(),
+          version.getSoftwareLayoutVersion());
     } finally {
       lock.writeLock().unlock();
     }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
index 17bf6b6..c50c711 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
@@ -17,8 +17,11 @@
  */
 package org.apache.hadoop.hdds.scm.node;
 
+import static org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.defaultLayoutVersionProto;
+
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.net.NetworkTopology;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
@@ -33,6 +36,7 @@ import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
 import org.apache.hadoop.ozone.protocol.StorageContainerNodeProtocol;
 import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
+import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 
 import java.io.Closeable;
@@ -66,6 +70,23 @@ import java.util.UUID;
 public interface NodeManager extends StorageContainerNodeProtocol,
     EventHandler<CommandForDatanode>, NodeManagerMXBean, Closeable {
 
+
+  /**
+   * Register API without a layout version info object passed in. Useful for
+   * tests.
+   * @param datanodeDetails DN details
+   * @param nodeReport Node report
+   * @param pipelineReportsProto Pipeline reports
+   * @return whatever the regular register command returns with default
+   * layout version passed in.
+   */
+  default RegisteredCommand register(
+      DatanodeDetails datanodeDetails, NodeReportProto nodeReport,
+      PipelineReportsProto pipelineReportsProto) {
+    return register(datanodeDetails, nodeReport, pipelineReportsProto,
+        defaultLayoutVersionProto());
+  }
+
   /**
    * Gets all Live Datanodes that are currently communicating with SCM.
    * @param nodeStatus - Status of the node to return
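
Note: because the new register overload is an interface default method, every NodeManager
implementation (including the test mocks touched later in this patch) picks it up without
code changes. A test-style usage sketch, assuming an SCMNodeManager instance named
nodeManager is already set up:

    // Register a datanode without any layout-version information; the default
    // method forwards to the four-argument register with defaultLayoutVersionProto().
    RegisteredCommand cmd = nodeManager.register(
        MockDatanodeDetails.randomDatanodeDetails(),
        null,                                   // no node report
        TestUtils.getRandomPipelineReports());  // random pipeline reports
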
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
index 130aa48..74a50bb 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
@@ -651,9 +651,9 @@ public class NodeStateManager implements Runnable, Closeable {
               node);
         }
       }
+
     } catch (NodeNotFoundException ex) {
       LOG.error("Inconsistent NodeStateMap! {}", nodeStateMap);
-      ex.printStackTrace();
     }
   }
 
@@ -710,18 +710,18 @@ public class NodeStateManager implements Runnable, Closeable {
         NodeStatus status = nodeStateMap.getNodeStatus(node.getUuid());
         switch (status.getHealth()) {
         case HEALTHY:
-          // Move the node to STALE if the last heartbeat time is less than
-          // configured stale-node interval.
           updateNodeLayoutVersionState(node, layoutMisMatchCondition, status,
               NodeLifeCycleEvent.LAYOUT_MISMATCH);
+          // Move the node to STALE if the last heartbeat time is less than
+          // configured stale-node interval.
           updateNodeState(node, staleNodeCondition, status,
               NodeLifeCycleEvent.TIMEOUT);
           break;
         case HEALTHY_READONLY:
-          // Move the node to STALE if the last heartbeat time is less than
-          // configured stale-node interval.
           updateNodeLayoutVersionState(node, layoutMatchCondition, status,
               NodeLifeCycleEvent.LAYOUT_MATCH);
+          // Move the node to STALE if the last heartbeat time is less than
+          // configured stale-node interval.
           updateNodeState(node, staleNodeCondition, status,
               NodeLifeCycleEvent.TIMEOUT);
           break;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/ReadOnlyHealthyToHealthyNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/ReadOnlyHealthyToHealthyNodeHandler.java
index ea79bf6..49d5583 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/ReadOnlyHealthyToHealthyNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/ReadOnlyHealthyToHealthyNodeHandler.java
@@ -27,13 +27,14 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * Handles Stale node event.
+ * Handles Read Only healthy to healthy node event. (Possibly due to a
+ * datanode having finalized)
  */
 public class ReadOnlyHealthyToHealthyNodeHandler
     implements EventHandler<DatanodeDetails> {
 
   private static final Logger LOG =
-      LoggerFactory.getLogger(NonHealthyToReadOnlyHealthyNodeHandler.class);
+      LoggerFactory.getLogger(ReadOnlyHealthyToHealthyNodeHandler.class);
 
   private final PipelineManager pipelineManager;
   private final ConfigurationSource conf;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index 2c764c3..5413bee 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -307,6 +307,19 @@ public class SCMNodeManager implements NodeManager {
         .build();
   }
 
+  @Override
+  public RegisteredCommand register(
+      DatanodeDetails datanodeDetails, NodeReportProto nodeReport,
+      PipelineReportsProto pipelineReportsProto) {
+    return register(datanodeDetails, nodeReport, pipelineReportsProto,
+        LayoutVersionProto.newBuilder()
+            .setMetadataLayoutVersion(
+                scmLayoutVersionManager.getMetadataLayoutVersion())
+            .setSoftwareLayoutVersion(
+                scmLayoutVersionManager.getSoftwareLayoutVersion())
+            .build());
+  }
+
   /**
    * Register the node if the node finds that it is not registered with any
    * SCM.
@@ -324,17 +337,15 @@ public class SCMNodeManager implements NodeManager {
       DatanodeDetails datanodeDetails, NodeReportProto nodeReport,
       PipelineReportsProto pipelineReportsProto,
       LayoutVersionProto layoutInfo) {
-
-    if (layoutInfo != null) {
-      if (layoutInfo.getSoftwareLayoutVersion() >
-          scmLayoutVersionManager.getSoftwareLayoutVersion()) {
-        return RegisteredCommand.newBuilder()
-            .setErrorCode(ErrorCode.errorNodeNotPermitted)
-            .setDatanode(datanodeDetails)
-            .setClusterID(this.scmStorageConfig.getClusterID())
-            .build();
-      }
+    if (layoutInfo.getSoftwareLayoutVersion() >
+        scmLayoutVersionManager.getSoftwareLayoutVersion()) {
+      return RegisteredCommand.newBuilder()
+          .setErrorCode(ErrorCode.errorNodeNotPermitted)
+          .setDatanode(datanodeDetails)
+          .setClusterID(this.scmStorageConfig.getClusterID())
+          .build();
     }
+
     if (!isNodeRegistered(datanodeDetails)) {
       InetAddress dnAddress = Server.getRemoteIp();
       if (dnAddress != null) {
@@ -358,6 +369,8 @@ public class SCMNodeManager implements NodeManager {
 
         clusterMap.add(datanodeDetails);
         nodeStateManager.addNode(datanodeDetails, layoutInfo);
+        nodeStateManager.updateLastKnownLayoutVersion(datanodeDetails,
+            layoutInfo);
         // Check that datanode in nodeStateManager has topology parent set
         DatanodeDetails dn = nodeStateManager.getNode(datanodeDetails);
         Preconditions.checkState(dn.getParent() != null);
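
Note: with every caller now passing a LayoutVersionProto (or the SCM-side default built in
the new three-argument overload above), the old null check around the version comparison can
be dropped; the remaining rejection path is a datanode whose software layout version is ahead
of the SCM's. A usage sketch of that path, where slv stands for the SCM's own software layout
version (names are illustrative only):

    // A datanode advertising a newer software layout version than the SCM is
    // refused; the returned RegisteredCommand carries errorNodeNotPermitted.
    LayoutVersionProto tooNew = toLayoutVersionProto(slv + 1, slv + 1);
    RegisteredCommand rejected = nodeManager.register(
        MockDatanodeDetails.randomDatanodeDetails(), null,
        TestUtils.getRandomPipelineReports(), tooNew);
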
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
index 89625a0..3439933 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
@@ -56,6 +56,8 @@ import static org.apache.hadoop.hdds.scm.events.SCMEvents.NODE_REPORT;
 import static org.apache.hadoop.hdds.scm.events.SCMEvents.CMD_STATUS_REPORT;
 import static org.apache.hadoop.hdds.scm.events.SCMEvents.PIPELINE_ACTIONS;
 import static org.apache.hadoop.hdds.scm.events.SCMEvents.PIPELINE_REPORT;
+import static org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature.INITIAL_VERSION;
+import static org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.toLayoutVersionProto;
 
 /**
  * This class is responsible for dispatching heartbeat from datanode to
@@ -102,15 +104,23 @@ public final class SCMDatanodeHeartbeatDispatcher {
       commands = nodeManager.getCommandQueue(dnID);
 
     } else {
-      if (heartbeat.hasDataNodeLayoutVersion()) {
-        LOG.debug("Processing DataNode Layout Report.");
-        nodeManager.processLayoutVersionReport(datanodeDetails,
-            heartbeat.getDataNodeLayoutVersion());
+
+      LayoutVersionProto layoutVersion = null;
+      if (!heartbeat.hasDataNodeLayoutVersion()) {
+        // Backward compatibility to make sure old Datanodes can still talk to
+        // SCM.
+        layoutVersion = toLayoutVersionProto(INITIAL_VERSION.layoutVersion(),
+            INITIAL_VERSION.layoutVersion());
+      } else {
+        layoutVersion = heartbeat.getDataNodeLayoutVersion();
       }
 
+      LOG.debug("Processing DataNode Layout Report.");
+      nodeManager.processLayoutVersionReport(datanodeDetails, layoutVersion);
+
       // should we dispatch heartbeat through eventPublisher?
       commands = nodeManager.processHeartbeat(datanodeDetails,
-          heartbeat.getDataNodeLayoutVersion());
+          layoutVersion);
       if (heartbeat.hasNodeReport()) {
         LOG.debug("Dispatching Node Report.");
         eventPublisher.fireEvent(
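
Note: the fallback above keeps pre-upgrade datanodes working: a heartbeat that never sets the
layout-version field is treated as INITIAL_VERSION for both the metadata and software layout
version, so it flows through the same report and heartbeat processing as a current datanode.
A small illustration of the old-style message this guards against (construction shown only to
make the missing field explicit):

    // A heartbeat from a pre-upgrade datanode has no layout-version field at all.
    SCMHeartbeatRequestProto oldStyleHeartbeat = SCMHeartbeatRequestProto.newBuilder()
        .setDatanodeDetails(datanodeDetails.getProtoBufMessage())
        .build();
    // hasDataNodeLayoutVersion() is false here, so the dispatcher substitutes
    // toLayoutVersionProto(INITIAL_VERSION.layoutVersion(), INITIAL_VERSION.layoutVersion()).
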
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorageConfig.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorageConfig.java
index a628279..a4bbfbf 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorageConfig.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorageConfig.java
@@ -27,6 +27,7 @@ import java.io.IOException;
 import java.util.Properties;
 import java.util.UUID;
 
+import static org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager.maxLayoutVersion;
 import static org.apache.hadoop.ozone.OzoneConsts.SCM_ID;
 import static org.apache.hadoop.ozone.OzoneConsts.STORAGE_DIR;
 
@@ -41,12 +42,19 @@ public class SCMStorageConfig extends Storage {
    * @throws IOException if any directories are inaccessible.
    */
   public SCMStorageConfig(OzoneConfiguration conf) throws IOException {
-    super(NodeType.SCM, ServerUtils.getScmDbDir(conf), STORAGE_DIR);
+    super(NodeType.SCM, ServerUtils.getScmDbDir(conf), STORAGE_DIR,
+        maxLayoutVersion());
+  }
+
+  public SCMStorageConfig(OzoneConfiguration conf, int defaultLayoutVersion)
+      throws IOException {
+    super(NodeType.SCM, ServerUtils.getScmDbDir(conf), STORAGE_DIR,
+        defaultLayoutVersion);
   }
 
   public SCMStorageConfig(NodeType type, File root, String sdName)
       throws IOException {
-    super(type, root, sdName);
+    super(type, root, sdName, maxLayoutVersion());
   }
 
   public void setScmId(String scmId) throws IOException {
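
Note: the statically imported HDDSLayoutVersionManager.maxLayoutVersion() is not defined in
this portion of the patch. To match the intent of this change (a freshly initialized SCM
storage config starts at the highest known layout version), it presumably resolves the
largest layout version declared in the HDDSLayoutFeature enum, roughly along these lines:

    // Sketch only; assumes the HDDSLayoutFeature constants are declared in
    // ascending layout-version order, so the last one carries the highest version.
    public static int maxLayoutVersion() {
      HDDSLayoutFeature[] features = HDDSLayoutFeature.values();
      return features[features.length - 1].layoutVersion();
    }
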
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 7bf4f43..75e6a8a 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -91,7 +91,6 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineReportHandler;
 import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
 import org.apache.hadoop.hdds.scm.pipeline.choose.algorithms.PipelineChoosePolicyFactory;
 import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager;
-import org.apache.hadoop.hdds.scm.server.upgrade.SCMLayoutAction;
 import org.apache.hadoop.hdds.scm.server.upgrade.SCMUpgradeFinalizer;
 import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
@@ -267,8 +266,8 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
           "failure.", ResultCodes.SCM_NOT_INITIALIZED);
     }
 
-    loadSCMUpgradeActions();
-    scmLayoutVersionManager = new HDDSLayoutVersionManager(scmStorageConfig);
+    scmLayoutVersionManager = new HDDSLayoutVersionManager(
+        scmStorageConfig.getLayoutVersion());
     upgradeFinalizer = new SCMUpgradeFinalizer(scmLayoutVersionManager);
 
     /**
@@ -1323,13 +1322,4 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
   ) throws IOException {
     return upgradeFinalizer.reportStatus(upgradeClientID, takeover);
   }
-
-  private void loadSCMUpgradeActions() {
-    // we just need to iterate through the enum list to load
-    // the actions.
-    for (SCMLayoutAction action : SCMLayoutAction.values()) {
-      LOG.info("Loading datanode action for {}",
-          action.getHddsFeature().description());
-    }
-  }
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMLayoutAction.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMLayoutAction.java
index 155c947..9cd4aea 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMLayoutAction.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMLayoutAction.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.hdds.scm.server.upgrade;
 
-import static org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature.FIRST_UPGRADE_VERSION;
+import static org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature.DATANODE_SCHEMA_V2;
 
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
@@ -30,9 +30,8 @@ import org.apache.hadoop.hdds.upgrade.HDDSUpgradeAction;
  * any specific SCMActions.
  */
 public enum SCMLayoutAction {
-  SCMUpgradeFirstAction(FIRST_UPGRADE_VERSION,
-      new SCMUpgradeActionFirstUpgradeVersion());
-
+  ScmActionForDatanodeSchemaV2(DATANODE_SCHEMA_V2,
+      new ScmOnFinalizeActionForDatanodeSchemaV2());
   //////////////////////////////  //////////////////////////////
 
   private HDDSLayoutFeature hddsFeature;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeFinalizer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeFinalizer.java
index b220b18..7908147 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeFinalizer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeFinalizer.java
@@ -40,6 +40,7 @@ public class SCMUpgradeFinalizer extends
 
   public SCMUpgradeFinalizer(HDDSLayoutVersionManager versionManager) {
     super(versionManager);
+    loadSCMUpgradeActions();
   }
 
   @Override
@@ -102,4 +103,13 @@ public class SCMUpgradeFinalizer extends
       }
     }
   }
+
+  private void loadSCMUpgradeActions() {
+    // we just need to iterate through the enum list to load
+    // the actions.
+    for (SCMLayoutAction action : SCMLayoutAction.values()) {
+      LOG.debug("Loading datanode action for {}",
+          action.getHddsFeature().description());
+    }
+  }
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeActionFirstUpgradeVersion.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/ScmOnFinalizeActionForDatanodeSchemaV2.java
similarity index 76%
rename from hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeActionFirstUpgradeVersion.java
rename to hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/ScmOnFinalizeActionForDatanodeSchemaV2.java
index 01f54f5..1556d5e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeActionFirstUpgradeVersion.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/ScmOnFinalizeActionForDatanodeSchemaV2.java
@@ -16,7 +16,9 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hdds.scm.server.upgrade; 
+package org.apache.hadoop.hdds.scm.server.upgrade;
+
+import static org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature.DATANODE_SCHEMA_V2;
 
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.upgrade.HDDSUpgradeAction;
@@ -26,12 +28,13 @@ import org.slf4j.LoggerFactory;
 /**
  * SCM Upgrade Action for the very first Upgrade Version.
  */
-public class SCMUpgradeActionFirstUpgradeVersion implements
+public class ScmOnFinalizeActionForDatanodeSchemaV2 implements
     HDDSUpgradeAction<StorageContainerManager> {
   public static final Logger LOG =
-      LoggerFactory.getLogger(SCMUpgradeActionFirstUpgradeVersion.class);
+      LoggerFactory.getLogger(ScmOnFinalizeActionForDatanodeSchemaV2.class);
   @Override
   public void executeAction(StorageContainerManager arg) throws Exception {
-    LOG.info("Executing SCM Upgrade action for Very first Upgrade Version");
+    LOG.info("Executing SCM On Finalize action for layout feature {}",
+        DATANODE_SCHEMA_V2);
   }
-}
+}
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
index 144a04a..42640f3 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
@@ -116,7 +116,7 @@ public final class TestUtils {
       SCMNodeManager nodeManager) {
     return getDatanodeDetails(
         nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(), null,
-                getRandomPipelineReports(), null));
+                getRandomPipelineReports()));
   }
 
   /**
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
index 4a8924d..724be50 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
@@ -116,14 +116,14 @@ public class MockNodeManager implements NodeManager {
     if (!nodes.isEmpty()) {
       for (int x = 0; x < nodes.size(); x++) {
         DatanodeDetails node = nodes.get(x);
-        register(node, null, null, null);
+        register(node, null, null);
         populateNodeMetric(node, x);
       }
     }
     if (initializeFakeNodes) {
       for (int x = 0; x < nodeCount; x++) {
         DatanodeDetails dd = MockDatanodeDetails.randomDatanodeDetails();
-        register(dd, null, null, null);
+        register(dd, null, null);
         populateNodeMetric(dd, x);
       }
     }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
index 5d9246a..863b3f3 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
@@ -52,6 +52,7 @@ import java.util.UUID;
 import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
 import static org.apache.hadoop.hdds.scm.TestUtils.getContainer;
 import static org.apache.hadoop.hdds.scm.TestUtils.getReplicas;
+import static org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager.maxLayoutVersion;
 
 /**
  * Test cases to verify the functionality of IncrementalContainerReportHandler.
@@ -62,8 +63,6 @@ public class TestIncrementalContainerReportHandler {
   private ContainerManager containerManager;
   private ContainerStateManager containerStateManager;
   private EventPublisher publisher;
-  private static final Integer SOFTWARE_LAYOUT_VERSION = 1;
-  private static final Integer METADATA_LAYOUT_VERSION = 1;
   private HDDSLayoutVersionManager versionManager;
 
   @Before
@@ -80,9 +79,9 @@ public class TestIncrementalContainerReportHandler {
     this.versionManager =
         Mockito.mock(HDDSLayoutVersionManager.class);
     Mockito.when(versionManager.getMetadataLayoutVersion())
-        .thenReturn(METADATA_LAYOUT_VERSION);
+        .thenReturn(maxLayoutVersion());
     Mockito.when(versionManager.getSoftwareLayoutVersion())
-        .thenReturn(SOFTWARE_LAYOUT_VERSION);
+        .thenReturn(maxLayoutVersion());
     this.nodeManager =
         new SCMNodeManager(conf, storageConfig, eventQueue, clusterMap,
             versionManager);
@@ -134,9 +133,9 @@ public class TestIncrementalContainerReportHandler {
     final DatanodeDetails datanodeOne = randomDatanodeDetails();
     final DatanodeDetails datanodeTwo = randomDatanodeDetails();
     final DatanodeDetails datanodeThree = randomDatanodeDetails();
-    nodeManager.register(datanodeOne, null, null, null);
-    nodeManager.register(datanodeTwo, null, null, null);
-    nodeManager.register(datanodeThree, null, null, null);
+    nodeManager.register(datanodeOne, null, null);
+    nodeManager.register(datanodeTwo, null, null);
+    nodeManager.register(datanodeThree, null, null);
     final Set<ContainerReplica> containerReplicas = getReplicas(
         container.containerID(),
         ContainerReplicaProto.State.CLOSING,
@@ -171,9 +170,9 @@ public class TestIncrementalContainerReportHandler {
     final DatanodeDetails datanodeOne = randomDatanodeDetails();
     final DatanodeDetails datanodeTwo = randomDatanodeDetails();
     final DatanodeDetails datanodeThree = randomDatanodeDetails();
-    nodeManager.register(datanodeOne, null, null, null);
-    nodeManager.register(datanodeTwo, null, null, null);
-    nodeManager.register(datanodeThree, null, null, null);
+    nodeManager.register(datanodeOne, null, null);
+    nodeManager.register(datanodeTwo, null, null);
+    nodeManager.register(datanodeThree, null, null);
     final Set<ContainerReplica> containerReplicas = getReplicas(
         container.containerID(),
         ContainerReplicaProto.State.CLOSING,
@@ -209,9 +208,9 @@ public class TestIncrementalContainerReportHandler {
     final DatanodeDetails datanodeOne = randomDatanodeDetails();
     final DatanodeDetails datanodeTwo = randomDatanodeDetails();
     final DatanodeDetails datanodeThree = randomDatanodeDetails();
-    nodeManager.register(datanodeOne, null, null, null);
-    nodeManager.register(datanodeTwo, null, null, null);
-    nodeManager.register(datanodeThree, null, null, null);
+    nodeManager.register(datanodeOne, null, null);
+    nodeManager.register(datanodeTwo, null, null);
+    nodeManager.register(datanodeThree, null, null);
     final Set<ContainerReplica> containerReplicas = getReplicas(
         container.containerID(),
         ContainerReplicaProto.State.CLOSING,
@@ -250,9 +249,9 @@ public class TestIncrementalContainerReportHandler {
     final DatanodeDetails datanodeOne = randomDatanodeDetails();
     final DatanodeDetails datanodeTwo = randomDatanodeDetails();
     final DatanodeDetails datanodeThree = randomDatanodeDetails();
-    nodeManager.register(datanodeOne, null, null, null);
-    nodeManager.register(datanodeTwo, null, null, null);
-    nodeManager.register(datanodeThree, null, null, null);
+    nodeManager.register(datanodeOne, null, null);
+    nodeManager.register(datanodeTwo, null, null);
+    nodeManager.register(datanodeThree, null, null);
     final Set<ContainerReplica> containerReplicas = getReplicas(
         container.containerID(),
         ContainerReplicaProto.State.CLOSED,
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
index 2fec55c..e1a42d2 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
@@ -53,6 +53,9 @@ import org.apache.hadoop.test.PathUtils;
 import org.apache.commons.io.IOUtils;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY;
 import org.junit.After;
+
+import static org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.toLayoutVersionProto;
+import static org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager.maxLayoutVersion;
 import static org.junit.Assert.assertEquals;
 import org.junit.Before;
 import org.junit.Ignore;
@@ -65,8 +68,6 @@ import org.mockito.Mockito;
  * Test for different container placement policy.
  */
 public class TestContainerPlacement {
-  private static final int SOFTWARE_LAYOUT_VERSION = 1;
-  private static final int METADATA_LAYOUT_VERSION = 1;
 
   @Rule
   public ExpectedException thrown = ExpectedException.none();
@@ -114,9 +115,9 @@ public class TestContainerPlacement {
     HDDSLayoutVersionManager versionManager =
         Mockito.mock(HDDSLayoutVersionManager.class);
     Mockito.when(versionManager.getMetadataLayoutVersion())
-        .thenReturn(METADATA_LAYOUT_VERSION);
+        .thenReturn(maxLayoutVersion());
     Mockito.when(versionManager.getSoftwareLayoutVersion())
-        .thenReturn(SOFTWARE_LAYOUT_VERSION);
+        .thenReturn(maxLayoutVersion());
     SCMNodeManager nodeManager = new SCMNodeManager(config,
         storageConfig, eventQueue, null, versionManager);
     return nodeManager;
@@ -166,10 +167,9 @@ public class TestContainerPlacement {
         TestUtils.getListOfRegisteredDatanodeDetails(nodeManager, nodeCount);
     XceiverClientManager xceiverClientManager = null;
     LayoutVersionManager versionManager = nodeManager.getLayoutVersionManager();
-    LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
-        .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion())
-        .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion())
-        .build();
+    LayoutVersionProto layoutInfo =
+        toLayoutVersionProto(versionManager.getMetadataLayoutVersion(),
+            versionManager.getSoftwareLayoutVersion());
     try {
       for (DatanodeDetails datanodeDetails : datanodes) {
         nodeManager.processHeartbeat(datanodeDetails, layoutInfo);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
index 23ca76b..d7c7d2e 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
@@ -143,25 +143,25 @@ public class TestDeadNodeHandler {
     // test case happy.
 
     nodeManager.register(datanode1,
-        TestUtils.createNodeReport(storageOne), null, null);
+        TestUtils.createNodeReport(storageOne), null);
     nodeManager.register(datanode2,
-        TestUtils.createNodeReport(storageOne), null, null);
+        TestUtils.createNodeReport(storageOne), null);
     nodeManager.register(datanode3,
-        TestUtils.createNodeReport(storageOne), null, null);
+        TestUtils.createNodeReport(storageOne), null);
 
     nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(),
-        TestUtils.createNodeReport(storageOne), null, null);
+        TestUtils.createNodeReport(storageOne), null);
     nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(),
-        TestUtils.createNodeReport(storageOne), null, null);
+        TestUtils.createNodeReport(storageOne), null);
     nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(),
-        TestUtils.createNodeReport(storageOne), null, null);
+        TestUtils.createNodeReport(storageOne), null);
 
     nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(),
-        TestUtils.createNodeReport(storageOne), null, null);
+        TestUtils.createNodeReport(storageOne), null);
     nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(),
-        TestUtils.createNodeReport(storageOne), null, null);
+        TestUtils.createNodeReport(storageOne), null);
     nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(),
-        TestUtils.createNodeReport(storageOne), null, null);
+        TestUtils.createNodeReport(storageOne), null);
 
     LambdaTestUtils.await(120000, 1000,
         () -> {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java
index 8d4b3b1..ae44f2e 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java
@@ -270,7 +270,7 @@ public class TestNodeDecommissionManager {
     for (int i=0; i<10; i++) {
       DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
       dns.add(dn);
-      nodeManager.register(dn, null, null, null);
+      nodeManager.register(dn, null, null);
     }
     // We have 10 random DNs, we want to create another one that is on the same
     // host as some of the others.
@@ -289,7 +289,7 @@ public class TestNodeDecommissionManager {
         .setNetworkLocation(multiDn.getNetworkLocation());
 
     DatanodeDetails dn = builder.build();
-    nodeManager.register(dn, null, null, null);
+    nodeManager.register(dn, null, null);
     dns.add(dn);
     return dns;
   }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
index 2710225..4e8f0f6 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
@@ -16,6 +16,8 @@
  */
 package org.apache.hadoop.hdds.scm.node;
 
+import static org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager.maxLayoutVersion;
+
 import java.io.IOException;
 import java.util.UUID;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -50,8 +52,6 @@ public class TestNodeReportHandler implements EventPublisher {
       .getLogger(TestNodeReportHandler.class);
   private NodeReportHandler nodeReportHandler;
   private HDDSLayoutVersionManager versionManager;
-  private static final Integer SOFTWARE_LAYOUT_VERSION = 1;
-  private static final Integer METADATA_LAYOUT_VERSION = 1;
   private SCMNodeManager nodeManager;
   private String storagePath = GenericTestUtils.getRandomizedTempPath()
       .concat("/" + UUID.randomUUID().toString());
@@ -66,9 +66,9 @@ public class TestNodeReportHandler implements EventPublisher {
     this.versionManager =
         Mockito.mock(HDDSLayoutVersionManager.class);
     Mockito.when(versionManager.getMetadataLayoutVersion())
-        .thenReturn(METADATA_LAYOUT_VERSION);
+        .thenReturn(maxLayoutVersion());
     Mockito.when(versionManager.getSoftwareLayoutVersion())
-        .thenReturn(SOFTWARE_LAYOUT_VERSION);
+        .thenReturn(maxLayoutVersion());
     nodeManager =
         new SCMNodeManager(conf, storageConfig, new EventQueue(), clusterMap,
             versionManager);
@@ -84,8 +84,7 @@ public class TestNodeReportHandler implements EventPublisher {
     SCMNodeMetric nodeMetric = nodeManager.getNodeStat(dn);
     Assert.assertNull(nodeMetric);
 
-    nodeManager.register(dn, getNodeReport(dn, storageOne).getReport(), null,
-        null);
+    nodeManager.register(dn, getNodeReport(dn, storageOne).getReport(), null);
     nodeMetric = nodeManager.getNodeStat(dn);
 
     Assert.assertTrue(nodeMetric.get().getCapacity().get() == 100);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeStateManager.java
index 3962521..20ce7c3 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeStateManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeStateManager.java
@@ -43,6 +43,8 @@ import java.util.List;
 import java.util.UUID;
 
 import static junit.framework.TestCase.assertEquals;
+import static org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.defaultLayoutVersionProto;
+import static org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager.maxLayoutVersion;
 
 /**
  * Class to test the NodeStateManager, which is an internal class used by
@@ -54,8 +56,6 @@ public class TestNodeStateManager {
   private NodeStateManager nsm;
   private ConfigurationSource conf;
   private MockEventPublisher eventPublisher;
-  private static final int TEST_SOFTWARE_LAYOUT_VERSION = 0;
-  private static final int TEST_METADATA_LAYOUT_VERSION = 0;
 
   @Before
   public void setUp() {
@@ -79,9 +79,9 @@ public class TestNodeStateManager {
     LayoutVersionManager mockVersionManager =
         Mockito.mock(HDDSLayoutVersionManager.class);
     Mockito.when(mockVersionManager.getMetadataLayoutVersion())
-        .thenReturn(TEST_METADATA_LAYOUT_VERSION);
+        .thenReturn(maxLayoutVersion());
     Mockito.when(mockVersionManager.getSoftwareLayoutVersion())
-        .thenReturn(TEST_SOFTWARE_LAYOUT_VERSION);
+        .thenReturn(maxLayoutVersion());
     nsm = new NodeStateManager(conf, eventPublisher, mockVersionManager);
   }
 
@@ -141,15 +141,15 @@ public class TestNodeStateManager {
     long deadLimit = HddsServerUtil.getDeadNodeInterval(conf) + 1000;
 
     DatanodeDetails staleDn = generateDatanode();
-    nsm.addNode(staleDn, null);
+    nsm.addNode(staleDn, defaultLayoutVersionProto());
     nsm.getNode(staleDn).updateLastHeartbeatTime(now - staleLimit);
 
     DatanodeDetails deadDn = generateDatanode();
-    nsm.addNode(deadDn, null);
+    nsm.addNode(deadDn, defaultLayoutVersionProto());
     nsm.getNode(deadDn).updateLastHeartbeatTime(now - deadLimit);
 
     DatanodeDetails healthyDn = generateDatanode();
-    nsm.addNode(healthyDn, null);
+    nsm.addNode(healthyDn, defaultLayoutVersionProto());
     nsm.getNode(healthyDn).updateLastHeartbeatTime();
 
     nsm.checkNodesHealth();
@@ -174,7 +174,7 @@ public class TestNodeStateManager {
     long deadLimit = HddsServerUtil.getDeadNodeInterval(conf) + 1000;
 
     DatanodeDetails dn = generateDatanode();
-    nsm.addNode(dn, null);
+    nsm.addNode(dn, defaultLayoutVersionProto());
     assertEquals(SCMEvents.NEW_NODE, eventPublisher.getLastEvent());
     DatanodeInfo dni = nsm.getNode(dn);
     dni.updateLastHeartbeatTime();
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
index face870..2d02c3e 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
@@ -94,6 +94,7 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_STALENODE_INTERVAL;
 import static org.apache.hadoop.hdds.scm.events.SCMEvents.DATANODE_COMMAND;
+import static org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.toLayoutVersionProto;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.mock;
@@ -179,10 +180,9 @@ public class TestSCMNodeManager {
     try (SCMNodeManager nodeManager = createNodeManager(getConf())) {
       LayoutVersionManager versionManager =
           nodeManager.getLayoutVersionManager();
-      LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
-          .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion())
-          .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion())
-          .build();
+      LayoutVersionProto layoutInfo = toLayoutVersionProto(
+          versionManager.getMetadataLayoutVersion(),
+          versionManager.getSoftwareLayoutVersion());
       int registeredNodes = 5;
       // Send some heartbeats from different nodes.
       for (int x = 0; x < registeredNodes; x++) {
@@ -211,15 +211,17 @@ public class TestSCMNodeManager {
       throws IOException, InterruptedException, AuthenticationException {
 
     try (SCMNodeManager nodeManager = createNodeManager(getConf())) {
-      Integer nodeManagerSoftwareLayoutVersion =
-          nodeManager.getLayoutVersionManager().getSoftwareLayoutVersion();
-      LayoutVersionProto layoutInfoSuccess = LayoutVersionProto.newBuilder()
-          .setMetadataLayoutVersion(1)
-          .setSoftwareLayoutVersion(nodeManagerSoftwareLayoutVersion).build();
-      LayoutVersionProto layoutInfoFailure = LayoutVersionProto.newBuilder()
-          .setMetadataLayoutVersion(1)
-          .setSoftwareLayoutVersion(nodeManagerSoftwareLayoutVersion + 1)
-          .build();
+      HDDSLayoutVersionManager layoutVersionManager =
+          nodeManager.getLayoutVersionManager();
+      int nodeManagerMetadataLayoutVersion =
+          layoutVersionManager.getMetadataLayoutVersion();
+      int nodeManagerSoftwareLayoutVersion =
+          layoutVersionManager.getSoftwareLayoutVersion();
+      LayoutVersionProto layoutInfoSuccess = toLayoutVersionProto(
+          nodeManagerMetadataLayoutVersion, nodeManagerSoftwareLayoutVersion);
+      LayoutVersionProto layoutInfoFailure = toLayoutVersionProto(
+          nodeManagerSoftwareLayoutVersion + 1,
+          nodeManagerSoftwareLayoutVersion + 1);
       RegisteredCommand rcmd = nodeManager.register(
           MockDatanodeDetails.randomDatanodeDetails(), null,
           getRandomPipelineReports(), layoutInfoSuccess);
@@ -269,10 +271,9 @@ public class TestSCMNodeManager {
     DatanodeDetails datanodeDetails = TestUtils
         .createRandomDatanodeAndRegister(nodeManager);
     LayoutVersionManager versionManager = nodeManager.getLayoutVersionManager();
-    LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
-        .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion())
-        .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion())
-        .build();
+    LayoutVersionProto layoutInfo = toLayoutVersionProto(
+        versionManager.getMetadataLayoutVersion(),
+        versionManager.getSoftwareLayoutVersion());
     nodeManager.close();
 
     // These should never be processed.
@@ -301,10 +302,9 @@ public class TestSCMNodeManager {
     try (SCMNodeManager nodeManager = createNodeManager(conf)) {
       LayoutVersionManager versionManager =
           nodeManager.getLayoutVersionManager();
-      LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
-          .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion())
-          .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion())
-          .build();
+      LayoutVersionProto layoutInfo = toLayoutVersionProto(
+          versionManager.getMetadataLayoutVersion(),
+          versionManager.getSoftwareLayoutVersion());
 
       for (int x = 0; x < count; x++) {
         DatanodeDetails datanodeDetails = TestUtils
@@ -400,10 +400,9 @@ public class TestSCMNodeManager {
     try (SCMNodeManager nodeManager = createNodeManager(conf)) {
       LayoutVersionManager versionManager =
           nodeManager.getLayoutVersionManager();
-      LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
-          .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion())
-          .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion())
-          .build();
+      LayoutVersionProto layoutInfo = toLayoutVersionProto(
+          versionManager.getMetadataLayoutVersion(),
+          versionManager.getSoftwareLayoutVersion());
       List<DatanodeDetails> nodeList = createNodeSet(nodeManager, nodeCount);
 
 
@@ -507,10 +506,9 @@ public class TestSCMNodeManager {
     try (SCMNodeManager nodeManager = createNodeManager(conf)) {
       LayoutVersionManager versionManager =
           nodeManager.getLayoutVersionManager();
-      LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
-          .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion())
-          .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion())
-          .build();
+      LayoutVersionProto layoutInfo = toLayoutVersionProto(
+          versionManager.getMetadataLayoutVersion(),
+          versionManager.getSoftwareLayoutVersion());
       DatanodeDetails node1 =
           TestUtils.createRandomDatanodeAndRegister(nodeManager);
       DatanodeDetails node2 =
@@ -610,7 +608,7 @@ public class TestSCMNodeManager {
     when(scmStorageConfig.getClusterID()).thenReturn("xyz111");
     EventPublisher eventPublisher = mock(EventPublisher.class);
     HDDSLayoutVersionManager lvm  =
-        new HDDSLayoutVersionManager(scmStorageConfig);
+        new HDDSLayoutVersionManager(scmStorageConfig.getLayoutVersion());
     SCMNodeManager nodeManager  = new SCMNodeManager(conf,
         scmStorageConfig, eventPublisher, new NetworkTopologyImpl(conf), lvm);
     DatanodeDetails node1 =
@@ -716,10 +714,9 @@ public class TestSCMNodeManager {
     try (SCMNodeManager nodeManager = createNodeManager(conf)) {
       LayoutVersionManager versionManager =
           nodeManager.getLayoutVersionManager();
-      LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
-          .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion())
-          .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion())
-          .build();
+      LayoutVersionProto layoutInfo = toLayoutVersionProto(
+          versionManager.getMetadataLayoutVersion(),
+          versionManager.getSoftwareLayoutVersion());
       DatanodeDetails healthyNode =
           TestUtils.createRandomDatanodeAndRegister(nodeManager);
       DatanodeDetails staleNode =
@@ -840,10 +837,9 @@ public class TestSCMNodeManager {
                                 List<DatanodeDetails> list,
                                 int sleepDuration) throws InterruptedException {
     LayoutVersionManager versionManager = manager.getLayoutVersionManager();
-    LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
-        .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion())
-        .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion())
-        .build();
+    LayoutVersionProto layoutInfo = toLayoutVersionProto(
+        versionManager.getMetadataLayoutVersion(),
+        versionManager.getSoftwareLayoutVersion());
     while (!Thread.currentThread().isInterrupted()) {
       for (DatanodeDetails dn : list) {
         manager.processHeartbeat(dn, layoutInfo);
@@ -931,10 +927,9 @@ public class TestSCMNodeManager {
 
       LayoutVersionManager versionManager =
           nodeManager.getLayoutVersionManager();
-      LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
-          .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion())
-          .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion())
-          .build();
+      LayoutVersionProto layoutInfo = toLayoutVersionProto(
+          versionManager.getMetadataLayoutVersion(),
+          versionManager.getSoftwareLayoutVersion());
 
       // No Thread just one time HBs the node manager, so that these will be
       // marked as dead nodes eventually.
@@ -1068,10 +1063,10 @@ public class TestSCMNodeManager {
     try (SCMNodeManager nodeManager = createNodeManager(conf)) {
       LayoutVersionManager versionManager =
           nodeManager.getLayoutVersionManager();
-      LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
-          .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion())
-          .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion())
-          .build();
+      LayoutVersionProto layoutInfo = toLayoutVersionProto(
+          versionManager.getMetadataLayoutVersion(),
+          versionManager.getSoftwareLayoutVersion());
+
       EventQueue eventQueue = (EventQueue) scm.getEventQueue();
       for (int x = 0; x < nodeCount; x++) {
         DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
@@ -1081,8 +1076,7 @@ public class TestSCMNodeManager {
         String storagePath = testDir.getAbsolutePath() + "/" + dnId;
         StorageReportProto report = TestUtils
             .createStorageReport(dnId, storagePath, capacity, used, free, null);
-        nodeManager.register(dn, TestUtils.createNodeReport(report), null,
-            null);
+        nodeManager.register(dn, TestUtils.createNodeReport(report), null);
         nodeManager.processHeartbeat(dn, layoutInfo);
       }
       //TODO: wait for EventQueue to be processed
@@ -1137,13 +1131,12 @@ public class TestSCMNodeManager {
                 used, free, null, failed));
         failed = !failed;
       }
-      nodeManager.register(dn, TestUtils.createNodeReport(reports), null, null);
+      nodeManager.register(dn, TestUtils.createNodeReport(reports), null);
       LayoutVersionManager versionManager =
           nodeManager.getLayoutVersionManager();
-      LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
-          .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion())
-          .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion())
-          .build();
+      LayoutVersionProto layoutInfo = toLayoutVersionProto(
+          versionManager.getMetadataLayoutVersion(),
+          versionManager.getSoftwareLayoutVersion());
       nodeManager.processHeartbeat(dn, layoutInfo);
       //TODO: wait for EventQueue to be processed
       eventQueue.processAll(8000L);
@@ -1203,10 +1196,9 @@ public class TestSCMNodeManager {
             publisher);
         LayoutVersionManager versionManager =
             nodeManager.getLayoutVersionManager();
-        LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
-            .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion())
-            .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion())
-            .build();
+        LayoutVersionProto layoutInfo = toLayoutVersionProto(
+            versionManager.getMetadataLayoutVersion(),
+            versionManager.getSoftwareLayoutVersion());
         nodeManager.processHeartbeat(datanodeDetails, layoutInfo);
         Thread.sleep(100);
       }
@@ -1284,10 +1276,9 @@ public class TestSCMNodeManager {
 
       LayoutVersionManager versionManager =
           nodeManager.getLayoutVersionManager();
-      LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
-          .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion())
-          .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion())
-          .build();
+      LayoutVersionProto layoutInfo = toLayoutVersionProto(
+          versionManager.getMetadataLayoutVersion(),
+          versionManager.getSoftwareLayoutVersion());
 
       nodeManager.processHeartbeat(datanodeDetails, layoutInfo);
 
@@ -1329,9 +1320,8 @@ public class TestSCMNodeManager {
     try (SCMNodeManager nodemanager = createNodeManager(conf)) {
       eq.addHandler(DATANODE_COMMAND, nodemanager);
 
-      nodemanager
-          .register(datanodeDetails, TestUtils.createNodeReport(report),
-              getRandomPipelineReports(), null);
+      nodemanager.register(datanodeDetails, TestUtils.createNodeReport(report),
+              getRandomPipelineReports());
       eq.fireEvent(DATANODE_COMMAND,
           new CommandForDatanode<>(datanodeDetails.getUuid(),
               new CloseContainerCommand(1L,
@@ -1339,10 +1329,9 @@ public class TestSCMNodeManager {
 
       LayoutVersionManager versionManager =
           nodemanager.getLayoutVersionManager();
-      LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
-          .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion())
-          .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion())
-          .build();
+      LayoutVersionProto layoutInfo = toLayoutVersionProto(
+          versionManager.getMetadataLayoutVersion(),
+          versionManager.getSoftwareLayoutVersion());
       eq.processAll(1000L);
       List<SCMCommand> command =
           nodemanager.processHeartbeat(datanodeDetails, layoutInfo);
@@ -1424,7 +1413,7 @@ public class TestSCMNodeManager {
       for (int i = 0; i < nodeCount; i++) {
         DatanodeDetails node = createDatanodeDetails(
             UUID.randomUUID().toString(), hostNames[i], ipAddress[i], null);
-        nodeManager.register(node, null, null, null);
+        nodeManager.register(node, null, null);
       }
 
       // verify network topology cluster has all the registered nodes
@@ -1467,7 +1456,7 @@ public class TestSCMNodeManager {
       for (int i = 0; i < nodeCount; i++) {
         DatanodeDetails node = createDatanodeDetails(
             UUID.randomUUID().toString(), hostNames[i], ipAddress[i], null);
-        nodeManager.register(node, null, null, null);
+        nodeManager.register(node, null, null);
       }
 
       // verify network topology cluster has all the registered nodes
@@ -1517,10 +1506,9 @@ public class TestSCMNodeManager {
 
       LayoutVersionManager versionManager =
           nodeManager.getLayoutVersionManager();
-      LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
-          .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion())
-          .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion())
-          .build();
+      LayoutVersionProto layoutInfo = toLayoutVersionProto(
+          versionManager.getMetadataLayoutVersion(),
+          versionManager.getSoftwareLayoutVersion());
       nodeManager.register(datanodeDetails, TestUtils.createNodeReport(report),
           TestUtils.getRandomPipelineReports(), layoutInfo);
       nodeManager.processHeartbeat(datanodeDetails, layoutInfo);
@@ -1574,7 +1562,7 @@ public class TestSCMNodeManager {
       for (int i = 0; i < nodeCount; i++) {
         DatanodeDetails node = createDatanodeDetails(
             UUID.randomUUID().toString(), hostNames[i], ipAddress[i], null);
-        nodeManager.register(node, null, null, null);
+        nodeManager.register(node, null, null);
       }
       // test get node
       Assert.assertEquals(0, nodeManager.getNodesByAddress(null).size());
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java
index 4c47a07..f71aca6 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java
@@ -97,9 +97,9 @@ public class TestStatisticsUpdate {
         datanode2.getUuid(), storagePath2, 200, 20, 180, null);
 
     nodeManager.register(datanode1,
-        TestUtils.createNodeReport(storageOne), null, null);
+        TestUtils.createNodeReport(storageOne), null);
     nodeManager.register(datanode2,
-        TestUtils.createNodeReport(storageTwo), null, null);
+        TestUtils.createNodeReport(storageTwo), null);
 
     NodeReportProto nodeReportProto1 = TestUtils.createNodeReport(storageOne);
     NodeReportProto nodeReportProto2 = TestUtils.createNodeReport(storageTwo);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
index 2fdb36f..8c567e9 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
@@ -65,7 +65,7 @@ public class TestSCMBlockProtocolServer {
     // add nodes to scm node manager
     nodeManager = scm.getScmNodeManager();
     for (int i = 0; i < NODE_COUNT; i++) {
-      nodeManager.register(randomDatanodeDetails(), null, null, null);
+      nodeManager.register(randomDatanodeDetails(), null, null);
 
     }
     server = scm.getBlockProtocolServer();
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index 963f9fd..b966a6c 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -27,8 +27,6 @@ import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CloseContainerCommandProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeleteBlocksCommandProto;
@@ -65,6 +63,8 @@ import org.apache.hadoop.util.Time;
 import static org.apache.hadoop.hdds.DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY;
 import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
 import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
+import static org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.defaultLayoutVersionProto;
+import static org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager.maxLayoutVersion;
 import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.createEndpoint;
 import static org.hamcrest.Matchers.lessThanOrEqualTo;
 import org.junit.AfterClass;
@@ -87,8 +87,6 @@ public class TestEndPoint {
   private static ScmTestMock scmServerImpl;
   private static File testDir;
   private static OzoneConfiguration config;
-  private static final int TEST_SOFTWARE_LAYOUT_VERSION = 0;
-  private static final int TEST_METADATA_LAYOUT_VERSION = 0;
 
   @AfterClass
   public static void tearDown() throws Exception {
@@ -273,10 +271,6 @@ public class TestEndPoint {
   @Test
   public void testRegister() throws Exception {
     DatanodeDetails nodeToRegister = randomDatanodeDetails();
-    LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
-        .setMetadataLayoutVersion(TEST_METADATA_LAYOUT_VERSION)
-        .setSoftwareLayoutVersion(TEST_SOFTWARE_LAYOUT_VERSION)
-        .build();
     try (EndpointStateMachine rpcEndPoint = createEndpoint(
         SCMTestUtils.getConf(), serverAddress, 1000)) {
       SCMRegisteredResponseProto responseProto = rpcEndPoint.getEndPoint()
@@ -284,7 +278,8 @@ public class TestEndPoint {
                   .createNodeReport(
                       getStorageReports(nodeToRegister.getUuid())),
               TestUtils.getRandomContainerReports(10),
-              TestUtils.getRandomPipelineReports(), layoutInfo);
+              TestUtils.getRandomPipelineReports(),
+              defaultLayoutVersionProto());
       Assert.assertNotNull(responseProto);
       Assert.assertEquals(nodeToRegister.getUuidString(),
           responseProto.getDatanodeUUID());
@@ -320,9 +315,9 @@ public class TestEndPoint {
     HDDSLayoutVersionManager versionManager =
         Mockito.mock(HDDSLayoutVersionManager.class);
     when(versionManager.getMetadataLayoutVersion())
-        .thenReturn(TEST_METADATA_LAYOUT_VERSION);
+        .thenReturn(maxLayoutVersion());
     when(versionManager.getSoftwareLayoutVersion())
-        .thenReturn(TEST_SOFTWARE_LAYOUT_VERSION);
+        .thenReturn(maxLayoutVersion());
     RegisterEndpointTask endpointTask =
         new RegisterEndpointTask(rpcEndPoint, conf, ozoneContainer,
             mock(StateContext.class), versionManager);
@@ -494,7 +489,7 @@ public class TestEndPoint {
 
       HeartbeatEndpointTask endpointTask =
           new HeartbeatEndpointTask(rpcEndPoint, conf, stateContext,
-              stateMachine.getDataNodeVersionManager());
+              stateMachine.getLayoutVersionManager());
       endpointTask.setDatanodeDetailsProto(datanodeDetailsProto);
       endpointTask.call();
       Assert.assertNotNull(endpointTask.getDatanodeDetailsProto());
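
Note: the static import of UpgradeUtils.defaultLayoutVersionProto() above replaces the hand-built LayoutVersionProto constants in this test. Given the commit's goal (fresh deploys default to the highest layout version), the helper presumably builds a proto whose metadata and software layout versions are both maxLayoutVersion(); a rough sketch under that assumption, using only the names imported above:

    public static LayoutVersionProto defaultLayoutVersionProto() {
      // Both MLV and SLV default to the highest layout version known to
      // this build of the software.
      return LayoutVersionProto.newBuilder()
          .setMetadataLayoutVersion(maxLayoutVersion())
          .setSoftwareLayoutVersion(maxLayoutVersion())
          .build();
    }
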
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java
index 97cce34..f800533 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 
 import static java.lang.Thread.sleep;
+import static org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager.maxLayoutVersion;
 import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
 import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
@@ -58,8 +59,6 @@ import org.mockito.Mockito;
  */
 public class TestSCMNodeMetrics {
 
-  private static final Integer METADATA_LAYOUT_VERSION = 1;
-  private static final Integer SOFTWARE_LAYOUT_VERSION = 1;
   private static SCMNodeManager nodeManager;
 
   private static DatanodeDetails registeredDatanode;
@@ -74,9 +73,9 @@ public class TestSCMNodeMetrics {
     HDDSLayoutVersionManager versionManager =
         Mockito.mock(HDDSLayoutVersionManager.class);
     Mockito.when(versionManager.getMetadataLayoutVersion())
-        .thenReturn(METADATA_LAYOUT_VERSION);
+        .thenReturn(maxLayoutVersion());
     Mockito.when(versionManager.getSoftwareLayoutVersion())
-        .thenReturn(SOFTWARE_LAYOUT_VERSION);
+        .thenReturn(maxLayoutVersion());
     nodeManager = new SCMNodeManager(source, config, publisher,
         new NetworkTopologyImpl(source), versionManager);
 
@@ -87,7 +86,7 @@ public class TestSCMNodeMetrics {
         .build();
 
     nodeManager.register(registeredDatanode, createNodeReport(),
-        PipelineReportsProto.newBuilder().build(), null);
+        PipelineReportsProto.newBuilder().build());
 
   }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java
index c5ac153..36d0fe6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hdds.upgrade;
 
-import static java.lang.Thread.sleep;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.CLOSED;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.QUASI_CLOSED;
@@ -26,7 +25,9 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY_READONLY;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
 import static org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState.OPEN;
+import static org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature.INITIAL_VERSION;
 import static org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.Status.FINALIZATION_DONE;
 import static org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.Status.STARTING_FINALIZATION;
 
@@ -100,13 +101,15 @@ public class TestHDDSUpgrade {
     conf = new OzoneConfiguration();
     conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 1000,
             TimeUnit.MILLISECONDS);
-    int numOfNodes = NUM_DATA_NODES;
+    conf.set(OZONE_DATANODE_PIPELINE_LIMIT, "1");
     cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(numOfNodes)
+        .setNumDatanodes(NUM_DATA_NODES)
         // allow only one FACTOR THREE pipeline.
-        .setTotalPipelineNumLimit(numOfNodes + 1)
+        .setTotalPipelineNumLimit(NUM_DATA_NODES + 1)
         .setHbInterval(1000)
         .setHbProcessorInterval(1000)
+        .setScmLayoutVersion(INITIAL_VERSION.layoutVersion())
+        .setDnLayoutVersion(INITIAL_VERSION.layoutVersion())
         .build();
     cluster.waitForClusterToBeReady();
     scm = cluster.getStorageContainerManager();
@@ -127,7 +130,8 @@ public class TestHDDSUpgrade {
   }
 
   private void testPreUpgradeConditionsSCM() {
-    Assert.assertEquals(0, scmVersionManager.getMetadataLayoutVersion());
+    Assert.assertEquals(INITIAL_VERSION.layoutVersion(),
+        scmVersionManager.getMetadataLayoutVersion());
     for (ContainerInfo ci : scmContainerManager.getContainers()) {
       Assert.assertEquals(HddsProtos.LifeCycleState.OPEN, ci.getState());
     }
@@ -152,7 +156,7 @@ public class TestHDDSUpgrade {
     for (HddsDatanodeService dataNode : cluster.getHddsDatanodes()) {
       DatanodeStateMachine dsm = dataNode.getDatanodeStateMachine();
       HDDSLayoutVersionManager dnVersionManager =
-          dsm.getDataNodeVersionManager();
+          dsm.getLayoutVersionManager();
       Assert.assertEquals(0, dnVersionManager.getMetadataLayoutVersion());
 
     }
@@ -178,19 +182,16 @@ public class TestHDDSUpgrade {
       GenericTestUtils.waitFor(() -> {
         for (HddsDatanodeService dataNode : cluster.getHddsDatanodes()) {
           DatanodeStateMachine dsm = dataNode.getDatanodeStateMachine();
-          HDDSLayoutVersionManager dnVersionManager =
-              dsm.getDataNodeVersionManager();
           try {
             if (dsm.queryUpgradeStatus().status() != FINALIZATION_DONE) {
               return false;
             }
           } catch (IOException e) {
-            e.printStackTrace();
             return false;
           }
         }
         return true;
-      }, 1000, 20000);
+      }, 2000, 20000);
     } catch (TimeoutException | InterruptedException e) {
       Assert.fail("Timeout waiting for Upgrade to complete on Data Nodes.");
     }
@@ -199,7 +200,7 @@ public class TestHDDSUpgrade {
     for (HddsDatanodeService dataNode : cluster.getHddsDatanodes()) {
       DatanodeStateMachine dsm = dataNode.getDatanodeStateMachine();
       HDDSLayoutVersionManager dnVersionManager =
-          dsm.getDataNodeVersionManager();
+          dsm.getLayoutVersionManager();
       Assert.assertEquals(dnVersionManager.getSoftwareLayoutVersion(),
           dnVersionManager.getMetadataLayoutVersion());
       Assert.assertTrue(dnVersionManager.getMetadataLayoutVersion() >= 1);
@@ -251,7 +252,8 @@ public class TestHDDSUpgrade {
   }
 
   @Test
-  public void testLayoutUpgrade() throws Exception {
+  public void testFinalizationFromInitialVersionToLatestVersion()
+      throws Exception {
 
     waitForPipelineCreated();
 
@@ -287,14 +289,18 @@ public class TestHDDSUpgrade {
     testDataNodesStateOnSCM(HEALTHY_READONLY);
 
     // Verify the SCM has driven all the DataNodes through Layout Upgrade.
-    sleep(5000);
     testPostUpgradeConditionsDataNodes();
 
-    // Allow some time for heartbeat exchanges.
-    sleep(5000);
-
-    // All datanodes on the SCM should have moved to HEALTHY-READ-WRITE state.
-    testDataNodesStateOnSCM(HEALTHY);
+    // Need to wait for the post-finalization heartbeat from DNs.
+    LambdaTestUtils.await(30000, 5000, () -> {
+      try {
+        testDataNodesStateOnSCM(HEALTHY);
+      } catch (Throwable ex) {
+        LOG.info(ex.getMessage());
+        return false;
+      }
+      return true;
+    });
 
     // Verify that new pipeline can be created with upgraded datanodes.
     testPostUpgradePipelineCreation();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
index 87b2679..b98b314 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
@@ -300,6 +300,11 @@ public interface MiniOzoneCluster {
     protected Optional<StorageUnit> streamBufferSizeUnit = Optional.empty();
     protected boolean includeRecon = false;
 
+
+    protected Optional<Integer> omLayoutVersion = Optional.empty();
+    protected Optional<Integer> scmLayoutVersion = Optional.empty();
+    protected Optional<Integer> dnLayoutVersion = Optional.empty();
+
     // Use relative smaller number of handlers for testing
     protected int numOfOmHandlers = 20;
     protected int numOfScmHandlers = 20;
@@ -524,6 +529,21 @@ public interface MiniOzoneCluster {
       return this;
     }
 
+    public Builder setScmLayoutVersion(int layoutVersion) {
+      scmLayoutVersion = Optional.of(layoutVersion);
+      return this;
+    }
+
+    public Builder setOmLayoutVersion(int layoutVersion) {
+      omLayoutVersion = Optional.of(layoutVersion);
+      return this;
+    }
+
+    public Builder setDnLayoutVersion(int layoutVersion) {
+      dnLayoutVersion = Optional.of(layoutVersion);
+      return this;
+    }
+
     /**
      * Constructs and returns MiniOzoneCluster.
      *
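
Note: combined with the TestHDDSUpgrade changes above, these builder setters let an integration test start the whole cluster pinned to an older layout version and then exercise finalization; a minimal usage sketch mirroring that test setup:

    // Start SCM and datanodes at the initial layout version so the
    // upgrade/finalization path to the latest version can be exercised.
    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
        .setNumDatanodes(3)
        .setScmLayoutVersion(INITIAL_VERSION.layoutVersion())
        .setDnLayoutVersion(INITIAL_VERSION.layoutVersion())
        .build();
    cluster.waitForClusterToBeReady();
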
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index 4e046ab..705d81f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.common.Storage.StorageState;
+import org.apache.hadoop.ozone.container.common.DatanodeLayoutStorage;
 import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
 import org.apache.hadoop.ozone.container.replication.ReplicationServer.ReplicationConfig;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
@@ -647,7 +648,12 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster {
     protected StorageContainerManager createSCM()
         throws IOException, AuthenticationException {
       configureSCM();
-      SCMStorageConfig scmStore = new SCMStorageConfig(conf);
+      SCMStorageConfig scmStore;
+      if (scmLayoutVersion.isPresent()) {
+        scmStore = new SCMStorageConfig(conf, scmLayoutVersion.get());
+      } else {
+        scmStore = new SCMStorageConfig(conf);
+      }
       initializeScmStorage(scmStore);
       StorageContainerManager scm = StorageContainerManager.createSCM(conf);
       HealthyPipelineSafeModeRule rule =
@@ -696,12 +702,29 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster {
     protected OzoneManager createOM()
         throws IOException, AuthenticationException {
       configureOM();
-      OMStorage omStore = new OMStorage(conf);
+      OMStorage omStore = newOMStorage(conf);
       initializeOmStorage(omStore);
       return OzoneManager.createOm(conf);
     }
 
     /**
+     * Create new OM storage based on layout version.
+     * @param conf configuration object.
+     * @return OMStorage instance.
+     * @throws IOException on error.
+     */
+    protected OMStorage newOMStorage(OzoneConfiguration conf)
+        throws IOException {
+      OMStorage omStore;
+      if (omLayoutVersion.isPresent()) {
+        omStore = new OMStorage(conf, omLayoutVersion.get());
+      } else {
+        omStore = new OMStorage(conf);
+      }
+      return omStore;
+    }
+
+    /**
      * Creates HddsDatanodeService(s) instance.
      *
      * @return List of HddsDatanodeService
@@ -752,9 +775,23 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster {
         datanode.setConfiguration(dnConf);
         hddsDatanodes.add(datanode);
       }
+      if (dnLayoutVersion.isPresent()) {
+        configureLayoutVersionInDatanodes(hddsDatanodes, dnLayoutVersion.get());
+      }
       return hddsDatanodes;
     }
 
+
+    private void configureLayoutVersionInDatanodes(
+        List<HddsDatanodeService> dns, int layoutVersion) throws IOException {
+      for (HddsDatanodeService dn : dns) {
+        DatanodeLayoutStorage layoutStorage;
+        layoutStorage = new DatanodeLayoutStorage(dn.getConf(),
+            UUID.randomUUID().toString(), layoutVersion);
+        layoutStorage.initialize();
+      }
+    }
+
     private void configureSCM() {
       conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0");
       conf.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, "127.0.0.1:0");
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
index 4df5f22..e74c170 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
@@ -364,7 +364,7 @@ public class MiniOzoneHAClusterImpl extends MiniOzoneClusterImpl {
             // Set metadata/DB dir base path
             String metaDirPath = path + "/" + nodeId;
             config.set(OZONE_METADATA_DIRS, metaDirPath);
-            OMStorage omStore = new OMStorage(config);
+            OMStorage omStore = newOMStorage(config);
             initializeOmStorage(omStore);
 
             OzoneManager om = OzoneManager.createOm(config);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java
index c527af9..94fc8f6 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdds.server.ServerUtils;
 import org.apache.hadoop.ozone.common.Storage;
 
 import static org.apache.hadoop.ozone.OzoneConsts.SCM_ID;
+import static org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager.maxLayoutVersion;
 
 /**
  * OMStorage is responsible for management of the StorageDirectories used by
@@ -45,7 +46,12 @@ public class OMStorage extends Storage {
    * @throws IOException if any directories are inaccessible.
    */
   public OMStorage(OzoneConfiguration conf) throws IOException {
-    super(NodeType.OM, getOmDbDir(conf), STORAGE_DIR);
+    super(NodeType.OM, getOmDbDir(conf), STORAGE_DIR, maxLayoutVersion());
+  }
+
+  public OMStorage(OzoneConfiguration conf, int defaultLayoutVersion)
+      throws IOException {
+    super(NodeType.OM, getOmDbDir(conf), STORAGE_DIR, defaultLayoutVersion);
   }
 
   public void setScmId(String scmId) throws IOException {
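
Note: the two constructors separate the production path from the test path: a fresh OM install records the highest known layout version, while tests can pin an explicit (older) version to simulate a pre-upgrade cluster. A small sketch of the intended call sites, assuming only the signatures shown above:

    // Production: a fresh OM deploy starts at maxLayoutVersion().
    OMStorage omStore = new OMStorage(conf);

    // Tests: pin an older version to simulate a pre-upgrade OM, as
    // MiniOzoneClusterImpl.newOMStorage() does with omLayoutVersion.
    OMStorage oldOmStore =
        new OMStorage(conf, OMLayoutFeature.INITIAL_VERSION.layoutVersion());
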
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/ECFeatureOnFinalizeAction.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/ECFeatureOnFinalizeAction.java
deleted file mode 100644
index c294dc2..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/ECFeatureOnFinalizeAction.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.upgrade;
-
-import org.apache.hadoop.ozone.om.OzoneManager;
-
-/**
- * Stub OM Action class to help with understanding. Will be removed.
- */
-public class ECFeatureOnFinalizeAction implements OmUpgradeAction {
-
-  @Override
-  public void executeAction(OzoneManager ozoneManager) {
-    // Do blah....
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeature.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeature.java
index d834f9b..220814e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeature.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeature.java
@@ -27,9 +27,7 @@ import org.apache.hadoop.ozone.upgrade.LayoutFeature;
  */
 public enum OMLayoutFeature implements LayoutFeature {
   //////////////////////////////  //////////////////////////////
-  INITIAL_VERSION(0, "Initial Layout Version"),
-  ERASURE_CODING(1, "",
-      new ECFeatureOnFinalizeAction()); // Mock Feature. To be removed later.
+  INITIAL_VERSION(0, "Initial Layout Version");
 
   //////////////////////////////  //////////////////////////////
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManager.java
index e62c629..e994675 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManager.java
@@ -87,11 +87,6 @@ public final class OMLayoutVersionManager
     registerOzoneManagerRequests();
   }
 
-  public void reset() {
-    requestFactory = null;
-    super.reset();
-  }
-
   private void registerOzoneManagerRequests() {
     try {
       for (Class<? extends OMClientRequest> reqClass : getRequestClasses()) {
@@ -162,4 +157,10 @@ public final class OMLayoutVersionManager
     super.finalized(layoutFeature);
     requestFactory.finalizeFeature(layoutFeature);
   }
+
+  public static int maxLayoutVersion() {
+    OMLayoutFeature[] features = OMLayoutFeature.values();
+    return features[features.length - 1].layoutVersion();
+  }
+
 }
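
Note: with the mock ERASURE_CODING feature removed, OMLayoutFeature only defines INITIAL_VERSION(0), so maxLayoutVersion() currently evaluates to 0; it automatically tracks whichever feature is declared last in the enum. A small test-style sketch of that invariant, assuming JUnit as used elsewhere in this patch:

    @Test
    public void maxLayoutVersionTracksLastDeclaredFeature() {
      OMLayoutFeature[] features = OMLayoutFeature.values();
      Assert.assertEquals(features[features.length - 1].layoutVersion(),
          OMLayoutVersionManager.maxLayoutVersion());
    }
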
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/UnsupportedMockNewOMRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/UnsupportedMockNewOMRequest.java
deleted file mode 100644
index 0ff7712..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/UnsupportedMockNewOMRequest.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request;
-
-import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.ERASURE_CODING;
-
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.upgrade.BelongsToLayoutVersion;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-
-/**
- * Mock OM request which is unsupported still.
- */
-@BelongsToLayoutVersion(ERASURE_CODING)
-public class UnsupportedMockNewOMRequest extends OMClientRequest {
-
-  public UnsupportedMockNewOMRequest(OMRequest omRequest) {
-    super(omRequest);
-  }
-
-  @Override
-  public OMClientResponse validateAndUpdateCache(
-      OzoneManager ozoneManager,
-      long transactionLogIndex,
-      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
-    return new OMClientResponse(
-        OMResponse.newBuilder().setSuccess(true).build()) {
-      @Override
-      protected void addToDBBatch(OMMetadataManager omMetadataManager,
-                                  BatchOperation batchOperation) {
-      }
-    };
-  }
-
-  public static String getRequestType() {
-    return UnsupportedMockNewOMRequest.class.getSimpleName();
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/OMMockECKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/OMMockECKeyCreateRequest.java
deleted file mode 100644
index c2610f6..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/OMMockECKeyCreateRequest.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.om.request.key;
-
-import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.ERASURE_CODING;
-
-import java.io.IOException;
-
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.upgrade.BelongsToLayoutVersion;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-
-/**
- * Handles Create EC Key  request. (To be removed later)
- */
-@BelongsToLayoutVersion(ERASURE_CODING)
-public class OMMockECKeyCreateRequest extends OMKeyCreateRequest {
-
-  public OMMockECKeyCreateRequest(OMRequest omRequest) {
-    super(omRequest);
-  }
-
-  @Override
-  public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
-    // V2 impl here.
-    return null;
-  }
-
-  @Override
-  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
-      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
-    // V2 impl here.
-    return null;
-  }
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureUtil.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureUtil.java
similarity index 92%
rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureUtil.java
rename to hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureUtil.java
index 1f6d470..a82ccb9 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureUtil.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureUtil.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.ozone.om.upgrade;
 
-import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.ERASURE_CODING;
-
 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -39,7 +37,7 @@ public class OMLayoutFeatureUtil {
    * "disallowed" by just adding the following annotation, thereby keeping the
    * method logic and upgrade logic separate.
    */
-  @DisallowedUntilLayoutVersion(ERASURE_CODING)
+  //@DisallowedUntilLayoutVersion(ERASURE_CODING)
   public String ecMethod() {
     // Blah Blah EC Blah....
     return "ec";
@@ -54,7 +52,8 @@ public class OMLayoutFeatureUtil {
     return "basic";
   }
 
-  // Needed for the Aspect.
+  // A method named 'getOmVersionManager' is needed for the Aspect to get
+  // an instance of the layout version manager.
   public LayoutVersionManager getOmVersionManager() throws IOException {
     OzoneConfiguration configuration = new OzoneConfiguration();
     Path tempDirWithPrefix = Files.createTempDirectory("TestAspect");
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMLayoutFeatureAspect.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMLayoutFeatureAspect.java
index eedb896..bbfb059 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMLayoutFeatureAspect.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMLayoutFeatureAspect.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.junit.Assert;
 import org.junit.Before;
+import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
@@ -53,11 +54,12 @@ public class TestOMLayoutFeatureAspect {
    * should fail, and the second one should pass.
    * @throws Exception
    */
+  @Ignore
   @Test
   public void testCheckLayoutFeature() throws Exception {
     OMLayoutFeatureUtil testObj = new OMLayoutFeatureUtil();
     try {
-      testObj.ecMethod();
+      String s = testObj.ecMethod();
       Assert.fail();
     } catch (Exception ex) {
       OMException omEx = (OMException) ex;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMVersionManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMVersionManager.java
index 51285f9..8af025a 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMVersionManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMVersionManager.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.ozone.om.upgrade;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_SUPPORTED_OPERATION;
-import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.ERASURE_CODING;
 import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.INITIAL_VERSION;
 import static org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager.getRequestClasses;
 import static org.junit.Assert.assertEquals;
@@ -38,8 +37,6 @@ import org.apache.hadoop.ozone.om.OMStorage;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
-import org.apache.hadoop.ozone.om.request.UnsupportedMockNewOMRequest;
-import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -58,15 +55,9 @@ public class TestOMVersionManager {
     when(om.getOmStorage()).thenReturn(omStorage);
 
     assertTrue(omVersionManager.isAllowed(INITIAL_VERSION));
-    assertFalse(omVersionManager.isAllowed(ERASURE_CODING));
     assertEquals(INITIAL_VERSION.layoutVersion(),
         omVersionManager.getMetadataLayoutVersion());
-    assertTrue(omVersionManager.needsFinalization());
-    OMUpgradeFinalizer finalizer = new OMUpgradeFinalizer(omVersionManager);
-    finalizer.finalize("random", om);
     assertFalse(omVersionManager.needsFinalization());
-    assertEquals(ERASURE_CODING.layoutVersion(),
-        omVersionManager.getMetadataLayoutVersion());
   }
 
   @Test
@@ -123,29 +114,4 @@ public class TestOMVersionManager {
       }
     }
   }
-
-  @Test
-  public void testCannotGetUnsupportedOmRequest() throws OMException {
-    OMStorage omStorage = mock(OMStorage.class);
-    when(omStorage.getLayoutVersion()).thenReturn(0);
-    OMLayoutVersionManager omVersionManager =
-        new OMLayoutVersionManager(omStorage);
-    OzoneManager om = mock(OzoneManager.class);
-    when(om.getOmStorage()).thenReturn(omStorage);
-
-    Class<? extends OMClientRequest> requestHandler;
-    try {
-      requestHandler = omVersionManager.getHandler(
-              UnsupportedMockNewOMRequest.class.getSimpleName());
-      Assert.fail();
-    } catch (IllegalArgumentException ex) {
-      GenericTestUtils.assertExceptionContains(
-          "No suitable instance found", ex);
-    }
-
-    omVersionManager.unfinalizedFeatures().forEach(omVersionManager::finalized);
-    requestHandler = omVersionManager.getHandler(
-        UnsupportedMockNewOMRequest.class.getSimpleName());
-    Assert.assertNotNull(requestHandler);
-  }
 }
\ No newline at end of file
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOmVersionManagerRequestFactory.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOmVersionManagerRequestFactory.java
index 40d14d7..85b394c 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOmVersionManagerRequestFactory.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOmVersionManagerRequestFactory.java
@@ -33,8 +33,6 @@ import org.apache.hadoop.ozone.om.OMStorage;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
-import org.apache.hadoop.ozone.om.request.UnsupportedMockNewOMRequest;
-import org.apache.hadoop.ozone.om.request.key.OMMockECKeyCreateRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyCreateRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.junit.Assert;
@@ -66,14 +64,6 @@ public class TestOmVersionManagerRequestFactory {
     Class<? extends OMClientRequest> requestType =
         omVersionManager.getHandler(CreateKey.name());
     Assert.assertEquals(requestType, OMKeyCreateRequest.class);
-
-    // Finalize the version manager.
-    OMUpgradeFinalizer f = new OMUpgradeFinalizer(omVersionManager);
-    f.finalize("random", om);
-
-    // Try getting 'CreateKey' again. Should return CreateECKey.
-    requestType = omVersionManager.getHandler(CreateKey.name());
-    Assert.assertEquals(requestType, OMMockECKeyCreateRequest.class);
   }
 
   @Test
@@ -88,9 +78,6 @@ public class TestOmVersionManagerRequestFactory {
             .collect(Collectors.toList());
 
     for (Class<? extends OMClientRequest> c : collect) {
-      if (c.equals(UnsupportedMockNewOMRequest.class)) {
-        continue;
-      }
       Method getRequestTypeMethod = null;
       try {
         getRequestTypeMethod = c.getMethod("getRequestType");
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java
index 871e039..a659153 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
 import org.apache.hadoop.hdds.scm.net.NetworkTopology;
 import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
 import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
@@ -37,6 +38,7 @@ import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.protocol.VersionResponse;
 import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.apache.hadoop.util.Time;
@@ -45,6 +47,7 @@ import com.google.common.collect.ImmutableSet;
 
 import static java.util.stream.Collectors.toList;
 import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.reregisterCommand;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -85,7 +88,10 @@ public class ReconNodeManager extends SCMNodeManager {
           iterator = nodeDB.iterator();
       while (iterator.hasNext()) {
         DatanodeDetails datanodeDetails = iterator.next().getValue();
-        register(datanodeDetails, null, null, null);
+        register(datanodeDetails, null, null,
+            LayoutVersionProto.newBuilder()
+                .setMetadataLayoutVersion(0)
+                .setSoftwareLayoutVersion(0).build());
         nodeCount++;
       }
       LOG.info("Loaded {} nodes from node DB.", nodeCount);
@@ -94,6 +100,13 @@ public class ReconNodeManager extends SCMNodeManager {
     }
   }
 
+  @Override
+  public VersionResponse getVersion(SCMVersionRequestProto versionRequest) {
+    return VersionResponse.newBuilder()
+        .setVersion(0)
+        .build();
+  }
+
   /**
    * Add a new node to the NodeDB. Must be called after register.
    * @param datanodeDetails Datanode details.
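
Note: Recon only mirrors SCM state, so nodes replayed from its node DB are re-registered with a zero-valued LayoutVersionProto (their real versions are presumably learned again from subsequent heartbeats), and getVersion() answers with a fixed version of 0. The inline builder could be factored into a tiny helper; a purely illustrative sketch, not part of this patch:

    // Illustrative only: a zero-valued LayoutVersionProto for nodes whose
    // actual layout versions are not yet known to Recon.
    private static LayoutVersionProto unknownLayoutVersion() {
      return LayoutVersionProto.newBuilder()
          .setMetadataLayoutVersion(0)
          .setSoftwareLayoutVersion(0)
          .build();
    }
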
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
index 7c313e9..7ed7896 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
@@ -107,7 +107,7 @@ public class ReconStorageContainerManagerFacade
         .createDBStore(ozoneConfiguration, new ReconSCMDBDefinition());
 
     this.scmLayoutVersionManager =
-        new HDDSLayoutVersionManager(scmStorageConfig);
+        new HDDSLayoutVersionManager(scmStorageConfig.getLayoutVersion());
     this.nodeManager =
         new ReconNodeManager(conf, scmStorageConfig, eventQueue, clusterMap,
             ReconSCMDBDefinition.NODES.getTable(dbStore),
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
index de86d61..02c42d9 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
@@ -80,6 +80,7 @@ import org.junit.Before;
 import org.junit.Test;
 
 import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
+import static org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.defaultLayoutVersionProto;
 import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getRandomPipeline;
 import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager;
 import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager;
@@ -140,8 +141,6 @@ public class TestEndpoints extends AbstractReconSqlDBTest {
   private static final String PROMETHEUS_TEST_RESPONSE_FILE =
       "prometheus-test-response.txt";
   private ReconUtils reconUtilsMock;
-  private static final int TEST_SOFTWARE_LAYOUT_VERSION = 0;
-  private static final int TEST_METADATA_LAYOUT_VERSION = 0;
 
   private void initializeInjector() throws Exception {
     reconOMMetadataManager = getTestReconOmMetadataManager(
@@ -326,10 +325,7 @@ public class TestEndpoints extends AbstractReconSqlDBTest {
         NodeReportProto.newBuilder()
             .addStorageReport(storageReportProto3)
             .addStorageReport(storageReportProto4).build();
-    LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
-        .setMetadataLayoutVersion(TEST_METADATA_LAYOUT_VERSION)
-        .setSoftwareLayoutVersion(TEST_SOFTWARE_LAYOUT_VERSION)
-        .build();
+    LayoutVersionProto layoutInfo = defaultLayoutVersionProto();
 
     try {
       reconScm.getDatanodeProtocolServer()
@@ -338,7 +334,8 @@ public class TestEndpoints extends AbstractReconSqlDBTest {
       reconScm.getDatanodeProtocolServer()
           .register(extendedDatanodeDetailsProto2, nodeReportProto2,
               ContainerReportsProto.newBuilder().build(),
-              PipelineReportsProto.newBuilder().build(), null);
+              PipelineReportsProto.newBuilder().build(),
+              defaultLayoutVersionProto());
       // Process all events in the event queue
       reconScm.getEventQueue().processAll(1000);
     } catch (Exception ex) {
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java
index 9fb4cdd..b664f09 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java
@@ -45,6 +45,7 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.STAND_ALONE;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES;
 import static org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition.CONTAINERS;
+import static org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager.maxLayoutVersion;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS;
 import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getRandomPipeline;
 import org.junit.After;
@@ -68,8 +69,6 @@ public class AbstractReconContainerManagerTest {
   private ReconContainerManager containerManager;
   private DBStore store;
   private HDDSLayoutVersionManager layoutVersionManager;
-  public static final int SOFTWARE_LAYOUT_VERSION = 1;
-  public static final int METADATA_LAYOUT_VERSION = 1;
 
   @Before
   public void setUp() throws Exception {
@@ -83,9 +82,9 @@ public class AbstractReconContainerManagerTest {
     EventQueue eventQueue = new EventQueue();
     layoutVersionManager = mock(HDDSLayoutVersionManager.class);
     when(layoutVersionManager.getSoftwareLayoutVersion())
-        .thenReturn(SOFTWARE_LAYOUT_VERSION);
+        .thenReturn(maxLayoutVersion());
     when(layoutVersionManager.getMetadataLayoutVersion())
-        .thenReturn(METADATA_LAYOUT_VERSION);
+        .thenReturn(maxLayoutVersion());
     NodeManager nodeManager =
         new SCMNodeManager(conf, scmStorageConfig, eventQueue, clusterMap,
             layoutVersionManager);
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
index d0eacc6..065406d 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.recon.scm;
 
 import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN;
+import static org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager.maxLayoutVersion;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
@@ -87,14 +88,14 @@ public class TestReconIncrementalContainerReportHandler
     this.versionManager =
         Mockito.mock(HDDSLayoutVersionManager.class);
     Mockito.when(versionManager.getMetadataLayoutVersion())
-        .thenReturn(METADATA_LAYOUT_VERSION);
+        .thenReturn(maxLayoutVersion());
     Mockito.when(versionManager.getSoftwareLayoutVersion())
-        .thenReturn(SOFTWARE_LAYOUT_VERSION);
+        .thenReturn(maxLayoutVersion());
 
     NodeManager nodeManager =
         new SCMNodeManager(conf, storageConfig, eventQueue, clusterMap,
             versionManager);
-    nodeManager.register(datanodeDetails, null, null, null);
+    nodeManager.register(datanodeDetails, null, null);
 
     ReconContainerManager containerManager = getContainerManager();
     ReconIncrementalContainerReportHandler reconIcr =
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java
index 9374a04..c49104b 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.recon.scm;
 
 import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES;
+import static org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.defaultLayoutVersionProto;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
@@ -32,7 +33,6 @@ import java.util.UUID;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
 import org.apache.hadoop.hdds.scm.net.NetworkTopology;
 import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
@@ -70,7 +70,8 @@ public class TestReconNodeManager {
         temporaryFolder.newFolder().getAbsolutePath());
     conf.set(OZONE_SCM_NAMES, "localhost");
     reconStorageConfig = new ReconStorageConfig(conf);
-    versionManager = new HDDSLayoutVersionManager(reconStorageConfig);
+    versionManager = new HDDSLayoutVersionManager(
+        reconStorageConfig.getLayoutVersion());
     store = DBStoreBuilder.createDBStore(conf, new ReconSCMDBDefinition());
   }
 
@@ -96,7 +97,7 @@ public class TestReconNodeManager {
     String uuidString = datanodeDetails.getUuidString();
 
     // Register a random datanode.
-    reconNodeManager.register(datanodeDetails, null, null, null);
+    reconNodeManager.register(datanodeDetails, null, null);
     reconNewNodeHandler.onMessage(reconNodeManager.getNodeByUuid(uuidString),
         null);
 
@@ -118,8 +119,7 @@ public class TestReconNodeManager {
     // Upon processing the heartbeat, the illegal command should be filtered out
     List<SCMCommand> returnedCmds =
         reconNodeManager.processHeartbeat(datanodeDetails,
-            LayoutVersionProto.newBuilder().setMetadataLayoutVersion(0)
-                .setSoftwareLayoutVersion(0).build());
+            defaultLayoutVersionProto());
     assertEquals(1, returnedCmds.size());
     assertEquals(SCMCommandProto.Type.reregisterCommand,
         returnedCmds.get(0).getType());
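
Note: several call sites in this patch (here and in TestSCMNodeMetrics above) now register datanodes without passing a LayoutVersionProto, which suggests NodeManager gained a three-argument register() convenience. A hypothetical sketch of such an overload, assuming the four-argument variant and that it defaults to the highest layout version; the actual signature in the patch may differ:

    // Hypothetical convenience overload: register with the default
    // (highest) layout version when the caller does not supply one.
    default RegisteredCommand register(DatanodeDetails datanodeDetails,
        NodeReportProto nodeReport, PipelineReportsProto pipelineReports) {
      return register(datanodeDetails, nodeReport, pipelineReports,
          UpgradeUtils.defaultLayoutVersionProto());
    }
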
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java
index a670717..3b6c0d0 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.ozone.recon.scm.ReconPipelineFactory.ReconPipelineProvi
 
 import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES;
+import static org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager.maxLayoutVersion;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS;
 import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getRandomPipeline;
 import org.junit.After;
@@ -62,9 +63,6 @@ import static org.mockito.Mockito.mock;
  */
 public class TestReconPipelineManager {
 
-  private static final Integer SOFTWARE_LAYOUT_VERSION = 1;
-  private static final Integer METADATA_LAYOUT_VERSION = 1;
-
   @Rule
   public TemporaryFolder temporaryFolder = new TemporaryFolder();
 
@@ -120,9 +118,9 @@ public class TestReconPipelineManager {
     this.versionManager =
         Mockito.mock(HDDSLayoutVersionManager.class);
     Mockito.when(versionManager.getMetadataLayoutVersion())
-        .thenReturn(METADATA_LAYOUT_VERSION);
+        .thenReturn(maxLayoutVersion());
     Mockito.when(versionManager.getSoftwareLayoutVersion())
-        .thenReturn(SOFTWARE_LAYOUT_VERSION);
+        .thenReturn(maxLayoutVersion());
     NodeManager nodeManager =
         new SCMNodeManager(conf, scmStorageConfig, eventQueue, clusterMap,
             versionManager);
@@ -163,9 +161,9 @@ public class TestReconPipelineManager {
     this.versionManager =
         Mockito.mock(HDDSLayoutVersionManager.class);
     Mockito.when(versionManager.getMetadataLayoutVersion())
-        .thenReturn(METADATA_LAYOUT_VERSION);
+        .thenReturn(maxLayoutVersion());
     Mockito.when(versionManager.getSoftwareLayoutVersion())
-        .thenReturn(SOFTWARE_LAYOUT_VERSION);
+        .thenReturn(maxLayoutVersion());
     NodeManager nodeManager =
         new SCMNodeManager(conf, scmStorageConfig, eventQueue, clusterMap,
             versionManager);


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org