Posted to commits@ozone.apache.org by um...@apache.org on 2022/04/19 20:29:26 UTC

[ozone] branch HDDS-3816-ec updated: HDDS-5909. EC: Onboard EC into upgrade framework (#3262)

This is an automated email from the ASF dual-hosted git repository.

umamahesh pushed a commit to branch HDDS-3816-ec
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/HDDS-3816-ec by this push:
     new 70b547be23 HDDS-5909. EC: Onboard EC into upgrade framework (#3262)
70b547be23 is described below

commit 70b547be239aeee65fa3b67f64cb62f48deecde7
Author: Istvan Fajth <pi...@cloudera.com>
AuthorDate: Tue Apr 19 22:29:20 2022 +0200

    HDDS-5909. EC: Onboard EC into upgrade framework (#3262)
---
 .../protocol/StorageContainerLocationProtocol.java |   6 +-
 .../hadoop/hdds/upgrade/HDDSLayoutFeature.java     |   4 +-
 .../org/apache/hadoop/ozone/ClientVersion.java     |   3 +
 .../upgrade/TestDatanodeUpgradeToScmHA.java        |  17 +-
 ...lockLocationProtocolServerSideTranslatorPB.java |  12 ++
 ...inerLocationProtocolServerSideTranslatorPB.java | 221 ++++++++++++++++++++-
 .../hadoop/ozone/om/exceptions/OMException.java    |   3 +-
 hadoop-ozone/dist/src/main/compose/ozone/test.sh   |   2 +-
 .../compose/upgrade/compose/ha/docker-compose.yaml |  22 +-
 .../src/main/compose/upgrade/compose/ha/load.sh    |   2 +-
 .../upgrade/compose/non-ha/docker-compose.yaml     |  22 +-
 .../main/compose/upgrade/compose/non-ha/load.sh    |   2 +-
 hadoop-ozone/dist/src/main/compose/upgrade/test.sh |   8 +-
 .../dist/src/main/compose/upgrade/testlib.sh       |   8 +
 .../non-rolling-upgrade/1.2.1-1.3.0/callback.sh    |  84 ++++++++
 .../upgrade/upgrades/non-rolling-upgrade/driver.sh |   1 -
 .../dist/src/main/compose/xcompat/docker-config    |   2 +
 hadoop-ozone/dist/src/main/compose/xcompat/test.sh |  41 ++++
 .../src/main/smoketest/ec/backward-compat.robot    | 103 ++++++++++
 .../dist/src/main/smoketest/ec/basic.robot         |  15 +-
 .../dist/src/main/smoketest/ec/lib.resource        |  42 ++++
 .../src/main/smoketest/ec/upgrade-ec-check.robot   |  45 +++++
 .../dist/src/main/smoketest/upgrade/prepare.robot  |   2 +-
 .../src/main/proto/OmClientProtocol.proto          |   1 +
 .../om/request/bucket/OMBucketCreateRequest.java   |  28 +++
 .../request/bucket/OMBucketSetPropertyRequest.java |  30 +++
 .../om/request/file/OMDirectoryCreateRequest.java  |  27 +++
 .../ozone/om/request/file/OMFileCreateRequest.java |  25 +++
 .../om/request/key/OMAllocateBlockRequest.java     |  26 +++
 .../ozone/om/request/key/OMKeyCommitRequest.java   |  26 +++
 .../ozone/om/request/key/OMKeyCreateRequest.java   |  25 +++
 .../S3InitiateMultipartUploadRequest.java          |  29 +++
 .../multipart/S3MultipartUploadAbortRequest.java   |  27 +++
 .../S3MultipartUploadCommitPartRequest.java        |  28 ++-
 .../S3MultipartUploadCompleteRequest.java          |  29 ++-
 .../om/request/validation/RequestValidations.java  |  19 +-
 .../hadoop/ozone/om/upgrade/OMLayoutFeature.java   |   5 +-
 .../ozone/om/upgrade/OMLayoutFeatureAspect.java    |   4 +-
 ...OzoneManagerProtocolServerSideTranslatorPB.java |  11 +-
 .../protocolPB/OzoneManagerRequestHandler.java     | 178 ++++++++++++++++-
 .../request/validation/TestRequestValidations.java |  29 ++-
 41 files changed, 1148 insertions(+), 66 deletions(-)
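
The recurring pattern in the SCM-side changes below is a finalization gate:
while the cluster still needs finalization, any block, pipeline, or bucket
request involving Erasure Coded replication is rejected. A minimal sketch of
that idiom, using the getLayoutVersionManager() calls shown in the hunks that
follow (the helper method itself is illustrative only; the patch inlines the
check at each call site):

    // Illustrative helper, not part of the patch. Assumes the
    // HDDSLayoutVersionManager type behind scm.getLayoutVersionManager().
    private static void rejectEcBeforeFinalization(
        HDDSLayoutVersionManager versionManager) throws SCMException {
      if (versionManager.needsFinalization()
          && !versionManager.isAllowed(
              HDDSLayoutFeature.ERASURE_CODED_STORAGE_SUPPORT)) {
        throw new SCMException("Cluster is not finalized yet; Erasure Coded"
            + " replication is not available before finalization.",
            SCMException.ResultCodes.INTERNAL_ERROR);
      }
    }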

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
index 619f8353fb..d915a20c91 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
@@ -224,7 +224,7 @@ public interface StorageContainerLocationProtocol extends Closeable {
    * @param state The node health
    * @param clientVersion Client's version number
    * @return List of Datanodes.
-   * @see org.apache.hadoop.ozone.ClientVersions
+   * @see org.apache.hadoop.ozone.ClientVersion
    */
   List<HddsProtos.Node> queryNode(HddsProtos.NodeOperationalState opState,
       HddsProtos.NodeState state, HddsProtos.QueryScope queryScope,
@@ -385,7 +385,7 @@ public interface StorageContainerLocationProtocol extends Closeable {
    * @return List of DatanodeUsageInfoProto. Each element contains info such as
    * capacity, SCMused, and remaining space.
    * @throws IOException
-   * @see org.apache.hadoop.ozone.ClientVersions
+   * @see org.apache.hadoop.ozone.ClientVersion
    */
   List<HddsProtos.DatanodeUsageInfoProto> getDatanodeUsageInfo(
       String ipaddress, String uuid, int clientVersion) throws IOException;
@@ -399,7 +399,7 @@ public interface StorageContainerLocationProtocol extends Closeable {
    * @return List of DatanodeUsageInfoProto. Each element contains info such as
    * capacity, SCMUsed, and remaining space.
    * @throws IOException
-   * @see org.apache.hadoop.ozone.ClientVersions
+   * @see org.apache.hadoop.ozone.ClientVersion
    */
   List<HddsProtos.DatanodeUsageInfoProto> getDatanodeUsageInfo(
       boolean mostUsed, int count, int clientVersion) throws IOException;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeature.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeature.java
index 2bc1a6718c..4217055ecd 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeature.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeature.java
@@ -31,7 +31,9 @@ public enum HDDSLayoutFeature implements LayoutFeature {
   INITIAL_VERSION(0, "Initial Layout Version"),
   DATANODE_SCHEMA_V2(1, "Datanode RocksDB Schema Version 2 (with column " +
       "families)"),
-  SCM_HA(2, "Storage Container Manager HA");
+  SCM_HA(2, "Storage Container Manager HA"),
+  ERASURE_CODED_STORAGE_SUPPORT(3, "Ozone version with built-in support for"
+      + " Erasure Coded block data storage.");
 
   //////////////////////////////  //////////////////////////////
 
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/ClientVersion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/ClientVersion.java
index ec00ca2f26..b8e2542bde 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/ClientVersion.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/ClientVersion.java
@@ -35,6 +35,9 @@ public enum ClientVersion implements ComponentVersion {
   VERSION_HANDLES_UNKNOWN_DN_PORTS(1,
       "Client version that handles the REPLICATION port in DatanodeDetails."),
 
+  ERASURE_CODING_SUPPORT(2, "This client version has support for Erasure"
+      + " Coding."),
+
   FUTURE_VERSION(-1, "Used internally when the server side is older and an"
       + " unknown client version has arrived from the client.");
 
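ClientVersion values are ordered by their proto value, so the server side can
decide with a single comparison whether a caller understands EC. A minimal
sketch of that comparison, mirroring the request.getVersion() check added to
the container-location translator later in this patch (the method name here
is illustrative):

    // Sketch: true when the calling client predates EC support and must not
    // receive EC replication details in responses.
    static boolean clientPredatesEc(int clientProtoVersion) {
      return clientProtoVersion
          < ClientVersion.ERASURE_CODING_SUPPORT.toProtoValue();
    }
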
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java
index d882ca4ed4..34fb79a9c5 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java
@@ -192,7 +192,7 @@ public class TestDatanodeUpgradeToScmHA {
     // restarted with SCM HA config and gets a different SCM ID.
     conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
     changeScmID();
-    restartDatanode(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion());
+    restartDatanode(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion(), true);
     // Make sure the existing container can be read.
     readChunk(exportWriteChunk2, pipeline);
 
@@ -289,7 +289,7 @@ public class TestDatanodeUpgradeToScmHA {
 
     /// FINALIZED: Restart datanode to upgrade the failed volume ///
 
-    restartDatanode(HDDSLayoutFeature.SCM_HA.layoutVersion());
+    restartDatanode(HDDSLayoutFeature.SCM_HA.layoutVersion(), false);
 
     Assert.assertEquals(1,
         dsm.getContainer().getVolumeSet().getVolumesList().size());
@@ -344,7 +344,7 @@ public class TestDatanodeUpgradeToScmHA {
     changeScmID();
     // A new volume is added that must be formatted.
     File preFinVolume2 = addVolume();
-    restartDatanode(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion());
+    restartDatanode(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion(), true);
 
     Assert.assertEquals(2,
         dsm.getContainer().getVolumeSet().getVolumesList().size());
@@ -378,7 +378,7 @@ public class TestDatanodeUpgradeToScmHA {
     File finVolume = addVolume();
     // Yet another SCM ID is received this time, but it should not matter.
     changeScmID();
-    restartDatanode(HDDSLayoutFeature.SCM_HA.layoutVersion());
+    restartDatanode(HDDSLayoutFeature.SCM_HA.layoutVersion(), false);
     Assert.assertEquals(3,
         dsm.getContainer().getVolumeSet().getVolumesList().size());
     Assert.assertEquals(0,
@@ -521,7 +521,7 @@ public class TestDatanodeUpgradeToScmHA {
     callVersionEndpointTask();
   }
 
-  public void restartDatanode(int expectedMlv)
+  public void restartDatanode(int expectedMlv, boolean exactMatch)
       throws Exception {
     // Stop existing datanode.
     DatanodeDetails dd = dsm.getDatanodeDetails();
@@ -532,7 +532,12 @@ public class TestDatanodeUpgradeToScmHA {
         conf, null, null,
         null);
     int mlv = dsm.getLayoutVersionManager().getMetadataLayoutVersion();
-    Assert.assertEquals(expectedMlv, mlv);
+    if (exactMatch) {
+      Assert.assertEquals(expectedMlv, mlv);
+    } else {
+      Assert.assertTrue("mlv(" + mlv + ") is below the expected minimum"
+          + " mlv(" + expectedMlv + ").", expectedMlv <= mlv);
+    }
 
     callVersionEndpointTask();
   }
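
The extra boolean relaxes the restart assertion: once new layout features
such as EC exist, a finalized datanode's metadata layout version can be
higher than SCM_HA, so post-finalization restarts assert a minimum rather
than an exact version. Usage as seen in the hunks above:

    // Pre-finalized restarts still pin the exact mlv...
    restartDatanode(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion(), true);
    // ...while restarts after finalization only require at least SCM_HA,
    // since later features (like EC) raise the finalized mlv.
    restartDatanode(HDDSLayoutFeature.SCM_HA.layoutVersion(), false);
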
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java
index 4e6aad7ceb..bdf248bea5 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.server.OzoneProtocolMessageDispatcher;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
 import org.apache.hadoop.ozone.common.BlockGroup;
 import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
 import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics;
@@ -127,6 +128,17 @@ public final class ScmBlockLocationProtocolServerSideTranslatorPB
     try {
       switch (request.getCmdType()) {
       case AllocateScmBlock:
+        if (scm.getLayoutVersionManager().needsFinalization() &&
+            !scm.getLayoutVersionManager()
+                .isAllowed(HDDSLayoutFeature.ERASURE_CODED_STORAGE_SUPPORT)
+        ) {
+          if (request.getAllocateScmBlockRequest().hasEcReplicationConfig()) {
+            throw new SCMException("Cluster is not finalized yet; creating"
+                + " blocks with Erasure Coded replication type is not"
+                + " enabled before finalization.",
+                SCMException.ResultCodes.INTERNAL_ERROR);
+          }
+        }
         response.setAllocateScmBlockResponse(allocateScmBlock(
             request.getAllocateScmBlockRequest(), request.getVersion()));
         break;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
index 5b926f9518..c2ba0ffffa 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
@@ -101,13 +101,16 @@ import org.apache.hadoop.hdds.scm.ScmInfo;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.ha.RatisUtil;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.protocolPB.OzonePBHelper;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.server.OzoneProtocolMessageDispatcher;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
 import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics;
+import org.apache.hadoop.ozone.ClientVersion;
 import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.StatusAndMessages;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -120,6 +123,13 @@ import java.util.Optional;
 
 import static org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.PipelineResponseProto.Error.errorPipelineAlreadyExists;
 import static org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.PipelineResponseProto.Error.success;
+import static org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.Type.GetContainer;
+import static org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.Type.GetContainerWithPipeline;
+import static org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.Type.GetContainerWithPipelineBatch;
+import static org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.Type.GetExistContainerWithPipelinesInBatch;
+import static org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.Type.GetPipeline;
+import static org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.Type.ListContainer;
+import static org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.Type.ListPipelines;
 import static org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol.ADMIN_COMMAND_TYPE;
 
 /**
@@ -135,6 +145,16 @@ public final class StorageContainerLocationProtocolServerSideTranslatorPB
   private static final Logger LOG =
       LoggerFactory.getLogger(
           StorageContainerLocationProtocolServerSideTranslatorPB.class);
+  private static final String ERROR_LIST_CONTAINS_EC_REPLICATION_CONFIG =
+      "The returned list of containers contains containers with Erasure Coded"
+          + " replication type, which the client won't be able to understand."
+          + " Please upgrade the client to a version that supports Erasure"
+          + " Coded data, and retry!";
+  private static final String ERROR_RESPONSE_CONTAINS_EC_REPLICATION_CONFIG =
+      "The returned container data contains Erasure Coded replication"
+          + " information, which the client won't be able to understand."
+          + " Please upgrade the client to a version that supports Erasure"
+          + " Coded data, and retry!";
 
   private final StorageContainerLocationProtocol impl;
   private final StorageContainerManager scm;
@@ -172,9 +192,196 @@ public final class StorageContainerLocationProtocolServerSideTranslatorPB
           scm.getScmHAManager().getRatisServer().triggerNotLeaderException(),
           scm.getClientRpcPort(), scm.getScmId());
     }
-    return dispatcher
+    // Once the request validator (formerly interceptor) framework is
+    // extended to this server interface, this check should be removed and
+    // handled via new annotated validators.
+    boolean checkResponseForECRepConfig = false;
+    if (request.getVersion() <
+        ClientVersion.ERASURE_CODING_SUPPORT.toProtoValue()) {
+      if (request.getCmdType() == GetContainer
+          || request.getCmdType() == ListContainer
+          || request.getCmdType() == GetContainerWithPipeline
+          || request.getCmdType() == GetContainerWithPipelineBatch
+          || request.getCmdType() == GetExistContainerWithPipelinesInBatch
+          || request.getCmdType() == ListPipelines
+          || request.getCmdType() == GetPipeline) {
+
+        checkResponseForECRepConfig = true;
+      }
+    }
+    ScmContainerLocationResponse response = dispatcher
         .processRequest(request, this::processRequest, request.getCmdType(),
             request.getTraceID());
+    if (checkResponseForECRepConfig) {
+      try {
+        switch (response.getCmdType()) {
+        case GetContainer:
+          disallowECReplicationConfigInGetContainerResponse(response);
+          break;
+        case ListContainer:
+          disallowECReplicationConfigInListContainerResponse(response);
+          break;
+        case GetContainerWithPipeline:
+          disallowECReplicationConfigInGetContainerWithPipelineResponse(
+              response);
+          break;
+        case GetContainerWithPipelineBatch:
+          disallowECReplicationConfigInGetContainerWithPipelineBatchResponse(
+              response);
+          break;
+        case GetExistContainerWithPipelinesInBatch:
+          disallowECReplicationConfigInGetExistContainerWithPipelineBatchResp(
+              response);
+          break;
+        case ListPipelines:
+          disallowECReplicationConfigInListPipelinesResponse(response);
+          break;
+        case GetPipeline:
+          disallowECReplicationConfigInGetPipelineResponse(response);
+          break;
+        default:
+        }
+      } catch (SCMException e) {
+        throw new ServiceException(e);
+      }
+    }
+    return response;
+  }
+
+  private void disallowECReplicationConfigInListContainerResponse(
+      ScmContainerLocationResponse response) throws SCMException {
+    if (!response.hasScmListContainerResponse()) {
+      return;
+    }
+    for (HddsProtos.ContainerInfoProto containerInfo :
+        response.getScmListContainerResponse().getContainersList()) {
+      if (containerInfo.hasEcReplicationConfig()) {
+        throw new SCMException(ERROR_LIST_CONTAINS_EC_REPLICATION_CONFIG,
+            SCMException.ResultCodes.INTERNAL_ERROR);
+      }
+    }
+  }
+
+  private void disallowECReplicationConfigInGetContainerResponse(
+      ScmContainerLocationResponse response) throws SCMException {
+    if (!response.hasGetContainerResponse()) {
+      return;
+    }
+    if (!response.getGetContainerResponse().hasContainerInfo()) {
+      return;
+    }
+    if (response.getGetContainerResponse().getContainerInfo()
+        .hasEcReplicationConfig()) {
+      throw new SCMException(ERROR_RESPONSE_CONTAINS_EC_REPLICATION_CONFIG,
+          SCMException.ResultCodes.INTERNAL_ERROR);
+    }
+  }
+
+  private void disallowECReplicationConfigInGetContainerWithPipelineResponse(
+      ScmContainerLocationResponse response) throws SCMException {
+    if (!response.hasGetContainerWithPipelineResponse()) {
+      return;
+    }
+    if (!response.getGetContainerWithPipelineResponse()
+        .hasContainerWithPipeline()) {
+      return;
+    }
+    if (response.getGetContainerWithPipelineResponse()
+        .getContainerWithPipeline().hasContainerInfo()) {
+      HddsProtos.ContainerInfoProto containerInfo =
+          response.getGetContainerWithPipelineResponse()
+              .getContainerWithPipeline().getContainerInfo();
+      if (containerInfo.hasEcReplicationConfig()) {
+        throw new SCMException(ERROR_RESPONSE_CONTAINS_EC_REPLICATION_CONFIG,
+            SCMException.ResultCodes.INTERNAL_ERROR);
+      }
+    }
+    if (response.getGetContainerWithPipelineResponse()
+        .getContainerWithPipeline().hasPipeline()) {
+      HddsProtos.Pipeline pipeline =
+          response.getGetContainerWithPipelineResponse()
+              .getContainerWithPipeline().getPipeline();
+      if (pipeline.hasEcReplicationConfig()) {
+        throw new SCMException(ERROR_RESPONSE_CONTAINS_EC_REPLICATION_CONFIG,
+            SCMException.ResultCodes.INTERNAL_ERROR);
+      }
+    }
+  }
+
+  private void
+      disallowECReplicationConfigInGetContainerWithPipelineBatchResponse(
+      ScmContainerLocationResponse response) throws SCMException {
+    if (!response.hasGetContainerWithPipelineBatchResponse()) {
+      return;
+    }
+    List<HddsProtos.ContainerWithPipeline> cwps =
+        response.getGetContainerWithPipelineBatchResponse()
+            .getContainerWithPipelinesList();
+    checkForECReplicationConfigIn(cwps);
+  }
+
+  private void
+      disallowECReplicationConfigInGetExistContainerWithPipelineBatchResp(
+      ScmContainerLocationResponse response) throws SCMException {
+    if (!response.hasGetExistContainerWithPipelinesInBatchResponse()) {
+      return;
+    }
+    List<HddsProtos.ContainerWithPipeline> cwps =
+        response.getGetExistContainerWithPipelinesInBatchResponse()
+            .getContainerWithPipelinesList();
+    checkForECReplicationConfigIn(cwps);
+  }
+
+  private void checkForECReplicationConfigIn(
+      List<HddsProtos.ContainerWithPipeline> cwps)
+      throws SCMException {
+    for (HddsProtos.ContainerWithPipeline cwp : cwps) {
+      if (cwp.hasContainerInfo()) {
+        if (cwp.getContainerInfo().hasEcReplicationConfig()) {
+          throw new SCMException(ERROR_LIST_CONTAINS_EC_REPLICATION_CONFIG,
+              SCMException.ResultCodes.INTERNAL_ERROR);
+        }
+      }
+      if (cwp.hasPipeline()) {
+        if (cwp.getPipeline().hasEcReplicationConfig()) {
+          throw new SCMException(ERROR_LIST_CONTAINS_EC_REPLICATION_CONFIG,
+              SCMException.ResultCodes.INTERNAL_ERROR);
+        }
+      }
+    }
+  }
+
+  private void disallowECReplicationConfigInListPipelinesResponse(
+      ScmContainerLocationResponse response) throws SCMException {
+    if (!response.hasListPipelineResponse()) {
+      return;
+    }
+    for (HddsProtos.Pipeline pipeline :
+        response.getListPipelineResponse().getPipelinesList()) {
+      if (pipeline.hasEcReplicationConfig()) {
+        throw new SCMException("The returned list of pipelines contains"
+            + " pipelines with Erasure Coded replication type, which the"
+            + " client won't be able to understand."
+            + " Please upgrade the client to a version that supports Erasure"
+            + " Coded data, and retry!",
+            SCMException.ResultCodes.INTERNAL_ERROR);
+      }
+    }
+  }
+
+  private void disallowECReplicationConfigInGetPipelineResponse(
+      ScmContainerLocationResponse response) throws SCMException {
+    if (!response.hasGetPipelineResponse()) {
+      return;
+    }
+    if (response.getPipelineResponse().getPipeline().hasEcReplicationConfig()) {
+      throw new SCMException("The returned pipeline data contains"
+          + " Erasure Coded replication information, which the client won't"
+          + " be able to understand."
+          + " Please upgrade the client to a version that supports Erasure"
+          + " Coded data, and retry!",
+          SCMException.ResultCodes.INTERNAL_ERROR);
+    }
   }
 
   @SuppressWarnings("checkstyle:methodlength")
@@ -251,6 +458,18 @@ public final class StorageContainerLocationProtocolServerSideTranslatorPB
                 request.getScmCloseContainerRequest()))
             .build();
       case AllocatePipeline:
+        if (scm.getLayoutVersionManager().needsFinalization() &&
+            !scm.getLayoutVersionManager().isAllowed(
+                HDDSLayoutFeature.ERASURE_CODED_STORAGE_SUPPORT)
+        ) {
+          if (request.getPipelineRequest().getReplicationType() ==
+              HddsProtos.ReplicationType.EC) {
+            throw new SCMException("Cluster is not finalized yet; creating"
+                + " pipelines with Erasure Coded replication type is not"
+                + " enabled before finalization.",
+                SCMException.ResultCodes.INTERNAL_ERROR);
+          }
+        }
         return ScmContainerLocationResponse.newBuilder()
             .setCmdType(request.getCmdType())
             .setStatus(Status.OK)
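
Because EC details only surface in response payloads, the version guard wraps
the dispatcher call instead of rejecting requests up front. A condensed view
of the flow added to submitRequest above (EC_SENSITIVE_CMDS and the combined
checker are hypothetical names standing in for the cmdType comparisons and
the per-command disallow methods):

    // Sketch of the post-dispatch filtering added above.
    boolean check = request.getVersion()
        < ClientVersion.ERASURE_CODING_SUPPORT.toProtoValue()
        && EC_SENSITIVE_CMDS.contains(request.getCmdType());
    ScmContainerLocationResponse response = dispatcher.processRequest(
        request, this::processRequest, request.getCmdType(),
        request.getTraceID());
    if (check) {
      rejectIfResponseCarriesEcConfig(response); // throws SCMException,
                                                 // wrapped in ServiceException
    }
    return response;
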
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
index 953aa23878..168b2c8bb4 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
@@ -242,6 +242,7 @@ public class OMException extends IOException {
     UPDATE_LAYOUT_VERSION_FAILED,
     LAYOUT_FEATURE_FINALIZATION_FAILED,
     PREPARE_FAILED,
-    NOT_SUPPORTED_OPERATION_WHEN_PREPARED
+    NOT_SUPPORTED_OPERATION_WHEN_PREPARED,
+    NOT_SUPPORTED_OPERATION_PRIOR_FINALIZATION
   }
 }
diff --git a/hadoop-ozone/dist/src/main/compose/ozone/test.sh b/hadoop-ozone/dist/src/main/compose/ozone/test.sh
index 73aacf4c9e..ac3b273979 100755
--- a/hadoop-ozone/dist/src/main/compose/ozone/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozone/test.sh
@@ -57,7 +57,7 @@ execute_debug_tests
 execute_robot_test scm -v SCHEME:ofs -v BUCKET_TYPE:link -N ozonefs-fso-ofs-link ozonefs/ozonefs.robot
 execute_robot_test scm -v SCHEME:o3fs -v BUCKET_TYPE:bucket -N ozonefs-fso-o3fs-bucket ozonefs/ozonefs.robot
 
-execute_robot_test scm ec
+execute_robot_test scm ec/basic.robot
 
 stop_docker_env
 
diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-compose.yaml
index 24ff9d7377..7f52d73a8e 100644
--- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-compose.yaml
@@ -124,6 +124,24 @@ services:
       - ${OZONE_VOLUME}/dn3:/data
       - *ozone-dir
       - *transformation
+  dn4:
+    <<: *datanode
+    networks:
+      net:
+        ipv4_address: 10.9.0.18
+    volumes:
+      - ${OZONE_VOLUME}/dn4:/data
+      - *ozone-dir
+      - *transformation
+  dn5:
+    <<: *datanode
+    networks:
+      net:
+        ipv4_address: 10.9.0.19
+    volumes:
+      - ${OZONE_VOLUME}/dn5:/data
+      - *ozone-dir
+      - *transformation
   recon:
     command: ["ozone","recon"]
     <<: *common-config
@@ -131,7 +149,7 @@ services:
       <<: *replication
     networks:
       net:
-        ipv4_address: 10.9.0.18
+        ipv4_address: 10.9.0.20
     ports:
       - 9888:9888
     volumes:
@@ -145,7 +163,7 @@ services:
       <<: *replication
     networks:
       net:
-        ipv4_address: 10.9.0.19
+        ipv4_address: 10.9.0.21
     ports:
       - 9878:9878
     volumes:
diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/load.sh b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/load.sh
index 902eb12a1b..43c668804b 100755
--- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/load.sh
+++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/load.sh
@@ -25,4 +25,4 @@ source "$TEST_DIR/testlib.sh"
 
 export COMPOSE_FILE="$TEST_DIR/compose/ha/docker-compose.yaml"
 export OM_SERVICE_ID=omservice
-create_data_dirs "${OZONE_VOLUME}"/{om1,om2,om3,dn1,dn2,dn3,recon,s3g,scm}
+create_data_dirs "${OZONE_VOLUME}"/{om1,om2,om3,dn1,dn2,dn3,dn4,dn5,recon,s3g,scm}
diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-compose.yaml
index 4b1d64e0ce..14617ccef5 100644
--- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-compose.yaml
@@ -101,6 +101,24 @@ services:
       - ${OZONE_VOLUME}/dn3:/data
       - *ozone-dir
       - *transformation
+  dn4:
+    <<: *datanode
+    networks:
+      net:
+        ipv4_address: 10.9.0.16
+    volumes:
+      - ${OZONE_VOLUME}/dn4:/data
+      - *ozone-dir
+      - *transformation
+  dn5:
+    <<: *datanode
+    networks:
+      net:
+        ipv4_address: 10.9.0.17
+    volumes:
+      - ${OZONE_VOLUME}/dn5:/data
+      - *ozone-dir
+      - *transformation
   recon:
     command: ["ozone","recon"]
     <<: *common-config
@@ -108,7 +126,7 @@ services:
       <<: *replication
     networks:
       net:
-        ipv4_address: 10.9.0.16
+        ipv4_address: 10.9.0.18
     ports:
       - 9888:9888
     volumes:
@@ -122,7 +140,7 @@ services:
       <<: *replication
     networks:
       net:
-        ipv4_address: 10.9.0.17
+        ipv4_address: 10.9.0.19
     ports:
       - 9878:9878
     volumes:
diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/load.sh b/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/load.sh
index a8980e75fc..0e8fc0567f 100755
--- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/load.sh
+++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/load.sh
@@ -24,4 +24,4 @@ set +u
 source "$TEST_DIR/testlib.sh"
 
 export COMPOSE_FILE="$TEST_DIR/compose/non-ha/docker-compose.yaml"
-create_data_dirs "${OZONE_VOLUME}"/{om,dn1,dn2,dn3,recon,s3g,scm}
+create_data_dirs "${OZONE_VOLUME}"/{om,dn1,dn2,dn3,dn4,dn5,recon,s3g,scm}
diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/test.sh b/hadoop-ozone/dist/src/main/compose/upgrade/test.sh
index 687f9bd1e0..35358e3997 100755
--- a/hadoop-ozone/dist/src/main/compose/upgrade/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/upgrade/test.sh
@@ -16,7 +16,7 @@
 # limitations under the License.
 
 # Version that will be run using the local build.
-: "${OZONE_CURRENT_VERSION:=1.2.0}"
+: "${OZONE_CURRENT_VERSION:=1.3.0}"
 export OZONE_CURRENT_VERSION
 
 TEST_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )
@@ -31,8 +31,10 @@ run_test_scripts ${tests} || RESULT=$?
 
 RESULT_DIR="$ALL_RESULT_DIR" create_results_dir
 
-# Upgrade tests to be run.
-run_test non-rolling-upgrade 1.1.0 1.2.0
+# Upgrade tests to be run. In CI we want to run just one set, but for a release
+# we might advise the release manager to run the full matrix.
+#run_test non-rolling-upgrade 1.1.0 1.3.0
+run_test non-rolling-upgrade 1.2.1 1.3.0
 
 generate_report "upgrade" "$ALL_RESULT_DIR"
 
diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/testlib.sh b/hadoop-ozone/dist/src/main/compose/upgrade/testlib.sh
index 1b6b05d101..b3fa1df3bc 100755
--- a/hadoop-ozone/dist/src/main/compose/upgrade/testlib.sh
+++ b/hadoop-ozone/dist/src/main/compose/upgrade/testlib.sh
@@ -150,3 +150,11 @@ check_om_mlv() {
 check_scm_mlv() {
   check_mlv "$1" /data/metadata/scm/current/VERSION "$2"
 }
+
+check_ec_is_disabled() {
+  execute_robot_test scm --include pre-finalized-ec-tests ec/upgrade-ec-check.robot
+}
+
+check_ec_is_enabled() {
+  execute_robot_test scm --include post-finalized-ec-tests ec/upgrade-ec-check.robot
+}
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/upgrades/non-rolling-upgrade/1.2.1-1.3.0/callback.sh b/hadoop-ozone/dist/src/main/compose/upgrade/upgrades/non-rolling-upgrade/1.2.1-1.3.0/callback.sh
new file mode 100755
index 0000000000..f414ddf43f
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/upgrade/upgrades/non-rolling-upgrade/1.2.1-1.3.0/callback.sh
@@ -0,0 +1,84 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source "$TEST_DIR"/testlib.sh
+
+# Helper function, not a callback.
+_check_hdds_mlvs() {
+  mlv="$1"
+  check_scm_mlv scm "$mlv"
+  check_dn_mlv dn1 "$mlv"
+  check_dn_mlv dn2 "$mlv"
+  check_dn_mlv dn3 "$mlv"
+}
+
+# Helper function, not a callback.
+_check_om_mlvs() {
+  mlv="$1"
+  check_om_mlv om1 "$mlv"
+  check_om_mlv om2 "$mlv"
+  check_om_mlv om3 "$mlv"
+}
+
+setup() {
+  export OZONE_OM_PREPARE='true'
+}
+
+with_old_version() {
+  generate old1
+  validate old1
+}
+
+with_new_version_pre_finalized() {
+  _check_hdds_mlvs 2
+  _check_om_mlvs 0
+
+  validate old1
+  # HDDS-6261: overwrite the same keys intentionally
+  generate old1 --exclude create-volume-and-bucket
+
+  generate new1
+  validate new1
+
+  check_ec_is_disabled
+}
+
+with_old_version_downgraded() {
+  validate old1
+  validate new1
+
+  generate old2
+  validate old2
+
+  # HDDS-6261: overwrite the same keys again to trigger the precondition check
+  # that exists in OM versions <= 1.1.0
+  generate old1 --exclude create-volume-and-bucket
+}
+
+with_new_version_finalized() {
+  _check_hdds_mlvs 3
+  _check_om_mlvs 1
+
+  validate old1
+  validate new1
+  validate old2
+
+  generate new2
+  validate new2
+
+  check_ec_is_enabled
+}
diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/upgrades/non-rolling-upgrade/driver.sh b/hadoop-ozone/dist/src/main/compose/upgrade/upgrades/non-rolling-upgrade/driver.sh
index e59bec59c3..7b95238bf1 100755
--- a/hadoop-ozone/dist/src/main/compose/upgrade/upgrades/non-rolling-upgrade/driver.sh
+++ b/hadoop-ozone/dist/src/main/compose/upgrade/upgrades/non-rolling-upgrade/driver.sh
@@ -72,7 +72,6 @@ echo "--- RUNNING WITH NEW VERSION $OZONE_UPGRADE_TO PRE-FINALIZED ---"
 OUTPUT_NAME="$OZONE_UPGRADE_TO"-pre-finalized
 OZONE_KEEP_RESULTS=true start_docker_env
 callback with_new_version_pre_finalized
-
 prepare_oms
 stop_docker_env
 prepare_for_image "$OZONE_UPGRADE_FROM"
diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/docker-config b/hadoop-ozone/dist/src/main/compose/xcompat/docker-config
index 8bbb4d103d..29b3696a8a 100644
--- a/hadoop-ozone/dist/src/main/compose/xcompat/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/xcompat/docker-config
@@ -14,6 +14,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+CORE-SITE.XML_fs.ofs.impl=org.apache.hadoop.fs.ozone.RootedOzoneFileSystem
+
 OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
 OZONE-SITE.XML_hdds.scm.safemode.min.datanode=3
 OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/test.sh b/hadoop-ozone/dist/src/main/compose/xcompat/test.sh
index a46590fdde..f583a2b342 100755
--- a/hadoop-ozone/dist/src/main/compose/xcompat/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/xcompat/test.sh
@@ -73,6 +73,45 @@ test_cross_compatibility() {
   KEEP_RUNNING=false stop_docker_env
 }
 
+test_ec_cross_compatibility() {
+  echo "Running Erasure Coded storage backward compatibility tests."
+  local cluster_versions_with_ec="1.3.0"
+  local non_ec_client_versions="1.0.0 1.1.0 1.2.1"
+
+  for cluster_version in ${cluster_versions_with_ec}; do
+    export COMPOSE_FILE=new-cluster.yaml:clients.yaml cluster_version=${cluster_version}
+    OZONE_KEEP_RESULTS=true start_docker_env 5
+
+    echo -n "Generating data locally...   "
+    dd if=/dev/urandom of=/tmp/1mb bs=1048576 count=1 >/dev/null 2>&1
+    dd if=/dev/urandom of=/tmp/2mb bs=1048576 count=2 >/dev/null 2>&1
+    dd if=/dev/urandom of=/tmp/3mb bs=1048576 count=3 >/dev/null 2>&1
+    echo "done"
+    echo -n "Copy data into client containers...   "
+    for container in $(docker ps --format '{{.Names}}' | grep client); do
+      docker cp /tmp/1mb ${container}:/tmp/1mb
+      docker cp /tmp/2mb ${container}:/tmp/2mb
+      docker cp /tmp/3mb ${container}:/tmp/3mb
+    done
+    echo "done"
+    rm -f /tmp/1mb /tmp/2mb /tmp/3mb
+
+
+    local prefix=$(LC_CTYPE=C tr -dc '[:alnum:]' < /dev/urandom | head -c 5 | tr '[:upper:]' '[:lower:]')
+    OZONE_DIR=/opt/hadoop
+    execute_robot_test new_client --include setup-ec-data -N "xcompat-cluster-${cluster_version}-setup-data" -v prefix:"${prefix}" ec/backward-compat.robot
+    OZONE_DIR=/opt/ozone
+
+    for client_version in ${non_ec_client_versions}; do
+      client="old_client_${client_version//./_}"
+      unset OUTPUT_PATH
+      execute_robot_test "${client}" --include test-ec-compat -N "xcompat-cluster-${cluster_version}-client-${client_version}-read-${cluster_version}" -v prefix:"${prefix}" ec/backward-compat.robot
+    done
+
+    KEEP_RUNNING=false stop_docker_env
+  done
+}
+
 create_results_dir
 
 # current cluster with various clients
@@ -84,4 +123,6 @@ for cluster_version in ${old_versions}; do
   COMPOSE_FILE=old-cluster.yaml:clients.yaml test_cross_compatibility ${cluster_version}
 done
 
+test_ec_cross_compatibility
+
 generate_report
diff --git a/hadoop-ozone/dist/src/main/smoketest/ec/backward-compat.robot b/hadoop-ozone/dist/src/main/smoketest/ec/backward-compat.robot
new file mode 100644
index 0000000000..74ab547791
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/smoketest/ec/backward-compat.robot
@@ -0,0 +1,103 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+Documentation       Test EC backward compatibility
+Library             OperatingSystem
+Resource            lib.resource
+
+*** Test Cases ***
+Setup Cluster Data
+    [Tags]  setup-ec-data
+    Prepare Data For Xcompat Tests
+
+Test Read Key Compat
+    [Tags]  test-ec-compat
+    Key Should Match Local File     /${prefix}vol1/${prefix}ratis/${prefix}3mb      /tmp/3mb
+    Key Should Match Local File     /${prefix}vol1/${prefix}default/${prefix}3mb    /tmp/3mb
+
+    ${result} =     Execute and checkrc         ozone sh key get /${prefix}vol1/${prefix}ec/${prefix}3mb /tmp/${prefix}3mb       255
+                    Should Contain  ${result}   NOT_SUPPORTED_OPERATION
+
+Test Listing Compat
+    [Tags]  test-ec-compat
+    ${result} =     Execute     ozone sh volume list | jq -r '.name'
+                    Should contain  ${result}   ${prefix}vol1
+    ${result} =     Execute     ozone sh bucket list /${prefix}vol1/ | jq -r '.name'
+                    Should contain  ${result}   ${prefix}default
+                    Should contain  ${result}   ${prefix}ratis
+                    Should contain  ${result}   ${prefix}ec
+    ${result} =     Execute     ozone sh key list /${prefix}vol1/${prefix}default/ | jq -r '[.name, .replicationType, (.replicationFactor | tostring)] | join (" ")'
+                    Should contain  ${result}   ${prefix}3mb RATIS 3
+    ${result} =     Execute     ozone sh key list /${prefix}vol1/${prefix}ratis/ | jq -r '[.name, .replicationType, (.replicationFactor | tostring)] | join (" ")'
+                    Should contain  ${result}   ${prefix}3mb RATIS 3
+
+    ${result} =     Execute and checkrc         ozone sh key list /${prefix}vol1/${prefix}ec/   255
+                    Should contain  ${result}   NOT_SUPPORTED_OPERATION
+
+Test Info Compat
+    [Tags]  test-ec-compat
+    ${result} =     Execute     ozone sh volume info ${prefix}vol1 | jq -r '.name'
+                    Should contain  ${result}   ${prefix}vol1
+    ${result} =     Execute     ozone sh bucket info /${prefix}vol1/${prefix}default | jq -r '[.name, .replicationType, .replicationFactor] | join (" ")'
+                    Should contain  ${result}   ${prefix}default        # there is no replication config in the old client for bucket info
+    ${result} =     Execute     ozone sh bucket info /${prefix}vol1/${prefix}ratis | jq -r '[.name, .replicationType, .replicationFactor] | join (" ")'
+                    Should contain  ${result}   ${prefix}ratis        # there is no replication config in the old client for bucket info
+    ${result} =     Execute     ozone sh bucket info /${prefix}vol1/${prefix}ec | jq -r '[.name, .replicationType, .replicationFactor] | join (" ")'
+                    Should contain  ${result}   ${prefix}ec        # there is no replication config in the old client for bucket info
+
+Test FS Compat
+    [Tags]  test-ec-compat
+    ${result} =     Execute     ozone fs -ls ofs://om/
+                    Should contain  ${result}   /${prefix}vol1
+    ${result} =     Execute     ozone fs -ls ofs://om/${prefix}vol1/
+                    Should contain  ${result}   /${prefix}vol1/${prefix}default
+                    Should contain  ${result}   /${prefix}vol1/${prefix}ratis
+                    Should contain  ${result}   /${prefix}vol1/${prefix}ec
+    ${result} =     Execute     ozone fs -ls ofs://om/${prefix}vol1/${prefix}default/${prefix}3mb
+                    Should contain  ${result}   /${prefix}vol1/${prefix}default/${prefix}3mb
+    ${result} =     Execute     ozone fs -ls ofs://om/${prefix}vol1/${prefix}ratis/${prefix}3mb
+                    Should contain  ${result}   /${prefix}vol1/${prefix}ratis/${prefix}3mb
+
+    ${result} =     Execute and checkrc    ozone fs -ls ofs://om/${prefix}vol1/${prefix}ec/     1
+                    Should contain  ${result}   ls: The list of keys contains keys with Erasure Coded replication set
+    ${result} =     Execute and checkrc    ozone fs -ls ofs://om/${prefix}vol1/${prefix}ec/${prefix}3mb     1
+                    Should contain  ${result}   : No such file or directory
+    ${result} =     Execute and checkrc    ozone fs -get ofs://om/${prefix}vol1/${prefix}ec/${prefix}3mb    1
+                    Should contain  ${result}   : No such file or directory
+
+Test FS Client Can Read Own Writes
+    [Tags]  test-ec-compat
+    Execute         ozone fs -put /tmp/1mb ofs://om/${prefix}vol1/${prefix}default/${prefix}1mb
+    Execute         ozone fs -put /tmp/1mb ofs://om/${prefix}vol1/${prefix}ratis/${prefix}1mb
+    Execute         ozone fs -put /tmp/1mb ofs://om/${prefix}vol1/${prefix}ec/${prefix}1mb
+    Key Should Match Local File     /${prefix}vol1/${prefix}default/${prefix}1mb    /tmp/1mb
+    Key Should Match Local File     /${prefix}vol1/${prefix}ratis/${prefix}1mb      /tmp/1mb
+    Key Should Match Local File     /${prefix}vol1/${prefix}ec/${prefix}1mb         /tmp/1mb
+    Execute         ozone fs -rm -skipTrash ofs://om/${prefix}vol1/${prefix}default/${prefix}1mb
+    Execute         ozone fs -rm -skipTrash ofs://om/${prefix}vol1/${prefix}ratis/${prefix}1mb
+    Execute         ozone fs -rm -skipTrash ofs://om/${prefix}vol1/${prefix}ec/${prefix}1mb
+
+Test Client Can Read Own Writes
+    [Tags]  test-ec-compat
+    Execute         ozone sh key put /${prefix}vol1/${prefix}default/${prefix}2mb /tmp/2mb
+    Execute         ozone sh key put /${prefix}vol1/${prefix}ratis/${prefix}2mb /tmp/2mb
+    Execute         ozone sh key put /${prefix}vol1/${prefix}ec/${prefix}2mb /tmp/2mb
+    Key Should Match Local File     /${prefix}vol1/${prefix}default/${prefix}2mb    /tmp/2mb
+    Key Should Match Local File     /${prefix}vol1/${prefix}ratis/${prefix}2mb      /tmp/2mb
+    Key Should Match Local File     /${prefix}vol1/${prefix}ec/${prefix}2mb         /tmp/2mb
+    Execute         ozone sh key delete /${prefix}vol1/${prefix}default/${prefix}2mb
+    Execute         ozone sh key delete /${prefix}vol1/${prefix}ratis/${prefix}2mb
+    Execute         ozone sh key delete /${prefix}vol1/${prefix}ec/${prefix}2mb
diff --git a/hadoop-ozone/dist/src/main/smoketest/ec/basic.robot b/hadoop-ozone/dist/src/main/smoketest/ec/basic.robot
index 121f30f5ce..751211d25d 100644
--- a/hadoop-ozone/dist/src/main/smoketest/ec/basic.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/ec/basic.robot
@@ -18,23 +18,10 @@ Documentation       Test EC shell commands
 Library             OperatingSystem
 Resource            ../commonlib.robot
 Resource            ../ozone-lib/shell.robot
+Resource            lib.resource
 Suite Setup         Prepare For Tests
 
-*** Variables ***
-${SCM}       scm
-
-*** Keywords ***
-
-Prepare For Tests
-    ${random} =         Generate Random String  5  [NUMBERS]
-    Set Suite Variable  ${prefix}  ${random}
-    Execute             dd if=/dev/urandom of=/tmp/1mb bs=1048576 count=1
-    Execute             dd if=/dev/urandom of=/tmp/2mb bs=1048576 count=2
-    Execute             dd if=/dev/urandom of=/tmp/3mb bs=1048576 count=3
-    Execute             dd if=/dev/urandom of=/tmp/100mb bs=1048576 count=100
-
 *** Test Cases ***
-
 Test Bucket Creation
     ${result} =     Execute             ozone sh volume create /${prefix}vol1
                     Should not contain  ${result}       Failed
diff --git a/hadoop-ozone/dist/src/main/smoketest/ec/lib.resource b/hadoop-ozone/dist/src/main/smoketest/ec/lib.resource
new file mode 100644
index 0000000000..7c774a23cd
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/smoketest/ec/lib.resource
@@ -0,0 +1,42 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+Documentation       Keywords and other resources used by EC tests
+Library             OperatingSystem
+Resource            ../commonlib.robot
+Resource            ../ozone-lib/shell.robot
+
+*** Variables ***
+${SCM}       scm
+
+*** Keywords ***
+Prepare For Tests
+    ${random}    Generate Random String  5  [NUMBERS]
+    Set Suite Variable  ${prefix}  ${random}
+    Execute             dd if=/dev/urandom of=/tmp/1mb bs=1048576 count=1
+    Execute             dd if=/dev/urandom of=/tmp/2mb bs=1048576 count=2
+    Execute             dd if=/dev/urandom of=/tmp/3mb bs=1048576 count=3
+    Execute             dd if=/dev/urandom of=/tmp/100mb bs=1048576 count=100
+
+# xcompat/test.sh creates unified test data files in /tmp for client containers
+Prepare Data For Xcompat Tests
+    Execute             ozone sh volume create /${prefix}vol1
+    Execute             ozone sh bucket create /${prefix}vol1/${prefix}default
+    Execute             ozone sh bucket create --replication 3 --type RATIS /${prefix}vol1/${prefix}ratis
+    Execute             ozone sh bucket create --replication rs-3-2-1024k --type EC /${prefix}vol1/${prefix}ec
+    Execute             ozone sh key put /${prefix}vol1/${prefix}default/${prefix}3mb /tmp/3mb
+    Execute             ozone sh key put /${prefix}vol1/${prefix}ratis/${prefix}3mb /tmp/3mb
+    Execute             ozone sh key put /${prefix}vol1/${prefix}ec/${prefix}3mb /tmp/3mb
diff --git a/hadoop-ozone/dist/src/main/smoketest/ec/upgrade-ec-check.robot b/hadoop-ozone/dist/src/main/smoketest/ec/upgrade-ec-check.robot
new file mode 100644
index 0000000000..dbfd9e81eb
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/smoketest/ec/upgrade-ec-check.robot
@@ -0,0 +1,45 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+Documentation       Test EC during upgrade
+Library             OperatingSystem
+Resource            lib.resource
+
+*** Test Cases ***
+Test EC Prior To Finalization
+    [Tags]  pre-finalized-ec-tests
+    Execute         ozone sh volume create /ectest
+    ${result} =     Execute and checkrc     ozone sh bucket create --replication rs-3-2-1024k --type EC /ectest/ectest     255
+                    Should contain  ${result}   NOT_SUPPORTED_OPERATION
+    Execute         ozone sh bucket create /ectest/testpropchange
+    ${result} =     Execute and checkrc     ozone sh bucket set-replication-config -r rs-3-2-1024k -t EC /ectest/testpropchange     255
+                    Should contain  ${result}   NOT_SUPPORTED_OPERATION
+    ${result} =     Execute and checkrc     ozone sh key put -r rs-3-2-1024k -t EC /ectest/testpropchange/core-site.xml /etc/hadoop/core-site.xml     255
+                    Should contain  ${result}   NOT_SUPPORTED_OPERATION
+
+
+
+Test EC After Finalization
+    [Tags]  post-finalized-ec-tests
+    Execute         ozone sh volume create /ectest-new
+    Execute         ozone sh bucket create --replication rs-3-2-1024k --type EC /ectest-new/ectest
+                    Verify Bucket EC Replication Config     /ectest-new/ectest  RS  3   2   1048576
+    Execute         ozone sh bucket create /ectest-new/testpropchange
+    Execute         ozone sh bucket set-replication-config -r rs-3-2-1024k -t EC /ectest-new/testpropchange
+                    Verify Bucket EC Replication Config     /ectest-new/testpropchange  RS  3   2   1048576
+    Execute         ozone sh key put -r rs-3-2-1024k -t EC /ectest-new/ectest/core-site.xml /etc/hadoop/core-site.xml
+                    Key Should Match Local File     /ectest-new/ectest/core-site.xml        /etc/hadoop/core-site.xml
+                    Verify Key EC Replication Config    /ectest-new/ectest/core-site.xml    RS  3   2   1048576
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/smoketest/upgrade/prepare.robot b/hadoop-ozone/dist/src/main/smoketest/upgrade/prepare.robot
index 0f6d7a0976..b1b4095d6e 100644
--- a/hadoop-ozone/dist/src/main/smoketest/upgrade/prepare.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/upgrade/prepare.robot
@@ -19,7 +19,7 @@ Resource            ../commonlib.robot
 Test Timeout        5 minutes
 Test Setup          Run Keyword if    '${SECURITY_ENABLED}' == 'true'    Kinit test user     testuser     testuser.keytab
 
-** Test Cases ***
+*** Test Cases ***
 Prepare Ozone Manager
     ${result} =        Execute      ozone admin om prepare -id %{OM_SERVICE_ID}
                        Wait Until Keyword Succeeds      3min       10sec     Should contain   ${result}   OM Preparation successful!
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index 407954b7ba..1bad21582f 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -356,6 +356,7 @@ enum Status {
     LAYOUT_FEATURE_FINALIZATION_FAILED = 72;
     PREPARE_FAILED = 73;
     NOT_SUPPORTED_OPERATION_WHEN_PREPARED = 74;
+    NOT_SUPPORTED_OPERATION_PRIOR_FINALIZATION = 75;
 }
 
 /**
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
index 7644746be9..37d3e71d6b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
@@ -41,8 +41,13 @@ import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator;
+import org.apache.hadoop.ozone.om.request.validation.RequestProcessingPhase;
+import org.apache.hadoop.ozone.om.request.validation.ValidationCondition;
+import org.apache.hadoop.ozone.om.request.validation.ValidationContext;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse;
+import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketEncryptionInfoProto;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo;
@@ -51,6 +56,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateB
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateBucketResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
 import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
@@ -365,4 +371,26 @@ public class OMBucketCreateRequest extends OMClientRequest {
 
   }
 
+  @RequestFeatureValidator(
+      conditions = ValidationCondition.CLUSTER_NEEDS_FINALIZATION,
+      processingPhase = RequestProcessingPhase.PRE_PROCESS,
+      requestType = Type.CreateBucket
+  )
+  public static OMRequest disallowCreateBucketWithECReplicationConfig(
+      OMRequest req, ValidationContext ctx) throws OMException {
+    if (!ctx.versionManager()
+        .isAllowed(OMLayoutFeature.ERASURE_CODED_STORAGE_SUPPORT)) {
+      if (req.getCreateBucketRequest()
+          .getBucketInfo().hasDefaultReplicationConfig()
+          && req.getCreateBucketRequest().getBucketInfo()
+          .getDefaultReplicationConfig().hasEcReplicationConfig()) {
+        throw new OMException("Cluster does not have the Erasure Coded"
+            + " Storage support feature finalized yet, but the request"
+            + " contains an Erasure Coded replication type. Rejecting the"
+            + " request; please finalize the cluster upgrade and try again.",
+            OMException.ResultCodes.NOT_SUPPORTED_OPERATION_PRIOR_FINALIZATION);
+      }
+    }
+    return req;
+  }
 }
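
Each OM write request below repeats this validator shape; only the
request-specific getters change. A condensed template under the same
annotation elements (requestCarriesEcReplicationConfig is a hypothetical
stand-in for the per-request field checks):

    @RequestFeatureValidator(
        conditions = ValidationCondition.CLUSTER_NEEDS_FINALIZATION,
        processingPhase = RequestProcessingPhase.PRE_PROCESS,
        requestType = Type.CreateKey  // varies per request class
    )
    public static OMRequest disallowECReplicationConfigPreFinalization(
        OMRequest req, ValidationContext ctx) throws OMException {
      if (!ctx.versionManager()
          .isAllowed(OMLayoutFeature.ERASURE_CODED_STORAGE_SUPPORT)
          && requestCarriesEcReplicationConfig(req)) {
        throw new OMException("Cluster does not have the Erasure Coded"
            + " Storage support feature finalized yet. Rejecting the request;"
            + " please finalize the cluster upgrade and try again.",
            OMException.ResultCodes.NOT_SUPPORTED_OPERATION_PRIOR_FINALIZATION);
      }
      return req;
    }
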
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java
index 13b3623705..d237c1c880 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java
@@ -30,6 +30,12 @@ import org.apache.hadoop.ozone.audit.OMAction;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator;
+import org.apache.hadoop.ozone.om.request.validation.RequestProcessingPhase;
+import org.apache.hadoop.ozone.om.request.validation.ValidationCondition;
+import org.apache.hadoop.ozone.om.request.validation.ValidationContext;
+import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
 import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -303,4 +309,28 @@ public class OMBucketSetPropertyRequest extends OMClientRequest {
     return true;
   }
 
+  @RequestFeatureValidator(
+      conditions = ValidationCondition.CLUSTER_NEEDS_FINALIZATION,
+      processingPhase = RequestProcessingPhase.PRE_PROCESS,
+      requestType = Type.SetBucketProperty
+  )
+  public static OMRequest disallowSetBucketPropertyWithECReplicationConfig(
+      OMRequest req, ValidationContext ctx) throws OMException {
+    if (!ctx.versionManager()
+        .isAllowed(OMLayoutFeature.ERASURE_CODED_STORAGE_SUPPORT)) {
+      SetBucketPropertyRequest propReq =
+          req.getSetBucketPropertyRequest();
+      if (propReq.hasBucketArgs()
+          && propReq.getBucketArgs().hasDefaultReplicationConfig()
+          && propReq.getBucketArgs().getDefaultReplicationConfig()
+          .hasEcReplicationConfig()) {
+        throw new OMException("Cluster does not have the Erasure Coded"
+            + " Storage support feature finalized yet, but the request contains"
+            + " an Erasure Coded replication type. Rejecting the request,"
+            + " please finalize the cluster upgrade and then try again.",
+            OMException.ResultCodes.NOT_SUPPORTED_OPERATION_PRIOR_FINALIZATION);
+      }
+    }
+    return req;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
index a7fdef0bcd..62e8bc2076 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
@@ -39,6 +39,12 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator;
+import org.apache.hadoop.ozone.om.request.validation.RequestProcessingPhase;
+import org.apache.hadoop.ozone.om.request.validation.ValidationCondition;
+import org.apache.hadoop.ozone.om.request.validation.ValidationContext;
+import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
 import org.slf4j.Logger;
@@ -388,4 +394,25 @@ public class OMDirectoryCreateRequest extends OMKeyRequest {
   static long getMaxNumOfRecursiveDirs() {
     return MAX_NUM_OF_RECURSIVE_DIRS;
   }
+
+  @RequestFeatureValidator(
+      conditions = ValidationCondition.CLUSTER_NEEDS_FINALIZATION,
+      processingPhase = RequestProcessingPhase.PRE_PROCESS,
+      requestType = Type.CreateDirectory
+  )
+  public static OMRequest disallowCreateDirectoryWithECReplicationConfig(
+      OMRequest req, ValidationContext ctx) throws OMException {
+    if (!ctx.versionManager().
+        isAllowed(OMLayoutFeature.ERASURE_CODED_STORAGE_SUPPORT)) {
+      if (req.getCreateDirectoryRequest().getKeyArgs()
+          .hasEcReplicationConfig()) {
+        throw new OMException("Cluster does not have the Erasure Coded"
+            + " Storage support feature finalized yet, but the request contains"
+            + " an Erasure Coded replication type. Rejecting the request,"
+            + " please finalize the cluster upgrade and then try again.",
+            OMException.ResultCodes.NOT_SUPPORTED_OPERATION_PRIOR_FINALIZATION);
+      }
+    }
+    return req;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
index a80bcca38d..21996913a6 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
@@ -36,7 +36,12 @@ import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OzoneConfigUtil;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator;
+import org.apache.hadoop.ozone.om.request.validation.RequestProcessingPhase;
+import org.apache.hadoop.ozone.om.request.validation.ValidationCondition;
+import org.apache.hadoop.ozone.om.request.validation.ValidationContext;
 import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponse;
+import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -387,4 +392,24 @@ public class OMFileCreateRequest extends OMKeyRequest {
           OMException.ResultCodes.DIRECTORY_NOT_FOUND);
     }
   }
+
+  @RequestFeatureValidator(
+      conditions = ValidationCondition.CLUSTER_NEEDS_FINALIZATION,
+      processingPhase = RequestProcessingPhase.PRE_PROCESS,
+      requestType = CreateFile
+  )
+  public static OMRequest disallowCreateFileWithECReplicationConfig(
+      OMRequest req, ValidationContext ctx) throws OMException {
+    if (!ctx.versionManager()
+        .isAllowed(OMLayoutFeature.ERASURE_CODED_STORAGE_SUPPORT)) {
+      if (req.getCreateFileRequest().getKeyArgs().hasEcReplicationConfig()) {
+        throw new OMException("Cluster does not have the Erasure Coded"
+            + " Storage support feature finalized yet, but the request contains"
+            + " an Erasure Coded replication type. Rejecting the request,"
+            + " please finalize the cluster upgrade and then try again.",
+            OMException.ResultCodes.NOT_SUPPORTED_OPERATION_PRIOR_FINALIZATION);
+      }
+    }
+    return req;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java
index 824a78d13a..c151bcbacd 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java
@@ -30,6 +30,12 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator;
+import org.apache.hadoop.ozone.om.request.validation.RequestProcessingPhase;
+import org.apache.hadoop.ozone.om.request.validation.ValidationCondition;
+import org.apache.hadoop.ozone.om.request.validation.ValidationContext;
+import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
@@ -253,4 +259,24 @@ public class OMAllocateBlockRequest extends OMKeyRequest {
 
     return omClientResponse;
   }
+
+  @RequestFeatureValidator(
+      conditions = ValidationCondition.CLUSTER_NEEDS_FINALIZATION,
+      processingPhase = RequestProcessingPhase.PRE_PROCESS,
+      requestType = Type.AllocateBlock
+  )
+  public static OMRequest disallowAllocateBlockWithECReplicationConfig(
+      OMRequest req, ValidationContext ctx) throws OMException {
+    if (!ctx.versionManager()
+        .isAllowed(OMLayoutFeature.ERASURE_CODED_STORAGE_SUPPORT)) {
+      if (req.getAllocateBlockRequest().getKeyArgs().hasEcReplicationConfig()) {
+        throw new OMException("Cluster does not have the Erasure Coded"
+            + " Storage support feature finalized yet, but the request contains"
+            + " an Erasure Coded replication type. Rejecting the request,"
+            + " please finalize the cluster upgrade and then try again.",
+            OMException.ResultCodes.NOT_SUPPORTED_OPERATION_PRIOR_FINALIZATION);
+      }
+    }
+    return req;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
index b308b98ae6..047b5f3ac1 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
@@ -38,6 +38,12 @@ import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator;
+import org.apache.hadoop.ozone.om.request.validation.RequestProcessingPhase;
+import org.apache.hadoop.ozone.om.request.validation.ValidationCondition;
+import org.apache.hadoop.ozone.om.request.validation.ValidationContext;
+import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -321,4 +327,24 @@ public class OMKeyCommitRequest extends OMKeyRequest {
               commitKeyRequest);
     }
   }
+
+  @RequestFeatureValidator(
+      conditions = ValidationCondition.CLUSTER_NEEDS_FINALIZATION,
+      processingPhase = RequestProcessingPhase.PRE_PROCESS,
+      requestType = Type.CommitKey
+  )
+  public static OMRequest disallowCommitKeyWithECReplicationConfig(
+      OMRequest req, ValidationContext ctx) throws OMException {
+    if (!ctx.versionManager()
+        .isAllowed(OMLayoutFeature.ERASURE_CODED_STORAGE_SUPPORT)) {
+      if (req.getCommitKeyRequest().getKeyArgs().hasEcReplicationConfig()) {
+        throw new OMException("Cluster does not have the Erasure Coded"
+            + " Storage support feature finalized yet, but the request contains"
+            + " an Erasure Coded replication type. Rejecting the request,"
+            + " please finalize the cluster upgrade and then try again.",
+            OMException.ResultCodes.NOT_SUPPORTED_OPERATION_PRIOR_FINALIZATION);
+      }
+    }
+    return req;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
index b769a68c93..d45b2b2350 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
@@ -37,6 +37,11 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest;
 import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator;
+import org.apache.hadoop.ozone.om.request.validation.RequestProcessingPhase;
+import org.apache.hadoop.ozone.om.request.validation.ValidationCondition;
+import org.apache.hadoop.ozone.om.request.validation.ValidationContext;
+import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -374,4 +379,24 @@ public class OMKeyCreateRequest extends OMKeyRequest {
           createKeyRequest);
     }
   }
+
+  @RequestFeatureValidator(
+      conditions = ValidationCondition.CLUSTER_NEEDS_FINALIZATION,
+      processingPhase = RequestProcessingPhase.PRE_PROCESS,
+      requestType = Type.CreateKey
+  )
+  public static OMRequest disallowCreateKeyWithECReplicationConfig(
+      OMRequest req, ValidationContext ctx) throws OMException {
+    if (!ctx.versionManager()
+        .isAllowed(OMLayoutFeature.ERASURE_CODED_STORAGE_SUPPORT)) {
+      if (req.getCreateKeyRequest().getKeyArgs().hasEcReplicationConfig()) {
+        throw new OMException("Cluster does not have the Erasure Coded"
+            + " Storage support feature finalized yet, but the request contains"
+            + " an Erasure Coded replication type. Rejecting the request,"
+            + " please finalize the cluster upgrade and then try again.",
+            OMException.ResultCodes.NOT_SUPPORTED_OPERATION_PRIOR_FINALIZATION);
+      }
+    }
+    return req;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
index 97002898e6..4aaec04bf3 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.ozone.audit.OMAction;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OzoneConfigUtil;
 import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
@@ -33,13 +34,19 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator;
+import org.apache.hadoop.ozone.om.request.validation.RequestProcessingPhase;
+import org.apache.hadoop.ozone.om.request.validation.ValidationCondition;
+import org.apache.hadoop.ozone.om.request.validation.ValidationContext;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.om.response.s3.multipart.S3InitiateMultipartUploadResponse;
+import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
 import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
@@ -269,4 +276,26 @@ public class S3InitiateMultipartUploadRequest extends OMKeyRequest {
           multipartInfoInitiateRequest);
     }
   }
+
+  @RequestFeatureValidator(
+      conditions = ValidationCondition.CLUSTER_NEEDS_FINALIZATION,
+      processingPhase = RequestProcessingPhase.PRE_PROCESS,
+      requestType = Type.InitiateMultiPartUpload
+  )
+  public static OMRequest
+      disallowInitiateMultiPartUploadWithECReplicationConfig(
+      OMRequest req, ValidationContext ctx) throws OMException {
+    if (!ctx.versionManager()
+        .isAllowed(OMLayoutFeature.ERASURE_CODED_STORAGE_SUPPORT)) {
+      if (req.getInitiateMultiPartUploadRequest().getKeyArgs()
+          .hasEcReplicationConfig()) {
+        throw new OMException("Cluster does not have the Erasure Coded"
+            + " Storage support feature finalized yet, but the request contains"
+            + " an Erasure Coded replication type. Rejecting the request,"
+            + " please finalize the cluster upgrade and then try again.",
+            OMException.ResultCodes.NOT_SUPPORTED_OPERATION_PRIOR_FINALIZATION);
+      }
+    }
+    return req;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
index d63016a72d..0959c71988 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
@@ -28,6 +28,12 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.QuotaUtil;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator;
+import org.apache.hadoop.ozone.om.request.validation.RequestProcessingPhase;
+import org.apache.hadoop.ozone.om.request.validation.ValidationCondition;
+import org.apache.hadoop.ozone.om.request.validation.ValidationContext;
+import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
 import org.slf4j.Logger;
@@ -255,4 +261,25 @@ public class S3MultipartUploadAbortRequest extends OMKeyRequest {
         volumeName, bucketName, keyName, multipartUploadID);
     return multipartKey;
   }
+
+  @RequestFeatureValidator(
+      conditions = ValidationCondition.CLUSTER_NEEDS_FINALIZATION,
+      processingPhase = RequestProcessingPhase.PRE_PROCESS,
+      requestType = Type.AbortMultiPartUpload
+  )
+  public static OMRequest disallowAbortMultiPartUploadWithECReplicationConfig(
+      OMRequest req, ValidationContext ctx) throws OMException {
+    if (!ctx.versionManager().isAllowed(
+        OMLayoutFeature.ERASURE_CODED_STORAGE_SUPPORT)) {
+      if (req.getAbortMultiPartUploadRequest().getKeyArgs()
+          .hasEcReplicationConfig()) {
+        throw new OMException("Cluster does not have the Erasure Coded"
+            + " Storage support feature finalized yet, but the request contains"
+            + " an Erasure Coded replication type. Rejecting the request,"
+            + " please finalize the cluster upgrade and then try again.",
+            OMException.ResultCodes.NOT_SUPPORTED_OPERATION_PRIOR_FINALIZATION);
+      }
+    }
+    return req;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
index 1392a5b204..901f5833b8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
@@ -33,9 +33,14 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator;
+import org.apache.hadoop.ozone.om.request.validation.RequestProcessingPhase;
+import org.apache.hadoop.ozone.om.request.validation.ValidationCondition;
+import org.apache.hadoop.ozone.om.request.validation.ValidationContext;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.om.response.s3.multipart
     .S3MultipartUploadCommitPartResponse;
+import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .KeyArgs;
@@ -47,6 +52,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
 import org.apache.hadoop.util.Time;
@@ -334,5 +340,25 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
     return omMetadataManager.getMultipartKey(volumeName, bucketName,
         keyName, uploadID);
   }
-}
 
+  @RequestFeatureValidator(
+      conditions = ValidationCondition.CLUSTER_NEEDS_FINALIZATION,
+      processingPhase = RequestProcessingPhase.PRE_PROCESS,
+      requestType = Type.CommitMultiPartUpload
+  )
+  public static OMRequest disallowCommitMultiPartUploadWithECReplicationConfig(
+      OMRequest req, ValidationContext ctx) throws OMException {
+    if (!ctx.versionManager().isAllowed(
+        OMLayoutFeature.ERASURE_CODED_STORAGE_SUPPORT)) {
+      if (req.getCommitMultiPartUploadRequest().getKeyArgs()
+          .hasEcReplicationConfig()) {
+        throw new OMException("Cluster does not have the Erasure Coded"
+            + " Storage support feature finalized yet, but the request contains"
+            + " an Erasure Coded replication type. Rejecting the request,"
+            + " please finalize the cluster upgrade and then try again.",
+            OMException.ResultCodes.NOT_SUPPORTED_OPERATION_PRIOR_FINALIZATION);
+      }
+    }
+    return req;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
index f3f0c3d7e0..3f205a8cd8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
@@ -48,8 +48,13 @@ import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator;
+import org.apache.hadoop.ozone.om.request.validation.RequestProcessingPhase;
+import org.apache.hadoop.ozone.om.request.validation.ValidationCondition;
+import org.apache.hadoop.ozone.om.request.validation.ValidationContext;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.om.response.s3.multipart.S3MultipartUploadCompleteResponse;
+import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadCompleteRequest;
@@ -57,6 +62,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Multipa
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
 import org.apache.hadoop.util.Time;
@@ -565,5 +571,26 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
           new CacheValue<>(Optional.of(omBucketInfo), transactionLogIndex));
     }
   }
-}
 
+  @RequestFeatureValidator(
+      conditions = ValidationCondition.CLUSTER_NEEDS_FINALIZATION,
+      processingPhase = RequestProcessingPhase.PRE_PROCESS,
+      requestType = Type.CompleteMultiPartUpload
+  )
+  public static OMRequest
+      disallowCompleteMultiPartUploadWithECReplicationConfig(
+      OMRequest req, ValidationContext ctx) throws OMException {
+    if (!ctx.versionManager().isAllowed(
+        OMLayoutFeature.ERASURE_CODED_STORAGE_SUPPORT)) {
+      if (req.getCompleteMultiPartUploadRequest().getKeyArgs()
+          .hasEcReplicationConfig()) {
+        throw new OMException("Cluster does not have the Erasure Coded"
+            + " Storage support feature finalized yet, but the request contains"
+            + " an Erasure Coded replication type. Rejecting the request,"
+            + " please finalize the cluster upgrade and then try again.",
+            OMException.ResultCodes.NOT_SUPPORTED_OPERATION_PRIOR_FINALIZATION);
+      }
+    }
+    return req;
+  }
+}
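
All of the pre-process validators added above share one guard: reject the request when it carries an EC replication config while ERASURE_CODED_STORAGE_SUPPORT is not yet finalized. A hedged sketch of that common shape factored into a helper (hypothetical, not part of this commit; only the per-request EC check differs between call sites):

    // Hypothetical helper capturing the guard each validator repeats.
    private static void requireEcFinalized(boolean requestCarriesEcConfig,
        ValidationContext ctx) throws OMException {
      if (!ctx.versionManager()
          .isAllowed(OMLayoutFeature.ERASURE_CODED_STORAGE_SUPPORT)
          && requestCarriesEcConfig) {
        throw new OMException("Cluster does not have the Erasure Coded"
            + " Storage support feature finalized yet, but the request"
            + " contains an Erasure Coded replication type. Rejecting the"
            + " request, please finalize the cluster upgrade and then try"
            + " again.",
            OMException.ResultCodes.NOT_SUPPORTED_OPERATION_PRIOR_FINALIZATION);
      }
    }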
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/validation/RequestValidations.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/validation/RequestValidations.java
index fe34b74609..e2e127a4fa 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/validation/RequestValidations.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/validation/RequestValidations.java
@@ -17,6 +17,7 @@
 package org.apache.hadoop.ozone.om.request.validation;
 
 import com.google.protobuf.ServiceException;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.slf4j.Logger;
@@ -59,20 +60,26 @@ public class RequestValidations {
     return this;
   }
 
-  public OMRequest validateRequest(OMRequest request) throws ServiceException {
+  public OMRequest validateRequest(OMRequest request)
+      throws Exception {
     List<Method> validations = registry.validationsFor(
         conditions(request), request.getCmdType(), PRE_PROCESS);
 
     OMRequest validatedRequest = request.toBuilder().build();
     try {
       for (Method m : validations) {
-        validatedRequest =
-            (OMRequest) m.invoke(null, validatedRequest, context);
         LOG.debug("Running the {} request pre-process validation from {}.{}",
             m.getName(), m.getDeclaringClass().getPackage().getName(),
             m.getDeclaringClass().getSimpleName());
+        validatedRequest =
+            (OMRequest) m.invoke(null, validatedRequest, context);
+      }
+    } catch (InvocationTargetException e) {
+      if (e.getCause() instanceof OMException) {
+        throw (OMException) e.getCause();
       }
-    } catch (IllegalAccessException | InvocationTargetException e) {
+      throw new ServiceException(e);
+    } catch (IllegalAccessException e) {
       throw new ServiceException(e);
     }
     return validatedRequest;
@@ -86,11 +93,11 @@ public class RequestValidations {
     OMResponse validatedResponse = response.toBuilder().build();
     try {
       for (Method m : validations) {
-        validatedResponse =
-            (OMResponse) m.invoke(null, request, response, context);
         LOG.debug("Running the {} request post-process validation from {}.{}",
             m.getName(), m.getDeclaringClass().getPackage().getName(),
             m.getDeclaringClass().getSimpleName());
+        validatedResponse =
+            (OMResponse) m.invoke(null, request, response, context);
       }
     } catch (InvocationTargetException | IllegalAccessException e) {
       throw new ServiceException(e);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeature.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeature.java
index 5338c168bc..aa639b2976 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeature.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeature.java
@@ -29,7 +29,10 @@ import org.apache.hadoop.ozone.upgrade.LayoutFeature;
  */
 public enum OMLayoutFeature implements LayoutFeature {
   //////////////////////////////  //////////////////////////////
-  INITIAL_VERSION(0, "Initial Layout Version");
+  INITIAL_VERSION(0, "Initial Layout Version"),
+
+  ERASURE_CODED_STORAGE_SUPPORT(1, "Ozone version with built in support for"
+      + " Erasure Coded block data storage.");
 
 
   ///////////////////////////////  /////////////////////////////
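
The new enum constant ties EC to OM metadata layout version 1; a feature is allowed once the cluster's metadata layout version (MLV) has been finalized to at least the feature's layout version. A minimal sketch of the resulting gate, assuming lvm is the OM's LayoutVersionManager:

    if (lvm.isAllowed(OMLayoutFeature.ERASURE_CODED_STORAGE_SUPPORT)) {
      // MLV >= 1: EC buckets, keys and multipart uploads are accepted.
    } else {
      // MLV == 0: EC-typed writes are rejected by the validators above.
    }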
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureAspect.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureAspect.java
index a6fe773a24..cb1eadbe96 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureAspect.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeatureAspect.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.ozone.om.upgrade;
 
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_SUPPORTED_OPERATION;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_SUPPORTED_OPERATION_PRIOR_FINALIZATION;
 
 import java.io.IOException;
 import java.lang.reflect.Method;
@@ -83,7 +83,7 @@ public class OMLayoutFeatureAspect {
           layoutFeature.name(),
           layoutFeature.layoutVersion(),
           lvm.getMetadataLayoutVersion()),
-          NOT_SUPPORTED_OPERATION);
+          NOT_SUPPORTED_OPERATION_PRIOR_FINALIZATION);
     }
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
index 06aa6c546a..a2efa6f165 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hdds.tracing.TracingUtil;
 import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics;
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.exceptions.OMLeaderNotReadyException;
 import org.apache.hadoop.ozone.om.exceptions.OMNotLeaderException;
 import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
@@ -131,7 +132,15 @@ public class OzoneManagerProtocolServerSideTranslatorPB implements
   @Override
   public OMResponse submitRequest(RpcController controller,
       OMRequest request) throws ServiceException {
-    OMRequest validatedRequest = requestValidations.validateRequest(request);
+    OMRequest validatedRequest;
+    try {
+      validatedRequest = requestValidations.validateRequest(request);
+    } catch (Exception e) {
+      if (e instanceof OMException) {
+        return createErrorResponse(request, (OMException) e);
+      }
+      throw new ServiceException(e);
+    }
 
     OMResponse response = 
         dispatcher.processRequest(validatedRequest, this::processRequest,
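
Because validateRequest is now declared to throw Exception (see the RequestValidations change above), submitRequest catches Exception and branches on OMException. A hedged sketch of the same behavior reshaped as explicit catches, for readability only:

    // An OMException raised by a pre-process validator becomes a normal error
    // OMResponse that keeps its result code (e.g.
    // NOT_SUPPORTED_OPERATION_PRIOR_FINALIZATION); any other validation
    // failure still fails the whole RPC.
    OMRequest validatedRequest;
    try {
      validatedRequest = requestValidations.validateRequest(request);
    } catch (OMException e) {
      return createErrorResponse(request, e);
    } catch (Exception e) {
      throw new ServiceException(e);
    }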
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
index 0087f71f3e..ecec94592d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
@@ -22,6 +22,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.stream.Collectors;
 
+import com.google.protobuf.ServiceException;
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.UpgradeFinalizationStatus;
@@ -47,6 +48,10 @@ import org.apache.hadoop.ozone.om.ratis.OzoneManagerDoubleBuffer;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator;
+import org.apache.hadoop.ozone.om.request.validation.RequestProcessingPhase;
+import org.apache.hadoop.ozone.om.request.validation.ValidationCondition;
+import org.apache.hadoop.ozone.om.request.validation.ValidationContext;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CheckVolumeAccessRequest;
@@ -60,6 +65,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoBuc
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoVolumeRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoVolumeResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListBucketsRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListBucketsResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListKeysRequest;
@@ -74,7 +80,9 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Multipa
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadListPartsResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneFileStatusProto;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareStatusResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RepeatedKeyInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServiceListRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServiceListResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status;
@@ -395,10 +403,36 @@ public class OzoneManagerRequestHandler implements RequestHandler {
 
     resp.setKeyInfo(keyInfo.getProtobuf(keyArgs.getHeadOp(), clientVersion));
 
-
     return resp.build();
   }
 
+  @RequestFeatureValidator(
+      conditions = ValidationCondition.OLDER_CLIENT_REQUESTS,
+      processingPhase = RequestProcessingPhase.POST_PROCESS,
+      requestType = Type.LookupKey
+  )
+  public static OMResponse disallowLookupKeyResponseWithECReplicationConfig(
+      OMRequest req, OMResponse resp, ValidationContext ctx)
+      throws ServiceException {
+    if (!resp.hasLookupKeyResponse()) {
+      return resp;
+    }
+    if (resp.getLookupKeyResponse().getKeyInfo().hasEcReplicationConfig()) {
+      resp = resp.toBuilder()
+          .setStatus(Status.NOT_SUPPORTED_OPERATION)
+          .setMessage("Key is a key with Erasure Coded replication, which"
+              + " the client can not understand.\n"
+              + "Please upgrade the client before trying to read the key: "
+              + req.getLookupKeyRequest().getKeyArgs().getVolumeName()
+              + "/" + req.getLookupKeyRequest().getKeyArgs().getBucketName()
+              + "/" + req.getLookupKeyRequest().getKeyArgs().getKeyName()
+              + ".")
+          .clearLookupKeyResponse()
+          .build();
+    }
+    return resp;
+  }
+
   private ListBucketsResponse listBuckets(ListBucketsRequest request)
       throws IOException {
     ListBucketsResponse.Builder resp =
@@ -434,6 +468,33 @@ public class OzoneManagerRequestHandler implements RequestHandler {
     return resp.build();
   }
 
+  @RequestFeatureValidator(
+      conditions = ValidationCondition.OLDER_CLIENT_REQUESTS,
+      processingPhase = RequestProcessingPhase.POST_PROCESS,
+      requestType = Type.ListKeys
+  )
+  public static OMResponse disallowListKeysResponseWithECReplicationConfig(
+      OMRequest req, OMResponse resp, ValidationContext ctx)
+      throws ServiceException {
+    if (!resp.hasListKeysResponse()) {
+      return resp;
+    }
+    List<KeyInfo> keys = resp.getListKeysResponse().getKeyInfoList();
+    for (KeyInfo key : keys) {
+      if (key.hasEcReplicationConfig()) {
+        resp = resp.toBuilder()
+            .setStatus(Status.NOT_SUPPORTED_OPERATION)
+            .setMessage("The list of keys contains keys with Erasure Coded"
+                + " replication set, hence the client is not able to"
+                + " represent all the keys returned. Please upgrade the"
+                + " client to get the list of keys.")
+            .clearListKeysResponse()
+            .build();
+      }
+    }
+    return resp;
+  }
+
   private ListTrashResponse listTrash(ListTrashRequest request,
       int clientVersion) throws IOException {
 
@@ -454,6 +515,36 @@ public class OzoneManagerRequestHandler implements RequestHandler {
     return resp.build();
   }
 
+  @RequestFeatureValidator(
+      conditions = ValidationCondition.OLDER_CLIENT_REQUESTS,
+      processingPhase = RequestProcessingPhase.POST_PROCESS,
+      requestType = Type.ListTrash
+  )
+  public static OMResponse disallowListTrashWithECReplicationConfig(
+      OMRequest req, OMResponse resp, ValidationContext ctx)
+      throws ServiceException {
+    if (!resp.hasListTrashResponse()) {
+      return resp;
+    }
+    List<RepeatedKeyInfo> repeatedKeys =
+        resp.getListTrashResponse().getDeletedKeysList();
+    for (RepeatedKeyInfo repeatedKey : repeatedKeys) {
+      for (KeyInfo key : repeatedKey.getKeyInfoList()) {
+        if (key.hasEcReplicationConfig()) {
+          resp = resp.toBuilder()
+              .setStatus(Status.NOT_SUPPORTED_OPERATION)
+              .setMessage("The list of keys contains keys with Erasure Coded"
+                  + " replication set, hence the client is not able to"
+                  + " represent all the keys returned. Please upgrade the"
+                  + " client to get the list of keys.")
+              .clearListTrashResponse()
+              .build();
+        }
+      }
+    }
+    return resp;
+  }
+
   private ServiceListResponse getServiceList(ServiceListRequest request)
       throws IOException {
     ServiceListResponse.Builder resp = ServiceListResponse.newBuilder();
@@ -581,6 +672,35 @@ public class OzoneManagerRequestHandler implements RequestHandler {
     return rb.build();
   }
 
+  @RequestFeatureValidator(
+      conditions = ValidationCondition.OLDER_CLIENT_REQUESTS,
+      processingPhase = RequestProcessingPhase.POST_PROCESS,
+      requestType = Type.GetFileStatus
+  )
+  public static OMResponse disallowGetFileStatusWithECReplicationConfig(
+      OMRequest req, OMResponse resp, ValidationContext ctx)
+      throws ServiceException {
+    if (!resp.hasGetFileStatusResponse()) {
+      return resp;
+    }
+    if (resp.getGetFileStatusResponse().getStatus().getKeyInfo()
+        .hasEcReplicationConfig()) {
+      resp = resp.toBuilder()
+          .setStatus(Status.NOT_SUPPORTED_OPERATION)
+          .setMessage("Key is a key with Erasure Coded replication, which"
+              + " the client can not understand."
+              + " Please upgrade the client before trying to read the key info"
+              + " for "
+              + req.getGetFileStatusRequest().getKeyArgs().getVolumeName()
+              + "/" + req.getGetFileStatusRequest().getKeyArgs().getBucketName()
+              + "/" + req.getGetFileStatusRequest().getKeyArgs().getKeyName()
+              + ".")
+          .clearGetFileStatusResponse()
+          .build();
+    }
+    return resp;
+  }
+
   private LookupFileResponse lookupFile(LookupFileRequest request,
       int clientVersion) throws IOException {
     KeyArgs keyArgs = request.getKeyArgs();
@@ -597,6 +717,34 @@ public class OzoneManagerRequestHandler implements RequestHandler {
         .build();
   }
 
+  @RequestFeatureValidator(
+      conditions = ValidationCondition.OLDER_CLIENT_REQUESTS,
+      processingPhase = RequestProcessingPhase.POST_PROCESS,
+      requestType = Type.LookupFile
+  )
+  public static OMResponse disallowLookupFileWithECReplicationConfig(
+      OMRequest req, OMResponse resp, ValidationContext ctx)
+      throws ServiceException {
+    if (!resp.hasLookupFileResponse()) {
+      return resp;
+    }
+    if (resp.getLookupFileResponse().getKeyInfo().hasEcReplicationConfig()) {
+      resp = resp.toBuilder()
+          .setStatus(Status.NOT_SUPPORTED_OPERATION)
+          .setMessage("Key is a key with Erasure Coded replication, which the"
+              + " client can not understand."
+              + " Please upgrade the client before trying to read the key info"
+              + " for "
+              + req.getLookupFileRequest().getKeyArgs().getVolumeName()
+              + "/" + req.getLookupFileRequest().getKeyArgs().getBucketName()
+              + "/" + req.getLookupFileRequest().getKeyArgs().getKeyName()
+              + ".")
+          .clearLookupFileResponse()
+          .build();
+    }
+    return resp;
+  }
+
   private ListStatusResponse listStatus(
       ListStatusRequest request, int clientVersion) throws IOException {
     KeyArgs keyArgs = request.getKeyArgs();
@@ -620,6 +768,34 @@ public class OzoneManagerRequestHandler implements RequestHandler {
     return listStatusResponseBuilder.build();
   }
 
+  @RequestFeatureValidator(
+      conditions = ValidationCondition.OLDER_CLIENT_REQUESTS,
+      processingPhase = RequestProcessingPhase.POST_PROCESS,
+      requestType = Type.ListStatus
+  )
+  public static OMResponse disallowListStatusResponseWithECReplicationConfig(
+      OMRequest req, OMResponse resp, ValidationContext ctx)
+      throws ServiceException {
+    if (!resp.hasListStatusResponse()) {
+      return resp;
+    }
+    List<OzoneFileStatusProto> statuses =
+        resp.getListStatusResponse().getStatusesList();
+    for (OzoneFileStatusProto status : statuses) {
+      if (status.getKeyInfo().hasEcReplicationConfig()) {
+        resp = resp.toBuilder()
+            .setStatus(Status.NOT_SUPPORTED_OPERATION)
+            .setMessage("The list of keys contains keys with Erasure Coded"
+                + " replication set, hence the client is not able to"
+                + " represent all the keys returned."
+                + " Please upgrade the client to get the list of keys.")
+            .clearListStatusResponse()
+            .build();
+      }
+    }
+    return resp;
+  }
+
   private FinalizeUpgradeProgressResponse reportUpgradeProgress(
       FinalizeUpgradeProgressRequest request) throws IOException {
     String upgradeClientId = request.getUpgradeClientId();
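
Together these POST_PROCESS validators make EC content invisible rather than unreadable for pre-EC clients: the payload a legacy client could not parse is cleared and replaced with a status plus an upgrade hint. A hedged sketch of the LookupKey path, assuming req, resp and ctx are the request, the OM's response and a validation context for an OLDER_CLIENT_REQUESTS caller:

    OMResponse masked = OzoneManagerRequestHandler
        .disallowLookupKeyResponseWithECReplicationConfig(req, resp, ctx);
    if (masked.getStatus() == Status.NOT_SUPPORTED_OPERATION) {
      // The old client receives no KeyInfo it cannot parse, only a message
      // naming the volume/bucket/key and asking it to upgrade first.
    }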
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/validation/TestRequestValidations.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/validation/TestRequestValidations.java
index 4508eafd04..8b97bba0f7 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/validation/TestRequestValidations.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/validation/TestRequestValidations.java
@@ -16,7 +16,6 @@
  */
 package org.apache.hadoop.ozone.om.request.validation;
 
-import com.google.protobuf.ServiceException;
 import org.apache.hadoop.ozone.ClientVersion;
 import org.apache.hadoop.ozone.om.request.validation.testvalidatorset1.GeneralValidatorsForTesting;
 import org.apache.hadoop.ozone.om.request.validation.testvalidatorset1.GeneralValidatorsForTesting.ValidationListener;
@@ -69,7 +68,7 @@ public class TestRequestValidations {
   }
 
   @Test(expected = NullPointerException.class)
-  public void testUsingRegistryWithoutLoading() throws ServiceException {
+  public void testUsingRegistryWithoutLoading() throws Exception {
     new RequestValidations()
         .fromPackage(PACKAGE)
         .withinContext(of(aFinalizedVersionManager()))
@@ -77,7 +76,7 @@ public class TestRequestValidations {
   }
 
   @Test(expected = NullPointerException.class)
-  public void testUsingRegistryWithoutContext() throws ServiceException {
+  public void testUsingRegistryWithoutContext() throws Exception {
     new RequestValidations()
         .fromPackage(PACKAGE)
         .load()
@@ -85,7 +84,7 @@ public class TestRequestValidations {
   }
 
   @Test
-  public void testUsingRegistryWithoutPackage() throws ServiceException {
+  public void testUsingRegistryWithoutPackage() throws Exception {
     new RequestValidations()
         .withinContext(of(aFinalizedVersionManager()))
         .load()
@@ -96,7 +95,7 @@ public class TestRequestValidations {
 
   @Test
   public void testNoPreValidationsWithoutValidationMethods()
-      throws ServiceException {
+      throws Exception {
     int omVersion = 0;
     ValidationContext ctx = of(aFinalizedVersionManager());
     RequestValidations validations = loadEmptyValidations(ctx);
@@ -108,7 +107,7 @@ public class TestRequestValidations {
 
   @Test
   public void testNoPostValidationsWithoutValidationMethods()
-      throws ServiceException {
+      throws Exception {
     ValidationContext ctx = of(aFinalizedVersionManager());
     RequestValidations validations = loadEmptyValidations(ctx);
 
@@ -120,7 +119,7 @@ public class TestRequestValidations {
 
   @Test
   public void testNoPreValidationsRunningForRequestTypeWithoutValidators()
-      throws ServiceException {
+      throws Exception {
     ValidationContext ctx = of(aFinalizedVersionManager());
     RequestValidations validations = loadValidations(ctx);
 
@@ -131,7 +130,7 @@ public class TestRequestValidations {
 
   @Test
   public void testNoPostValidationsAreRunningForRequestTypeWithoutValidators()
-      throws ServiceException {
+      throws Exception {
     ValidationContext ctx = of(aFinalizedVersionManager());
     RequestValidations validations = loadValidations(ctx);
 
@@ -142,14 +141,14 @@ public class TestRequestValidations {
   }
 
   @Test
-  public void testPreProcessorExceptionHandling() {
+  public void testPreProcessorExceptionHandling() throws Exception {
     ValidationContext ctx = of(aFinalizedVersionManager());
     RequestValidations validations = loadValidations(ctx);
 
     try {
       validations.validateRequest(aDeleteKeysRequest(olderClientVersion()));
       fail("ServiceException was expected but was not thrown.");
-    } catch (ServiceException ignored) { }
+    } catch (Exception ignored) { }
 
     validationListener.assertNumOfEvents(1);
     validationListener.assertExactListOfValidatorsCalled(
@@ -165,7 +164,7 @@ public class TestRequestValidations {
       validations.validateResponse(
           aDeleteKeysRequest(olderClientVersion()), aDeleteKeysResponse());
       fail("ServiceException was expected but was not thrown.");
-    } catch (ServiceException ignored) { }
+    } catch (Exception ignored) { }
 
     validationListener.assertNumOfEvents(1);
     validationListener.assertExactListOfValidatorsCalled(
@@ -174,7 +173,7 @@ public class TestRequestValidations {
 
   @Test
   public void testOldClientConditionIsRecognizedAndPreValidatorsApplied()
-      throws ServiceException {
+      throws Exception {
     ValidationContext ctx = of(aFinalizedVersionManager());
     RequestValidations validations = loadValidations(ctx);
 
@@ -187,7 +186,7 @@ public class TestRequestValidations {
 
   @Test
   public void testOldClientConditionIsRecognizedAndPostValidatorsApplied()
-      throws ServiceException {
+      throws Exception {
     ValidationContext ctx = of(aFinalizedVersionManager());
     RequestValidations validations = loadValidations(ctx);
 
@@ -202,7 +201,7 @@ public class TestRequestValidations {
 
   @Test
   public void testPreFinalizedWithOldClientConditionPreProcValidatorsApplied()
-      throws ServiceException {
+      throws Exception {
     ValidationContext ctx = of(anUnfinalizedVersionManager());
     RequestValidations validations = loadValidations(ctx);
 
@@ -216,7 +215,7 @@ public class TestRequestValidations {
 
   @Test
   public void testPreFinalizedWithOldClientConditionPostProcValidatorsApplied()
-      throws ServiceException {
+      throws Exception {
     ValidationContext ctx = of(anUnfinalizedVersionManager());
     RequestValidations validations = loadValidations(ctx);
 
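The widened signatures above also make room for testing the new unwrapping behavior. A hedged sketch of such a test (not part of this commit), reusing the fixtures visible in this file and assuming a hypothetical fixture anEcRequestRejectedByValidator() whose registered pre-process validator throws OMException:

    // Hypothetical test: with the InvocationTargetException unwrapping in
    // RequestValidations, a validator's OMException surfaces directly
    // instead of being buried in a ServiceException.
    @Test
    public void testValidatorOMExceptionIsUnwrapped() throws Exception {
      ValidationContext ctx = of(anUnfinalizedVersionManager());
      RequestValidations validations = loadValidations(ctx);
      try {
        validations.validateRequest(anEcRequestRejectedByValidator());
        fail("OMException was expected but was not thrown.");
      } catch (OMException e) {
        assertEquals(
            OMException.ResultCodes.NOT_SUPPORTED_OPERATION_PRIOR_FINALIZATION,
            e.getResult());
      }
    }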

