Posted to commits@ozone.apache.org by ad...@apache.org on 2022/04/11 18:23:19 UTC

[ozone] branch HDDS-4440-s3-performance updated (3f585ab05d -> 062890ee4b)

This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a change to branch HDDS-4440-s3-performance
in repository https://gitbox.apache.org/repos/asf/ozone.git


    from 3f585ab05d HDDS-5648. Track CI workflow tests temporarily disabled for feature branch (#3257)
     new d5ade578f4 Revert "HDDS-5648. Track CI workflow tests temporarily disabled for feature branch (#3257)"
     new d87a7461c1 Revert "HDDS-5545. Enable TLS for GRPC OmTransport implementation (#2945)"
     new d57616e165 Revert "HDDS-5544. Update GRPC OmTransport implementation for HA (#2901)"
     new 2522127b1c Revert "HDDS-6303.  Merge from master to resolve CI workflow issues & hrpcOmTransport support (#3074)"
     add 1ea978ac95 HDDS-6149. Remove unused keytabs (#2960)
     add 243cc3f6ec HDDS-6094. Some unit tests are skipped due to using JUnit4 runner (#2909)
     add 61f1c709d4 HDDS-6075. OzoneConfiguration constructor overrides input configuration keys. (#2921)
     add ca25193cc7 HDDS-4177. SCM Container DB bootstrap on Recon startup (#2942)
     add 9644f83c0a HDDS-6086. Compute MD5MD5CRC file checksum using chunk checksums from DataNodes (#2919)
     add a31b79a0cf HDDS-6148. Validate ContainerBalancerConfiguration before start ContainerBalancer (#2957)
     add c8f6ad0363 HDDS-6161. SCM StateMachine failing to reinitialize doesn't terminate the process. (#2971)
     add a348980cec HDDS-6134. Move replication-specific config to ReplicationServer (#2943)
     add 5200eabe0c HDDS-4010. S3G startup fails when multiple service ids are configured. (#2976)
     add a7aac99b19 HDDS-6170. Add metrics to replication manager to track container health states (#2975)
     add bcfb64ae58 HDDS-3231. Cleanup KeyManagerImpl (#2961)
     add ac99b47bba HDDS-5927. Improve defaults in ContainerBalancerConfiguration (#2892)
     add e1f9f2133d HDDS-6157. More consistent synchronization in InputStreams (#2965)
     add d09cdd4a58 HDDS-4743. [FSO] Add FSO variant of ITestOzoneContractDistcp. (#2980)
     add 195737dc72 HDDS-6114. Intermittent error due to Failed to init RocksDB (#2947)
     add 5447f583d5 HDDS-6175. Use s3Auth during proxy during decrypt in RpcClient. (#2981)
     add 3eb7235ca8 HDDS-5740. Enable ratis by default for SCM. (#2637)
     add 46f305d2ff HDDS-6183. Intermittent failure in TestKeyDeletingService.checkIfDeleteServiceWithFailingSCM. (#2991)
     add 1dd1d0ba39 HDDS-4190. Intermittent failure in TestOMVolumeSetOwnerRequest and TestOMVolumeSetQuotaRequest. (#2982)
     add 9785941bc7 HDDS-6120. Compute block checksum using chunk checksums (#2930)
     add cde7cb7661 HDDS-6147. Add ability in OM to get limited delta updates (#2956)
     add fc3015b151 HDDS-6195. Remove unused jmh-core dependency. (#2997)
     add 0c071ba883 HDDS-6167. Update ozone-runner version to 20211202-1 (#2969)
     add b8d97eb378 HDDS-6171. Create an API to change Bucket Owner (#2988)
     add a99ec037b5 HDDS-6163. Fix PATH in docker image (#2967)
     add 431ce39f0f HDDS-6202. Avoid using jmh-generator-annprocess since it is GPL2.0. (#2998)
     add 05773afc89 HDDS-6135. SCM Container DB bootstrap on Recon startup for SCM HA. (#2972)
     add 66aadb02df HDDS-6109. Preserve the underlying exception raised in client lib. (#2989)
     add bd91ab90da HDDS-3408. Rename ChunkLayOutVersion to ContainerLayoutVersion. (#2983)
     add 18f1461b28 HDDS-6203. CleanUp incomplete gz files during Container move (#3000)
     add 2af225d023 HDDS-6216. Move OMOpenKeysDeleteRequest to package om.request.key (#3011)
     add ada4a3bd29 HDDS-6191. Intermittent failure in TestDeleteWithSlowFollower (#3015)
     add 6180d212b4 HDDS-6128. CLI tool that downloads all the block replicas and creates a manifest file (#2987)
     add 3a16ebe62e  HDDS-6177. Extend container info command to include replica details  (#2995)
     add 5823f5695a HDDS-6211. [Docs] Image styling on deployed site does not replicate local builds. (#3007)
     add d3b1a06914 HDDS-6219. Switch to RATIS ReplicationType from STAND_ALONE in our tests. (#3014)
     add dc134102ee HDDS-6192. feature/Observability.md translated to Chinese (#2994)
     add dc6f27920c HDDS-6205. Add CLI command to display the latest Replication Manager report (#3013)
     add 11605e1713 HDDS-6227. Test helpers should observe naming conditions (#3020)
     add 634d666b97 HDDS-6239. ozonesecure-mr failing with No URLs in mirrorlist (#3029)
     add 5d496f2693 HDDS-6201. Fix NPE for DataScanner with scanned container deleted by others. (#3005)
     add 9186e6b295 HDDS-5529. For any IOexception from @Replicated method we should throw it (#2788)
     add 1804e225cb HDDS-6181. Change SCMHAInvocationHandler#invokeRatis() logging to TRACE (#2992)
     add e47b6f0c35 HDDS-6206. Application errors must not flood system log (#3001)
     add f757d9929c HDDS-6245. Add BucketLayout logging to Audit Logs (#3040)
     add aafc21ae05 HDDS-6238 Reduce memory requirements for list keys. (#3032)
     add 7cee0ea8ed HDDS-2919. Intermittent failure in TestRDBStore (#3028)
     add 5a35cabf85 HDDS-6253. Unnecessary duplicate smoketest after defaulting to FSO (#3036)
     add 5b1dbeb053 HDDS-6204. Cleanup handling malformed authorization header (#2999)
     add b32e135b9a HDDS-6169. Selective checks: skip junit tests on ozone-runner image update (#2974)
     add 14fa87ca5c HDDS-6270. Use a dedicated file instead of /etc/passwd for xcompat acceptance test (#3050)
     add 5c3aa0185b HDDS-6273. Amend doc SecuringTDE.md (#3047)
     add c5fb612790 HDDS-6140. Selective checks: skip unit check for integration-test changes (#2948)
     add 11f4c9b483 HDDS-6215. Recon get limited delta updates from OM (#3009)
     add 40d0a409fa HDDS-6226. Run tests for selective CI checks in CI (#3019)
     add 86a771dfe2 HDDS-6247. Avoid logging stack trace for user input problems (#3039)
     add de42c614a8 HDDS-6208. New checkstyle: WhitespaceAround (#3003)
     add ad6d3bcb57 HDDS-6289. Upgrade acceptance test log flooded with parse error (#3063)
     add f7b2dbd8f2 Necessary due to CI workflow issue HDDS-6239, which was blocking green builds on the feature branch; merging with the master branch affects HDDS-4440 PRs in progress.  This merge also requires including partial changes that were originally in the PR for HDDS-5545, namely the patch that allows switching between OmTransports, GrpcOmTransport and Hadoop3OmTransport, through Ozone configuration in OmConfigKeys (see the configuration sketch after this list).
     add e3259164ca Trigger Build
     add 666525e386 Fix integration test for the configuration field added for selecting the s3 gateway OmTransport - TestOzoneConfigurationFields (added config key not in xml).
     new 0de099dbde Merge remote-tracking branch 'neils-dev/merge-master-s3g' into HDDS-4440-s3-performance
     new 76e238118d HDDS-5544. Update GRPC OmTransport implementation for HA (#2901)
     new 47e457697e HDDS-5545. Enable TLS for GRPC OmTransport implementation (#2945)
     new 062890ee4b HDDS-5648. Track CI workflow tests temporarily disabled for feature branch (#3257)
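
The HDDS-5545 changes retained in the merge above make the s3 gateway's
OmTransport selectable through Ozone configuration. A minimal sketch in the
docker-config convention used by the compose clusters in this repository
follows; the second line is taken from the compose files touched later in
this email, while the transport key and factory class name are assumptions
to be verified against OMConfigKeys at these revisions.

    OZONE-SITE.XML_ozone.om.transport.class=org.apache.hadoop.ozone.om.protocolPB.GrpcOmTransportFactory
    OZONE-SITE.XML_ozone.om.s3.grpc.server_enabled=true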

The 8 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.
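
One way to inspect this update locally, assuming a clone of the gitbox
repository above with the feature branch fetched (the range endpoints are
the old and new branch tips reported at the top of this email):

    git fetch origin HDDS-4440-s3-performance
    git log --oneline 3f585ab05d..062890ee4b
    # Per their messages, the four "new" reverts were produced with git
    # revert of the original commits, e.g. for HDDS-5544:
    git revert 413d4aade25e77b444fcbbea36a3302cd2a5dc66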


Summary of changes:


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 03/08: Revert "HDDS-5544. Update GRPC OmTransport implementation for HA (#2901)"

Posted by ad...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch HDDS-4440-s3-performance
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit d57616e165a6500e626d82159fefb95907db808d
Author: Doroszlai, Attila <ad...@apache.org>
AuthorDate: Mon Apr 11 19:12:00 2022 +0200

    Revert "HDDS-5544. Update GRPC OmTransport implementation for HA (#2901)"
    
    This reverts commit 413d4aade25e77b444fcbbea36a3302cd2a5dc66.
---
 .../java/org/apache/hadoop/hdds/HddsUtils.java     |  20 ---
 .../java/org/apache/hadoop/hdds/TestHddsUtils.java |  39 +---
 .../org/apache/hadoop/ozone/om/OMConfigKeys.java   |   3 +-
 .../ozone/om/ha/GrpcOMFailoverProxyProvider.java   | 143 ---------------
 .../ozone/om/ha/OMFailoverProxyProvider.java       |  22 ++-
 .../ozone/om/protocolPB/GrpcOmTransport.java       | 196 ++++-----------------
 .../ozone/om/protocolPB/TestS3GrpcOmTransport.java | 119 ++-----------
 .../src/main/compose/ozone-om-ha/docker-config     |   1 -
 .../src/main/compose/ozonesecure-ha/docker-config  |   1 -
 .../dist/src/main/compose/ozonesecure-ha/test.sh   |   2 +-
 .../hadoop/ozone/TestOzoneConfigurationFields.java |   3 +-
 .../hadoop/ozone/om/GrpcOzoneManagerServer.java    |  20 +--
 .../hadoop/ozone/om/OzoneManagerServiceGrpc.java   |  43 ++++-
 .../hadoop/ozone/om/failover/TestOMFailovers.java  |   2 +-
 14 files changed, 98 insertions(+), 516 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index 364377d396..ffbb3e3340 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -227,26 +227,6 @@ public final class HddsUtils {
     }
   }
 
-  /**
-   * Retrieve a number, trying the supplied config keys in order.
-   * Each config value may be absent
-   *
-   * @param conf Conf
-   * @param keys a list of configuration key names.
-   *
-   * @return first number found from the given keys, or absent.
-   */
-  public static OptionalInt getNumberFromConfigKeys(
-      ConfigurationSource conf, String... keys) {
-    for (final String key : keys) {
-      final String value = conf.getTrimmed(key);
-      if (value != null) {
-        return OptionalInt.of(Integer.parseInt(value));
-      }
-    }
-    return OptionalInt.empty();
-  }
-
   /**
    * Retrieve the port number, trying the supplied config keys in order.
    * Each config value may be absent, or if present in the format
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java
index 67001010d5..fd8aa28e63 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java
@@ -36,8 +36,6 @@ import static org.apache.hadoop.hdds.HddsUtils.getSCMAddressForDatanodes;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_ADDRESS_KEY;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT;
-
 import static org.hamcrest.core.Is.is;
 import org.junit.Assert;
 import static org.junit.Assert.assertThat;
@@ -218,39 +216,4 @@ public class TestHddsUtils {
 
   }
 
-  @Test
-  public void testGetNumberFromConfigKeys() {
-    final String testnum1 = "8";
-    final String testnum2 = "7";
-    final String serviceId = "id1";
-    final String nodeId = "scm1";
-
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT,
-        testnum1);
-    Assert.assertTrue(Integer.parseInt(testnum1) ==
-        HddsUtils.getNumberFromConfigKeys(
-            conf,
-            OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT).orElse(0));
-
-    /* Test to return first unempty key number from list */
-    /* first key is absent */
-    Assert.assertTrue(Integer.parseInt(testnum1) ==
-        HddsUtils.getNumberFromConfigKeys(
-            conf,
-            ConfUtils.addKeySuffixes(OZONE_SCM_DATANODE_PORT_KEY,
-                serviceId, nodeId),
-            OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT).orElse(0));
-
-    /* now set the empty key and ensure returned value from this key */
-    conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_DATANODE_PORT_KEY,
-            serviceId, nodeId),
-        testnum2);
-    Assert.assertTrue(Integer.parseInt(testnum2) ==
-        HddsUtils.getNumberFromConfigKeys(
-            conf,
-            ConfUtils.addKeySuffixes(OZONE_SCM_DATANODE_PORT_KEY,
-                serviceId, nodeId),
-            OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT).orElse(0));
-  }
-}
+}
\ No newline at end of file
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index 6ebd7e11ad..cdd9e52667 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -57,8 +57,7 @@ public final class OMConfigKeys {
   public static final String OZONE_OM_BIND_HOST_DEFAULT =
       "0.0.0.0";
   public static final int OZONE_OM_PORT_DEFAULT = 9862;
-  public static final String OZONE_OM_GRPC_PORT_KEY =
-      "ozone.om.grpc.port";
+
   public static final String OZONE_OM_HTTP_ENABLED_KEY =
       "ozone.om.http.enabled";
   public static final String OZONE_OM_HTTP_BIND_HOST_KEY =
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/GrpcOMFailoverProxyProvider.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/GrpcOMFailoverProxyProvider.java
deleted file mode 100644
index 498f935974..0000000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/GrpcOMFailoverProxyProvider.java
+++ /dev/null
@@ -1,143 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.ha;
-
-import org.apache.hadoop.hdds.conf.ConfigurationException;
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ozone.OmUtils;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.ha.ConfUtils;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.protocolPB.GrpcOmTransport;
-import org.apache.hadoop.security.UserGroupInformation;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.OptionalInt;
-
-import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
-
-/**
- * The Grpc s3gateway om transport failover proxy provider implementation
- * extending the ozone client OM failover proxy provider.  This implmentation
- * allows the Grpc OMTransport reuse OM failover retry policies and
- * getRetryAction methods.  In case of OM failover, client can try
- * connecting to another OM node from the list of proxies.
- */
-public class GrpcOMFailoverProxyProvider<T> extends
-    OMFailoverProxyProvider<T> {
-
-  private Map<String, String> omAddresses;
-
-  public GrpcOMFailoverProxyProvider(ConfigurationSource configuration,
-                                     UserGroupInformation ugi,
-                                     String omServiceId,
-                                     Class<T> protocol) throws IOException {
-    super(configuration, ugi, omServiceId, protocol);
-  }
-
-  @Override
-  protected void loadOMClientConfigs(ConfigurationSource config, String omSvcId)
-      throws IOException {
-    // to be used for base class omProxies,
-    // ProxyInfo not applicable for gRPC, just need key set
-    Map<String, ProxyInfo<T>> omProxiesNodeIdKeyset = new HashMap<>();
-    // to be used for base class omProxyInfos
-    // OMProxyInfo not applicable for gRPC, just need key set
-    Map<String, OMProxyInfo> omProxyInfosNodeIdKeyset = new HashMap<>();
-    List<String> omNodeIDList = new ArrayList<>();
-    omAddresses = new HashMap<>();
-
-    Collection<String> omNodeIds = OmUtils.getActiveOMNodeIds(config, omSvcId);
-
-    for (String nodeId : OmUtils.emptyAsSingletonNull(omNodeIds)) {
-
-      String rpcAddrKey = ConfUtils.addKeySuffixes(OZONE_OM_ADDRESS_KEY,
-          omSvcId, nodeId);
-
-      Optional<String> hostaddr = getHostNameFromConfigKeys(config,
-          rpcAddrKey);
-
-      OptionalInt hostport = HddsUtils.getNumberFromConfigKeys(config,
-          ConfUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_GRPC_PORT_KEY,
-              omSvcId, nodeId),
-          OMConfigKeys.OZONE_OM_GRPC_PORT_KEY);
-      if (nodeId == null) {
-        nodeId = OzoneConsts.OM_DEFAULT_NODE_ID;
-      }
-      omProxiesNodeIdKeyset.put(nodeId, null);
-      omProxyInfosNodeIdKeyset.put(nodeId, null);
-      if (hostaddr.isPresent()) {
-        omAddresses.put(nodeId,
-            hostaddr.get() + ":"
-                + hostport.orElse(config
-                .getObject(GrpcOmTransport
-                    .GrpcOmTransportConfig.class)
-                .getPort()));
-      } else {
-        LOG.error("expected host address not defined for: {}", rpcAddrKey);
-        throw new ConfigurationException(rpcAddrKey + "is not defined");
-      }
-      omNodeIDList.add(nodeId);
-    }
-
-    if (omProxiesNodeIdKeyset.isEmpty()) {
-      throw new IllegalArgumentException("Could not find any configured " +
-          "addresses for OM. Please configure the system with "
-          + OZONE_OM_ADDRESS_KEY);
-    }
-
-    // set base class omProxies, omProxyInfos, omNodeIDList
-
-    // omProxies needed in base class
-    // omProxies.size == number of om nodes
-    // omProxies key needs to be valid nodeid
-    // omProxyInfos keyset needed in base class
-    setProxies(omProxiesNodeIdKeyset, omProxyInfosNodeIdKeyset, omNodeIDList);
-  }
-
-  @Override
-  protected Text computeDelegationTokenService() {
-    return new Text();
-  }
-
-  // need to throw if nodeID not in omAddresses
-  public String getGrpcProxyAddress(String nodeId) throws IOException {
-    if (omAddresses.containsKey(nodeId)) {
-      return omAddresses.get(nodeId);
-    } else {
-      LOG.error("expected nodeId not found in omAddresses for proxyhost {}",
-          nodeId);
-      throw new IOException(
-          "expected nodeId not found in omAddresses for proxyhost");
-    }
-
-  }
-
-  public List<String> getGrpcOmNodeIDList() {
-    return getOmNodeIDList();
-  }
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java
index 9fb690e760..5432468452 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java
@@ -148,6 +148,8 @@ public class OMFailoverProxyProvider<T> implements
             rpcAddrStr);
 
         if (omProxyInfo.getAddress() != null) {
+
+
           // For a non-HA OM setup, nodeId might be null. If so, we assign it
           // the default value
           if (nodeId == null) {
@@ -549,18 +551,14 @@ public class OMFailoverProxyProvider<T> implements
     return null;
   }
 
-  protected void setProxies(
-      Map<String, ProxyInfo<T>> setOMProxies,
-      Map<String, OMProxyInfo> setOMProxyInfos,
-      List<String> setOMNodeIDList) {
-    this.omProxies = setOMProxies;
-    this.omProxyInfos = setOMProxyInfos;
-    this.omNodeIDList = setOMNodeIDList;
-  }
-
-  protected List<String> getOmNodeIDList() {
-    return omNodeIDList;
+  @VisibleForTesting
+  protected void setProxiesForTesting(
+      Map<String, ProxyInfo<T>> testOMProxies,
+      Map<String, OMProxyInfo> testOMProxyInfos,
+      List<String> testOMNodeIDList) {
+    this.omProxies = testOMProxies;
+    this.omProxyInfos = testOMProxyInfos;
+    this.omNodeIDList = testOMNodeIDList;
   }
-
 }
 
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java
index 72c29f0cc6..3607429e52 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java
@@ -18,34 +18,22 @@
 package org.apache.hadoop.ozone.om.protocolPB;
 
 import java.io.IOException;
-import java.lang.reflect.Constructor;
+import java.util.Optional;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
 
-import com.google.common.net.HostAndPort;
 import io.grpc.Status;
-import io.grpc.StatusRuntimeException;
-import org.apache.hadoop.ipc.RemoteException;
-
 import org.apache.hadoop.hdds.conf.Config;
 import org.apache.hadoop.hdds.conf.ConfigGroup;
 import org.apache.hadoop.hdds.conf.ConfigTag;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.apache.hadoop.security.UserGroupInformation;
 
-import org.apache.hadoop.ozone.om.ha.GrpcOMFailoverProxyProvider;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerServiceGrpc;
 import io.grpc.ManagedChannel;
 import io.grpc.netty.NettyChannelBuilder;
@@ -54,10 +42,12 @@ import com.google.common.annotations.VisibleForTesting;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
 import static org.apache.hadoop.ozone.om.OMConfigKeys
     .OZONE_OM_GRPC_MAXIMUM_RESPONSE_LENGTH;
 import static org.apache.hadoop.ozone.om.OMConfigKeys
     .OZONE_OM_GRPC_MAXIMUM_RESPONSE_LENGTH_DEFAULT;
+import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys;
 
 /**
  * Grpc transport for grpc between s3g and om.
@@ -70,169 +60,58 @@ public class GrpcOmTransport implements OmTransport {
   private final AtomicBoolean isRunning = new AtomicBoolean(false);
 
   // gRPC specific
+  private ManagedChannel channel;
+
   private OzoneManagerServiceGrpc.OzoneManagerServiceBlockingStub client;
-  private Map<String,
-      OzoneManagerServiceGrpc.OzoneManagerServiceBlockingStub> clients;
-  private Map<String, ManagedChannel> channels;
-  private int lastVisited = -1;
-  private ConfigurationSource conf;
 
-  //private String host = "om";
-  private AtomicReference<String> host;
+  private String host = "om";
+  private int port = 8981;
   private int maxSize;
 
-  private List<String> oms;
-  private RetryPolicy retryPolicy;
-  private int failoverCount = 0;
-  private GrpcOMFailoverProxyProvider<OzoneManagerProtocolPB>
-      omFailoverProxyProvider;
-
   public GrpcOmTransport(ConfigurationSource conf,
                           UserGroupInformation ugi, String omServiceId)
       throws IOException {
+    Optional<String> omHost = getHostNameFromConfigKeys(conf,
+        OZONE_OM_ADDRESS_KEY);
+    this.host = omHost.orElse("0.0.0.0");
 
-    this.channels = new HashMap<>();
-    this.clients = new HashMap<>();
-    this.conf = conf;
-    this.host = new AtomicReference();
+    port = conf.getObject(GrpcOmTransportConfig.class).getPort();
 
     maxSize = conf.getInt(OZONE_OM_GRPC_MAXIMUM_RESPONSE_LENGTH,
         OZONE_OM_GRPC_MAXIMUM_RESPONSE_LENGTH_DEFAULT);
 
-    omFailoverProxyProvider = new GrpcOMFailoverProxyProvider(
-        conf,
-        ugi,
-        omServiceId,
-        OzoneManagerProtocolPB.class);
-
     start();
   }
 
-  public void start() throws IOException {
-    host.set(omFailoverProxyProvider
-        .getGrpcProxyAddress(
-            omFailoverProxyProvider.getCurrentProxyOMNodeId()));
-
+  public void start() {
     if (!isRunning.compareAndSet(false, true)) {
       LOG.info("Ignore. already started.");
       return;
     }
+    NettyChannelBuilder channelBuilder =
+        NettyChannelBuilder.forAddress(host, port)
+            .usePlaintext()
+            .maxInboundMessageSize(maxSize);
 
-    List<String> nodes = omFailoverProxyProvider.getGrpcOmNodeIDList();
-    for (String nodeId : nodes) {
-      String hostaddr = omFailoverProxyProvider.getGrpcProxyAddress(nodeId);
-      HostAndPort hp = HostAndPort.fromString(hostaddr);
-
-      NettyChannelBuilder channelBuilder =
-          NettyChannelBuilder.forAddress(hp.getHost(), hp.getPort())
-              .usePlaintext()
-              .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE);
-      channels.put(hostaddr, channelBuilder.build());
-      clients.put(hostaddr,
-          OzoneManagerServiceGrpc
-              .newBlockingStub(channels.get(hostaddr)));
-    }
-    int maxFailovers = conf.getInt(
-        OzoneConfigKeys.OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
-        OzoneConfigKeys.OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
+    channel = channelBuilder.build();
+    client = OzoneManagerServiceGrpc.newBlockingStub(channel);
 
-
-    retryPolicy = omFailoverProxyProvider.getRetryPolicy(maxFailovers);
     LOG.info("{}: started", CLIENT_NAME);
   }
 
   @Override
   public OMResponse submitRequest(OMRequest payload) throws IOException {
     OMResponse resp = null;
-    boolean tryOtherHost = true;
-    ResultCodes resultCode = ResultCodes.INTERNAL_ERROR;
-    while (tryOtherHost) {
-      tryOtherHost = false;
-      try {
-        resp = clients.get(host.get()).submitRequest(payload);
-      } catch (StatusRuntimeException e) {
-        if (e.getStatus().getCode() == Status.Code.UNAVAILABLE) {
-          resultCode = ResultCodes.TIMEOUT;
-        }
-        Exception exp = new Exception(e);
-        tryOtherHost = shouldRetry(unwrapException(exp));
-        if (!tryOtherHost) {
-          throw new OMException(resultCode);
-        }
-      }
-    }
-    return resp;
-  }
-
-  private Exception unwrapException(Exception ex) {
-    Exception grpcException = null;
     try {
-      StatusRuntimeException srexp =
-          (StatusRuntimeException)ex.getCause();
-      Status status = srexp.getStatus();
-      LOG.debug("GRPC exception wrapped: {}", status.getDescription());
-      if (status.getCode() == Status.Code.INTERNAL) {
-        // exception potentially generated by OzoneManagerServiceGrpc
-        Class<?> realClass = Class.forName(status.getDescription()
-            .substring(0, status.getDescription()
-                .indexOf(":")));
-        Class<? extends Exception> cls = realClass
-            .asSubclass(Exception.class);
-        Constructor<? extends Exception> cn = cls.getConstructor(String.class);
-        cn.setAccessible(true);
-        grpcException = cn.newInstance(status.getDescription());
-        IOException remote = null;
-        try {
-          String cause = status.getDescription();
-          cause = cause.substring(cause.indexOf(":") + 2);
-          remote = new RemoteException(cause.substring(0, cause.indexOf(":")),
-              cause.substring(cause.indexOf(":") + 1));
-          grpcException.initCause(remote);
-        } catch (Exception e) {
-          LOG.error("cannot get cause for remote exception");
-        }
-      } else {
-        // exception generated by connection failure, gRPC
-        grpcException = ex;
+      resp = client.submitRequest(payload);
+    } catch (io.grpc.StatusRuntimeException e) {
+      ResultCodes resultCode = ResultCodes.INTERNAL_ERROR;
+      if (e.getStatus().getCode() == Status.Code.UNAVAILABLE) {
+        resultCode = ResultCodes.TIMEOUT;
       }
-    } catch (Exception e) {
-      grpcException = new IOException(e);
-      LOG.error("error unwrapping exception from OMResponse {}");
-    }
-    return grpcException;
-  }
-
-  private boolean shouldRetry(Exception ex) {
-    boolean retry = false;
-    RetryPolicy.RetryAction action = null;
-    try {
-      action = retryPolicy.shouldRetry((Exception)ex, 0, failoverCount++, true);
-      LOG.debug("grpc failover retry action {}", action.action);
-      if (action.action == RetryPolicy.RetryAction.RetryDecision.FAIL) {
-        retry = false;
-        LOG.error("Retry request failed. " + action.reason, ex);
-      } else {
-        if (action.action == RetryPolicy.RetryAction.RetryDecision.RETRY ||
-            (action.action == RetryPolicy.RetryAction.RetryDecision
-                .FAILOVER_AND_RETRY)) {
-          if (action.delayMillis > 0) {
-            try {
-              Thread.sleep(action.delayMillis);
-            } catch (Exception e) {
-              LOG.error("Error trying sleep thread for {}", action.delayMillis);
-            }
-          }
-          // switch om host to current proxy OMNodeId
-          host.set(omFailoverProxyProvider
-              .getGrpcProxyAddress(
-                  omFailoverProxyProvider.getCurrentProxyOMNodeId()));
-          retry = true;
-        }
-      }
-    } catch (Exception e) {
-      LOG.error("Failed failover exception {}", e);
+      throw new OMException(e.getCause(), resultCode);
     }
-    return retry;
+    return resp;
   }
 
   // stub implementation for interface
@@ -242,15 +121,11 @@ public class GrpcOmTransport implements OmTransport {
   }
 
   public void shutdown() {
-    for (Map.Entry<String, ManagedChannel> entry : channels.entrySet()) {
-      ManagedChannel channel = entry.getValue();
-      channel.shutdown();
-      try {
-        channel.awaitTermination(5, TimeUnit.SECONDS);
-      } catch (Exception e) {
-        LOG.error("failed to shutdown OzoneManagerServiceGrpc channel {} : {}",
-            entry.getKey(), e);
-      }
+    channel.shutdown();
+    try {
+      channel.awaitTermination(5, TimeUnit.SECONDS);
+    } catch (Exception e) {
+      LOG.error("failed to shutdown OzoneManagerServiceGrpc channel", e);
     }
   }
 
@@ -281,16 +156,9 @@ public class GrpcOmTransport implements OmTransport {
   }
 
   @VisibleForTesting
-  public void startClient(ManagedChannel testChannel) throws IOException {
-    List<String> nodes = omFailoverProxyProvider.getGrpcOmNodeIDList();
-    for (String nodeId : nodes) {
-      String hostaddr = omFailoverProxyProvider.getGrpcProxyAddress(nodeId);
+  public void startClient(ManagedChannel testChannel) {
+    client = OzoneManagerServiceGrpc.newBlockingStub(testChannel);
 
-      clients.put(hostaddr,
-          OzoneManagerServiceGrpc
-              .newBlockingStub(testChannel));
-    }
     LOG.info("{}: started", CLIENT_NAME);
   }
-
 }
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestS3GrpcOmTransport.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestS3GrpcOmTransport.java
index b427db5562..323bb0eeb3 100644
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestS3GrpcOmTransport.java
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestS3GrpcOmTransport.java
@@ -25,29 +25,25 @@ import static org.mockito.Mockito.mock;
 import io.grpc.inprocess.InProcessChannelBuilder;
 import io.grpc.inprocess.InProcessServerBuilder;
 import io.grpc.testing.GrpcCleanupRule;
-import io.grpc.ManagedChannel;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.om.exceptions.OMNotLeaderException;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerServiceGrpc;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServiceListRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.Before;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import java.io.IOException;
 
-import com.google.protobuf.ServiceException;
-import org.apache.ratis.protocol.RaftPeerId;
+import io.grpc.ManagedChannel;
 
-import static org.junit.Assert.fail;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK;
 
 /**
  * Tests for GrpcOmTransport client.
@@ -63,32 +59,11 @@ public class TestS3GrpcOmTransport {
 
   private final OMResponse omResponse = OMResponse.newBuilder()
                   .setSuccess(true)
-                  .setStatus(org.apache.hadoop.ozone.protocol
-                      .proto.OzoneManagerProtocolProtos.Status.OK)
+                  .setStatus(Status.OK)
                   .setLeaderOMNodeId(leaderOMNodeId)
                   .setCmdType(Type.AllocateBlock)
                   .build();
 
-  private boolean doFailover = false;
-
-  private OzoneConfiguration conf;
-
-  private String omServiceId;
-  private UserGroupInformation ugi;
-  private ManagedChannel channel;
-
-
-  private ServiceException createNotLeaderException() {
-    RaftPeerId raftPeerId = RaftPeerId.getRaftPeerId("testNodeId");
-
-    // TODO: Set suggest leaderID. Right now, client is not using suggest
-    // leaderID. Need to fix this.
-    OMNotLeaderException notLeaderException =
-        new OMNotLeaderException(raftPeerId);
-    LOG.debug(notLeaderException.getMessage());
-    return new ServiceException(notLeaderException);
-  }
-
   private final OzoneManagerServiceGrpc.OzoneManagerServiceImplBase
       serviceImpl =
         mock(OzoneManagerServiceGrpc.OzoneManagerServiceImplBase.class,
@@ -103,22 +78,10 @@ public class TestS3GrpcOmTransport {
                                               .OzoneManagerProtocolProtos
                                               .OMResponse>
                                           responseObserver) {
-                  try {
-                    if (doFailover) {
-                      doFailover = false;
-                      throw createNotLeaderException();
-                    } else {
-                      responseObserver.onNext(omResponse);
-                      responseObserver.onCompleted();
-                    }
-                  } catch (Throwable e) {
-                    IOException ex = new IOException(e.getCause());
-                    responseObserver.onError(io.grpc.Status
-                        .INTERNAL
-                        .withDescription(ex.getMessage())
-                        .asRuntimeException());
-                  }
+                  responseObserver.onNext(omResponse);
+                  responseObserver.onCompleted();
                 }
+
               }));
 
   private GrpcOmTransport client;
@@ -138,37 +101,18 @@ public class TestS3GrpcOmTransport {
         .start());
 
     // Create a client channel and register for automatic graceful shutdown.
-    channel = grpcCleanup.register(
+    ManagedChannel channel = grpcCleanup.register(
         InProcessChannelBuilder.forName(serverName).directExecutor().build());
 
-    omServiceId = "";
-    conf = new OzoneConfiguration();
-    ugi = UserGroupInformation.getCurrentUser();
-    doFailover = false;
-  }
-
-  @Test
-  public void testSubmitRequestToServer() throws Exception {
-    ServiceListRequest req = ServiceListRequest.newBuilder().build();
-
-    final OMRequest omRequest = OMRequest.newBuilder()
-        .setCmdType(Type.ServiceList)
-        .setVersion(CURRENT_VERSION)
-        .setClientId("test")
-        .setServiceListRequest(req)
-        .build();
-
+    String omServiceId = "";
+    OzoneConfiguration conf = new OzoneConfiguration();
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
     client = new GrpcOmTransport(conf, ugi, omServiceId);
     client.startClient(channel);
-
-    final OMResponse resp = client.submitRequest(omRequest);
-    Assert.assertEquals(resp.getStatus(), org.apache.hadoop.ozone.protocol
-        .proto.OzoneManagerProtocolProtos.Status.OK);
-    Assert.assertEquals(resp.getLeaderOMNodeId(), leaderOMNodeId);
   }
 
   @Test
-  public void testGrpcFailoverProxy() throws Exception {
+  public void testSubmitRequestToServer() throws Exception {
     ServiceListRequest req = ServiceListRequest.newBuilder().build();
 
     final OMRequest omRequest = OMRequest.newBuilder()
@@ -178,45 +122,8 @@ public class TestS3GrpcOmTransport {
         .setServiceListRequest(req)
         .build();
 
-    client = new GrpcOmTransport(conf, ugi, omServiceId);
-    client.startClient(channel);
-
-    doFailover = true;
-    // first invocation generates a NotALeaderException
-    // failover is performed and request is internally retried
-    // second invocation request to server succeeds
     final OMResponse resp = client.submitRequest(omRequest);
-    Assert.assertEquals(resp.getStatus(), org.apache.hadoop.ozone.protocol
-        .proto.OzoneManagerProtocolProtos.Status.OK);
+    Assert.assertEquals(resp.getStatus(), OK);
     Assert.assertEquals(resp.getLeaderOMNodeId(), leaderOMNodeId);
   }
-
-  @Test
-  public void testGrpcFailoverProxyExhaustRetry() throws Exception {
-    ServiceListRequest req = ServiceListRequest.newBuilder().build();
-
-    final OMRequest omRequest = OMRequest.newBuilder()
-        .setCmdType(Type.ServiceList)
-        .setVersion(CURRENT_VERSION)
-        .setClientId("test")
-        .setServiceListRequest(req)
-        .build();
-
-    conf.setInt(OzoneConfigKeys.OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY, 0);
-    client = new GrpcOmTransport(conf, ugi, omServiceId);
-    client.startClient(channel);
-
-    doFailover = true;
-    // first invocation generates a NotALeaderException
-    // failover is performed and request is internally retried
-    // OMFailoverProvider returns Fail retry due to #attempts >
-    // max failovers
-
-    try {
-      final OMResponse resp = client.submitRequest(omRequest);
-      fail();
-    } catch (Exception e) {
-      Assert.assertTrue(true);
-    }
-  }
 }
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config
index 4642680394..69f4e52eae 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config
@@ -36,7 +36,6 @@ OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
 OZONE-SITE.XML_hdds.profiler.endpoint.enabled=true
 OZONE-SITE.XML_hdds.scmclient.max.retry.timeout=30s
 OZONE-SITE.XML_hdds.container.report.interval=60s
-OZONE-SITE.XML_ozone.om.s3.grpc.server_enabled=true
 HDFS-SITE.XML_rpc.metrics.quantile.enable=true
 HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
 ASYNC_PROFILER_HOME=/opt/profiler
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config
index be93d0a6ec..498d02efae 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config
@@ -51,7 +51,6 @@ OZONE-SITE.XML_hdds.grpc.tls.enabled=true
 OZONE-SITE.XML_ozone.replication=3
 OZONE-SITE.XML_hdds.scmclient.max.retry.timeout=30s
 OZONE-SITE.XML_hdds.container.report.interval=60s
-OZONE-SITE.XML_ozone.om.s3.grpc.server_enabled=true
 
 OZONE-SITE.XML_ozone.recon.om.snapshot.task.interval.delay=1m
 OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh
index 252f953163..7410822cfa 100755
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh
@@ -35,7 +35,7 @@ execute_robot_test ${SCM} freon
 
 execute_robot_test ${SCM} basic/links.robot
 
-execute_robot_test ${SCM} s3
+#execute_robot_test ${SCM} s3
 
 execute_robot_test ${SCM} admincli
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
index 1c772cf46b..3269c394f7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
@@ -111,8 +111,7 @@ public class TestOzoneConfigurationFields extends TestConfigurationFieldsBase {
         ReconServerConfigKeys.RECON_OM_SNAPSHOT_TASK_FLUSH_PARAM,
         OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_AUTO_TRIGGER_THRESHOLD_KEY,
         OMConfigKeys.OZONE_OM_HA_PREFIX,
-        OMConfigKeys.OZONE_OM_TRANSPORT_CLASS,
-        OMConfigKeys.OZONE_OM_GRPC_PORT_KEY
+        OMConfigKeys.OZONE_OM_TRANSPORT_CLASS
         // TODO HDDS-2856
     ));
   }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java
index 7fe338c83e..60942f971b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java
@@ -18,16 +18,13 @@
 package org.apache.hadoop.ozone.om;
 
 import java.io.IOException;
-import java.util.OptionalInt;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.conf.Config;
 import org.apache.hadoop.hdds.conf.ConfigGroup;
 import org.apache.hadoop.hdds.conf.ConfigTag;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.ha.ConfUtils;
 import org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB;
 import org.apache.hadoop.ozone.security.OzoneDelegationTokenSecretManager;
 import io.grpc.Server;
@@ -50,20 +47,9 @@ public class GrpcOzoneManagerServer {
                                     omTranslator,
                                 OzoneDelegationTokenSecretManager
                                     delegationTokenMgr) {
-    OptionalInt haPort = HddsUtils.getNumberFromConfigKeys(config,
-        ConfUtils.addKeySuffixes(
-            OMConfigKeys.OZONE_OM_GRPC_PORT_KEY,
-            config.get(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY),
-            config.get(OMConfigKeys.OZONE_OM_NODE_ID_KEY)),
-        OMConfigKeys.OZONE_OM_GRPC_PORT_KEY);
-    if (haPort.isPresent()) {
-      this.port = haPort.getAsInt();
-    } else {
-      this.port = config.getObject(
-              GrpcOzoneManagerServerConfig.class).
-          getPort();
-    }
-
+    this.port = config.getObject(
+        GrpcOzoneManagerServerConfig.class).
+        getPort();
     init(omTranslator,
         delegationTokenMgr,
         config);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerServiceGrpc.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerServiceGrpc.java
index a88e259a28..de11608703 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerServiceGrpc.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerServiceGrpc.java
@@ -17,13 +17,13 @@
  */
 package org.apache.hadoop.ozone.om;
 
-import io.grpc.Status;
 import com.google.protobuf.RpcController;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
 import org.apache.hadoop.ipc.ClientId;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerServiceGrpc.OzoneManagerServiceImplBase;
 import org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB;
 import org.apache.hadoop.ozone.protocol.proto
@@ -68,6 +68,7 @@ public class OzoneManagerServiceGrpc extends OzoneManagerServiceImplBase {
         "processing s3g client submit request - for command {}",
         request.getCmdType().name());
     AtomicInteger callCount = new AtomicInteger(0);
+    OMResponse omResponse = null;
 
     org.apache.hadoop.ipc.Server.getCurCall().set(new Server.Call(1,
         callCount.incrementAndGet(),
@@ -83,16 +84,42 @@ public class OzoneManagerServiceGrpc extends OzoneManagerServiceImplBase {
     // for OMRequests.  Test through successful ratis-enabled OMRequest
     // handling without dependency on hadoop IPC based Server.
     try {
-      OMResponse omResponse = this.omTranslator.
+      omResponse = this.omTranslator.
           submitRequest(NULL_RPC_CONTROLLER, request);
-      responseObserver.onNext(omResponse);
     } catch (Throwable e) {
-      IOException ex = new IOException(e.getCause());
-      responseObserver.onError(Status
-          .INTERNAL
-          .withDescription(ex.getMessage())
-          .asRuntimeException());
+      IOException ioe = null;
+      Throwable se = e.getCause();
+      if (se == null) {
+        ioe = new IOException(e);
+      } else {
+        ioe = se instanceof IOException ?
+            (IOException) se : new IOException(e);
+      }
+      omResponse = createErrorResponse(
+          request,
+          ioe);
     }
+    responseObserver.onNext(omResponse);
     responseObserver.onCompleted();
   }
+
+  /**
+   * Create OMResponse from the specified OMRequest and exception.
+   *
+   * @param omRequest
+   * @param exception
+   * @return OMResponse
+   */
+  private OMResponse createErrorResponse(
+      OMRequest omRequest, IOException exception) {
+    OMResponse.Builder omResponse = OMResponse.newBuilder()
+        .setStatus(OzoneManagerRatisUtils.exceptionToResponseStatus(exception))
+        .setCmdType(omRequest.getCmdType())
+        .setTraceID(omRequest.getTraceID())
+        .setSuccess(false);
+    if (exception.getMessage() != null) {
+      omResponse.setMessage(exception.getMessage());
+    }
+    return omResponse.build();
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java
index fe7f6f49ea..01601668b6 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java
@@ -143,7 +143,7 @@ public class TestOMFailovers {
         omProxyInfos.put(nodeId, null);
         omNodeIDList.add(nodeId);
       }
-      setProxies(omProxies, omProxyInfos, omNodeIDList);
+      setProxiesForTesting(omProxies, omProxyInfos, omNodeIDList);
     }
 
     @Override


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org
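
The 05/08 merge below pulls the master-sync work from a contributor branch.
A sketch of how such a merge is created, assuming 'neils-dev' is a git
remote configured for the contributor's fork (its URL is not given in this
email):

    git checkout HDDS-4440-s3-performance
    git fetch neils-dev
    git merge neils-dev/merge-master-s3g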


[ozone] 05/08: Merge remote-tracking branch 'neils-dev/merge-master-s3g' into HDDS-4440-s3-performance

Posted by ad...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch HDDS-4440-s3-performance
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 0de099dbdebc602fa0630bb17c439388a00b2844
Merge: 2522127b1c 666525e386
Author: Doroszlai, Attila <ad...@apache.org>
AuthorDate: Mon Apr 11 19:13:25 2022 +0200

    Merge remote-tracking branch 'neils-dev/merge-master-s3g' into HDDS-4440-s3-performance

 .github/workflows/post-commit.yml                  |   6 +
 .gitignore                                         |   3 +-
 CONTRIBUTING.md                                    |   3 +-
 dev-support/ci/selective_ci_checks.bats            |  24 +-
 dev-support/ci/selective_ci_checks.sh              |  21 +-
 .../apache/hadoop/hdds/scm/XceiverClientGrpc.java  |   8 +-
 .../hadoop/hdds/scm/client/HddsClientUtils.java    |   4 +-
 .../hadoop/hdds/scm/storage/BlockInputStream.java  |   8 +-
 .../hadoop/hdds/scm/storage/BlockOutputStream.java |   6 +-
 .../apache/hadoop/hdds/scm/storage/BufferPool.java |   2 +-
 .../hdds/scm/storage/RatisBlockOutputStream.java   |   2 +-
 hadoop-hdds/common/pom.xml                         |  15 +
 .../org/apache/hadoop/hdds/HddsConfigKeys.java     |   2 +-
 .../java/org/apache/hadoop/hdds/HddsUtils.java     |   2 +-
 .../java/org/apache/hadoop/hdds/StringUtils.java   |   2 +-
 .../hadoop/hdds/annotation/InterfaceAudience.java  |   6 +-
 .../org/apache/hadoop/hdds/client/OzoneQuota.java  |   2 +-
 .../org/apache/hadoop/hdds/client/QuotaList.java   |  11 +-
 .../hadoop/hdds/client/ReplicationFactor.java      |   2 +-
 .../hadoop/hdds/conf/OzoneConfiguration.java       |   9 +-
 .../hadoop/hdds/fs/CachingSpaceUsageSource.java    |   2 +-
 .../hadoop/hdds/protocol/DatanodeDetails.java      |   4 +-
 .../org/apache/hadoop/hdds/ratis/RatisHelper.java  |   2 +-
 .../org/apache/hadoop/hdds/recon/ReconConfig.java  |   2 +-
 .../hadoop/hdds/scm/ByteStringConversion.java      |   2 +-
 .../java/org/apache/hadoop/hdds/scm/ScmConfig.java |   2 +-
 .../org/apache/hadoop/hdds/scm/ScmConfigKeys.java  |   8 +-
 .../apache/hadoop/hdds/scm/client/ScmClient.java   |  24 +-
 .../hdds/scm/container/ContainerReplicaInfo.java   | 129 +++
 .../scm/container/ReplicationManagerReport.java    | 283 +++++++
 .../apache/hadoop/hdds/scm/net/InnerNodeImpl.java  |  14 +-
 .../hadoop/hdds/scm/net/NetworkTopologyImpl.java   |  28 +-
 .../org/apache/hadoop/hdds/scm/net/NodeSchema.java |   4 +-
 .../hadoop/hdds/scm/net/NodeSchemaLoader.java      |   8 +-
 .../apache/hadoop/hdds/scm/pipeline/Pipeline.java  |   8 +-
 .../protocol/StorageContainerLocationProtocol.java |  24 +-
 .../hdds/scm/storage/ContainerProtocolCalls.java   |   8 +-
 .../apache/hadoop/hdds/utils/HddsVersionInfo.java  |   2 +-
 .../hadoop/hdds/utils/ResourceSemaphore.java       |  10 +-
 .../org/apache/hadoop/hdds/utils/UniqueId.java     |   2 +-
 .../java/org/apache/hadoop/ozone/OzoneConsts.java  |  13 +-
 .../hadoop/ozone/audit/AuditEventStatus.java       |   2 +-
 .../org/apache/hadoop/ozone/audit/AuditLogger.java |   4 +-
 .../apache/hadoop/ozone/audit/AuditLoggerType.java |   2 +-
 .../org/apache/hadoop/ozone/audit/AuditMarker.java |   4 +-
 .../apache/hadoop/ozone/audit/AuditMessage.java    |  10 +-
 .../org/apache/hadoop/ozone/audit/SCMAction.java   |   3 +-
 .../hadoop/ozone/common/ChecksumByteBuffer.java    |   2 +-
 .../ozone/common/IncrementalChunkBuffer.java       |  13 +-
 .../apache/hadoop/ozone/common/StorageInfo.java    |   8 +-
 .../ozone/common/ha/ratis/RatisSnapshotInfo.java   |   2 +-
 .../ozone/container/common/helpers/BlockData.java  |   4 +-
 .../helpers/ContainerCommandRequestPBHelper.java   |  10 +-
 .../java/org/apache/hadoop/ozone/lease/Lease.java  |   8 +-
 .../hadoop/ozone/lease/LeaseCallbackExecutor.java  |   2 +-
 .../hadoop/ozone/util/ShutdownHookManager.java     |   2 +-
 .../common/src/main/resources/ozone-default.xml    |  59 +-
 .../java/org/apache/hadoop/hdds/TestHddsUtils.java |   4 +-
 .../hadoop/hdds/client/TestReplicationConfig.java  |   2 +-
 .../hadoop/hdds/conf/TestOzoneConfiguration.java   |  28 +
 .../java/org/apache/hadoop/hdds/fs/TestDU.java     |   2 +-
 .../ratis/TestContainerCommandRequestMessage.java  |   6 +-
 .../scm/container/TestContainerReplicaInfo.java    |  59 ++
 .../container/TestReplicationManagerReport.java    | 162 ++++
 .../hadoop/hdds/scm/container/package-info.java    |  21 +
 .../apache/hadoop/hdds/scm/ha/TestSCMNodeInfo.java |   4 +-
 .../hdds/scm/net/TestNetworkTopologyImpl.java      |  20 +-
 .../hadoop/hdds/scm/pipeline/MockPipeline.java     |   2 +-
 .../hadoop/hdds/tracing/TestStringCodec.java       |  18 +-
 .../hadoop/hdds/utils/MockGatheringChannel.java    |   2 +-
 .../hadoop/hdds/utils/TestResourceSemaphore.java   |   6 +-
 .../org/apache/hadoop/ozone/audit/DummyEntity.java |   2 +-
 .../hadoop/ozone/audit/TestOzoneAuditLogger.java   |   4 +-
 .../apache/hadoop/ozone/common/TestChecksum.java   |   2 +-
 .../hadoop/ozone/common/TestChunkBuffer.java       |  20 +-
 .../hadoop/ozone/common/TestStateMachine.java      |   4 +-
 .../ozone/container/ContainerTestHelper.java       |   2 +-
 .../hadoop/ozone/lease/TestLeaseManager.java       |   2 +-
 ...TestUpgradeUtils.java => UpgradeTestUtils.java} |   4 +-
 .../apache/hadoop/ozone/HddsDatanodeService.java   |   6 +-
 .../container/common/helpers/ContainerMetrics.java |   6 +-
 .../container/common/helpers/ContainerUtils.java   |   2 +-
 .../common/helpers/DatanodeVersionFile.java        |   8 +-
 .../ozone/container/common/impl/ContainerData.java |  19 +-
 .../container/common/impl/ContainerDataYaml.java   |  11 +-
 ...OutVersion.java => ContainerLayoutVersion.java} |  34 +-
 .../container/common/impl/HddsDispatcher.java      |  15 +-
 .../common/impl/OpenContainerBlockMap.java         |   2 +-
 .../common/statemachine/DatanodeConfiguration.java |  34 +-
 .../common/statemachine/DatanodeStateMachine.java  |  13 +-
 .../common/statemachine/StateContext.java          |  12 +-
 .../commandhandler/CommandDispatcher.java          |   2 +-
 .../common/states/datanode/InitDatanodeState.java  |   2 +-
 .../states/endpoint/VersionEndpointTask.java       |   2 +-
 .../common/transport/server/ratis/CSMMetrics.java  |   2 +-
 .../server/ratis/ContainerStateMachine.java        |   4 +-
 .../transport/server/ratis/XceiverServerRatis.java |   4 +-
 .../container/common/utils/HddsVolumeUtil.java     |   2 +-
 .../container/common/volume/MutableVolumeSet.java  |   2 +-
 .../container/common/volume/StorageVolume.java     |   2 +-
 .../ozone/container/common/volume/VolumeUsage.java |   2 +-
 .../container/keyvalue/KeyValueContainer.java      |   6 +-
 .../container/keyvalue/KeyValueContainerCheck.java |  16 +-
 .../container/keyvalue/KeyValueContainerData.java  |  10 +-
 .../ozone/container/keyvalue/KeyValueHandler.java  |  12 +-
 .../helpers/KeyValueContainerLocationUtil.java     |   2 +-
 .../container/keyvalue/impl/BlockManagerImpl.java  |   8 +-
 .../keyvalue/impl/ChunkManagerDispatcher.java      |  18 +-
 .../keyvalue/impl/FilePerBlockStrategy.java        |   4 +-
 .../keyvalue/impl/FilePerChunkStrategy.java        |   4 +-
 .../background/BlockDeletingService.java           |   4 +-
 .../container/metadata/AbstractDatanodeStore.java  |   2 +-
 .../container/ozoneimpl/ContainerController.java   |  14 +-
 .../ozoneimpl/ContainerDataScrubberMetrics.java    |   6 +-
 .../ozoneimpl/ContainerMetadataScanner.java        |   2 +-
 .../ContainerMetadataScrubberMetrics.java          |   2 +-
 .../replication/GrpcReplicationClient.java         |  17 +-
 .../container/replication/ReplicationServer.java   |  56 +-
 .../replication/ReplicationSupervisor.java         |   8 +
 .../container/stream/DirstreamClientHandler.java   |   2 +-
 .../upgrade/DataNodeUpgradeFinalizer.java          |   2 +-
 .../upgrade/VersionedDatanodeFeatures.java         |   2 +-
 .../ozone/protocol/commands/ReregisterCommand.java |   2 +-
 .../hadoop/ozone/TestHddsSecureDatanodeInit.java   |   2 +-
 .../ozone/container/common/ContainerTestUtils.java |   4 +-
 .../hadoop/ozone/container/common/ScmTestMock.java |  35 +-
 .../container/common/TestBlockDeletingService.java |  20 +-
 .../ozone/container/common/TestContainerCache.java |   2 +-
 ...ersion.java => TestContainerLayoutVersion.java} |  12 +-
 .../common/TestKeyValueContainerData.java          |  10 +-
 .../TestSchemaOneBackwardsCompatibility.java       |  36 +-
 .../container/common/helpers/TestBlockData.java    |   6 +-
 .../common/helpers/TestDatanodeVersionFile.java    |  10 +-
 .../common/impl/TestContainerDataYaml.java         |  18 +-
 .../impl/TestContainerDeletionChoosingPolicy.java  |   8 +-
 .../common/impl/TestContainerPersistence.java      |   8 +-
 .../container/common/impl/TestContainerSet.java    |  20 +-
 .../container/common/impl/TestHddsDispatcher.java  |  10 +-
 .../container/common/interfaces/TestHandler.java   |   2 +-
 .../common/report/TestReportPublisher.java         |   2 +-
 .../statemachine/TestDatanodeConfiguration.java    |  11 -
 .../TestCloseContainerCommandHandler.java          |  14 +-
 .../volume/TestRoundRobinVolumeChoosingPolicy.java |   2 +-
 .../common/volume/TestStorageVolumeChecker.java    |   8 +-
 .../container/common/volume/TestVolumeSet.java     |   2 +-
 ...tTestInfo.java => ContainerLayoutTestInfo.java} |  24 +-
 .../keyvalue/TestKeyValueBlockIterator.java        |  24 +-
 .../container/keyvalue/TestKeyValueContainer.java  |  18 +-
 .../keyvalue/TestKeyValueContainerCheck.java       |  11 +-
 .../TestKeyValueContainerMarkUnhealthy.java        |   8 +-
 .../container/keyvalue/TestKeyValueHandler.java    |  12 +-
 .../container/keyvalue/TestTarContainerPacker.java |   8 +-
 .../keyvalue/impl/AbstractTestChunkManager.java    |   8 +-
 .../keyvalue/impl/CommonChunkManagerTestCases.java |   4 +-
 .../keyvalue/impl/TestBlockManagerImpl.java        |  10 +-
 .../keyvalue/impl/TestChunkManagerDummyImpl.java   |   6 +-
 .../keyvalue/impl/TestFilePerBlockStrategy.java    |   6 +-
 .../keyvalue/impl/TestFilePerChunkStrategy.java    |  12 +-
 .../container/ozoneimpl/TestContainerReader.java   |  16 +-
 .../container/ozoneimpl/TestOzoneContainer.java    |  10 +-
 .../replication/TestGrpcOutputStream.java          |   8 +-
 .../replication/TestReplicationConfig.java         |  75 ++
 .../replication/TestReplicationSupervisor.java     |  12 +-
 .../upgrade/TestDataNodeStartupSlvLessThanMlv.java |   4 +-
 .../upgrade/TestDatanodeUpgradeToScmHA.java        |   6 +-
 hadoop-hdds/dev-support/checkstyle/checkstyle.xml  |   1 +
 hadoop-hdds/docs/content/feature/Observability.md  |   2 +-
 .../{Observability.md => Observability.zh.md}      |  63 +-
 hadoop-hdds/docs/content/security/SecuringTDE.md   |  48 +-
 .../docs/content/security/SecuringTDE.zh.md        |   4 +-
 hadoop-hdds/docs/content/tools/TestTools.md        | 129 +--
 hadoop-hdds/docs/content/tools/TestTools.zh.md     | 129 +--
 hadoop-hdds/docs/content/tools/_index.md           |   1 -
 hadoop-hdds/docs/content/tools/_index.zh.md        |   1 -
 .../themes/ozonedoc/layouts/shortcodes/image.html  |   2 +-
 .../SCMSecurityProtocolClientSideTranslatorPB.java |   2 +-
 ...inerLocationProtocolClientSideTranslatorPB.java |  90 +-
 .../scm/update/client/CRLClientUpdateHandler.java  |   2 +-
 .../hdds/scm/update/client/ClientCRLStore.java     |   4 +-
 .../update/client/SCMUpdateServiceGrpcClient.java  |   2 +-
 .../x509/certificate/authority/BaseApprover.java   |   2 +-
 .../certificate/authority/DefaultCAServer.java     |   6 +-
 .../authority/PKIProfiles/DefaultCAProfile.java    |   2 +-
 .../client/DefaultCertificateClient.java           |  32 +-
 .../certificate/client/OMCertificateClient.java    |   4 +-
 .../certificates/utils/CertificateSignRequest.java |  10 +-
 .../hadoop/hdds/security/x509/crl/CRLInfo.java     |   2 +-
 .../hdds/security/x509/crl/CRLInfoCodec.java       |   2 +-
 .../hadoop/hdds/server/http/ProfileServlet.java    |   2 +-
 .../hadoop/hdds/utils/DBCheckpointMetrics.java     |   2 +-
 .../java/org/apache/hadoop/hdds/utils/HAUtils.java |   2 +-
 .../hadoop/hdds/utils/MetadataKeyFilters.java      |   2 +-
 .../apache/hadoop/hdds/utils/TransactionInfo.java  |   2 +-
 .../hadoop/hdds/utils/db/DBConfigFromFile.java     |   4 +-
 .../org/apache/hadoop/hdds/utils/db/DBStore.java   |  10 +
 .../hadoop/hdds/utils/db/DBStoreBuilder.java       |   4 +-
 .../org/apache/hadoop/hdds/utils/db/RDBStore.java  |  11 +
 .../apache/hadoop/hdds/utils/db/TypedTable.java    |   2 +-
 .../hadoop/hdds/utils/db/cache/CacheKey.java       |   2 +-
 .../hadoop/hdds/utils/db/cache/EpochEntry.java     |   2 +-
 .../x509/certificate/authority/MockCAStore.java    |   2 +-
 .../x509/certificate/utils/TestCRLCodec.java       |   2 +-
 .../certificates/TestCertificateSignRequest.java   |   4 +-
 .../x509/certificates/TestRootCertificate.java     |   2 +-
 .../security/x509/keys/TestHDDSKeyGenerator.java   |   2 +-
 .../hadoop/hdds/utils/db/TestDBStoreBuilder.java   |  10 +-
 .../apache/hadoop/hdds/utils/db/TestRDBStore.java  |  48 +-
 .../hadoop/hdds/utils/db/TestRDBStoreIterator.java |   6 +-
 .../hadoop/hdds/utils/db/TestRDBTableStore.java    |   4 +-
 .../hdds/utils/db/TestTypedRDBTableStore.java      |   2 +-
 .../hadoop/hdds/utils/db/cache/TestTableCache.java |  20 +-
 .../src/main/proto/ScmAdminProtocol.proto          |  40 +-
 .../interface-client/src/main/proto/hdds.proto     |  25 +
 hadoop-hdds/server-scm/pom.xml                     |  10 -
 .../hadoop/hdds/scm/block/DeletedBlockLogImpl.java |   2 +-
 .../hdds/scm/container/ContainerReplicaCount.java  |  28 +-
 .../hdds/scm/container/ContainerReportHandler.java |   2 +-
 .../hdds/scm/container/ReplicationManager.java     |  91 +-
 .../balancer/AbstractFindTargetGreedy.java         |   6 +-
 .../scm/container/balancer/ContainerBalancer.java  |  51 +-
 .../balancer/ContainerBalancerConfiguration.java   | 131 +--
 .../scm/container/balancer/FindSourceGreedy.java   |   6 +-
 .../ContainerPlacementPolicyFactory.java           |   2 +-
 .../algorithms/SCMContainerPlacementMetrics.java   |   2 +-
 .../algorithms/SCMContainerPlacementRackAware.java |  12 +-
 .../container/placement/metrics/SCMMetrics.java    |   2 +-
 .../replication/ReplicationManagerMetrics.java     |  42 +
 .../scm/container/states/ContainerStateMap.java    |   2 +-
 .../apache/hadoop/hdds/scm/ha/HASecurityUtils.java |   2 +-
 .../org/apache/hadoop/hdds/scm/ha/RatisUtil.java   |   2 +-
 .../hadoop/hdds/scm/ha/SCMHAInvocationHandler.java |   9 +-
 .../hadoop/hdds/scm/ha/SCMHAManagerImpl.java       |  35 +-
 .../hadoop/hdds/scm/ha/SCMRatisServerImpl.java     |   3 +-
 .../apache/hadoop/hdds/scm/ha/SCMStateMachine.java |   6 +-
 .../apache/hadoop/hdds/scm/ha/io/CodecFactory.java |   2 +-
 .../hdds/scm/metadata/SCMMetadataStoreImpl.java    |   4 +-
 .../hdds/scm/metadata/X509CertificateCodec.java    |   2 +-
 .../apache/hadoop/hdds/scm/node/CommandQueue.java  |   2 +-
 .../hdds/scm/node/NodeDecommissionManager.java     |  32 +-
 .../apache/hadoop/hdds/scm/node/NodeManager.java   |   2 +-
 .../hadoop/hdds/scm/node/NodeStateManager.java     |   2 +-
 .../apache/hadoop/hdds/scm/node/NodeStatus.java    |   4 +-
 .../hadoop/hdds/scm/node/SCMNodeManager.java       |   8 +-
 .../hadoop/hdds/scm/node/SCMNodeMetrics.java       |   8 +-
 .../hdds/scm/node/SCMNodeStorageStatMap.java       |   2 +-
 .../hdds/scm/node/states/Node2ObjectsMap.java      |   2 +-
 .../hadoop/hdds/scm/node/states/NodeStateMap.java  |   2 +-
 .../hdds/scm/pipeline/PipelineManagerImpl.java     |   2 +-
 .../hdds/scm/pipeline/PipelineReportHandler.java   |   4 +-
 .../scm/pipeline/PipelineStateManagerImpl.java     |   7 +-
 .../hdds/scm/pipeline/RatisPipelineUtils.java      |   2 +-
 .../scm/pipeline/WritableContainerFactory.java     |   2 +-
 ...inerLocationProtocolServerSideTranslatorPB.java |  86 +-
 .../hdds/scm/safemode/ContainerSafeModeRule.java   |   4 +-
 .../hdds/scm/safemode/DataNodeSafeModeRule.java    |   2 +-
 .../hdds/scm/server/SCMBlockProtocolServer.java    |  16 +-
 .../hdds/scm/server/SCMClientProtocolServer.java   |  90 +-
 .../hdds/scm/server/SCMDatanodeProtocolServer.java |   6 +-
 .../hdds/scm/server/StorageContainerManager.java   |  12 +-
 .../server/StorageContainerManagerHttpServer.java  |   4 +-
 .../scm/server/StorageContainerManagerStarter.java |   2 +-
 .../org/apache/hadoop/hdds/scm/HddsTestUtils.java  | 692 +++++++++++++++-
 .../apache/hadoop/hdds/scm/TestHddsServerUtil.java |   2 +-
 .../hadoop/hdds/scm/TestHddsServerUtils.java       |   2 +-
 .../java/org/apache/hadoop/hdds/scm/TestUtils.java | 699 ----------------
 .../hadoop/hdds/scm/block/TestBlockManager.java    |  22 +-
 .../hadoop/hdds/scm/block/TestDeletedBlockLog.java |   4 +-
 .../command/TestCommandStatusReportHandler.java    |   4 +-
 .../hadoop/hdds/scm/container/MockNodeManager.java |  12 +-
 .../hdds/scm/container/SimpleMockNodeManager.java  |   2 +-
 .../container/TestCloseContainerEventHandler.java  |   4 +-
 .../scm/container/TestContainerManagerImpl.java    |   6 +-
 .../scm/container/TestContainerReportHandler.java  |   4 +-
 .../TestIncrementalContainerReportHandler.java     |   8 +-
 .../hdds/scm/container/TestReplicationManager.java | 165 +++-
 .../scm/container/TestUnknownContainerReport.java  |   2 +-
 .../container/balancer/TestContainerBalancer.java  | 123 ++-
 .../algorithms/TestContainerPlacementFactory.java  |  12 +-
 .../TestSCMContainerPlacementCapacity.java         |  12 +-
 .../TestSCMContainerPlacementRackAware.java        |  24 +-
 .../TestSCMContainerPlacementRandom.java           |  16 +-
 .../replication/TestReplicationManagerMetrics.java |  97 +++
 .../states/TestContainerReplicaCount.java          |  10 +
 .../hdds/scm/crl/TestCRLStatusReportHandler.java   |   4 +-
 .../hadoop/hdds/scm/ha/TestSCMHAConfiguration.java |  26 +-
 .../hadoop/hdds/scm/ha/TestSCMRatisRequest.java    |   2 +-
 .../hdds/scm/metadata/TestPipelineIDCodec.java     |   2 +-
 .../hdds/scm/node/TestContainerPlacement.java      |   8 +-
 .../hdds/scm/node/TestDatanodeAdminMonitor.java    |   4 +-
 .../hadoop/hdds/scm/node/TestDeadNodeHandler.java  |  43 +-
 .../hdds/scm/node/TestNodeDecommissionManager.java |  16 +-
 .../hdds/scm/node/TestNodeReportHandler.java       |  10 +-
 .../hadoop/hdds/scm/node/TestNodeStateManager.java |   2 +-
 .../hadoop/hdds/scm/node/TestSCMNodeManager.java   |  82 +-
 .../hdds/scm/node/TestSCMNodeStorageStatMap.java   |  15 +-
 .../hadoop/hdds/scm/node/TestStatisticsUpdate.java |  16 +-
 .../hdds/scm/node/states/TestNodeStateMap.java     |   4 +-
 .../TestPipelineDatanodesIntersection.java         |   4 +-
 .../hdds/scm/pipeline/TestPipelineManagerImpl.java |  11 +-
 .../scm/pipeline/TestPipelinePlacementPolicy.java  |  16 +-
 .../scm/pipeline/TestRatisPipelineProvider.java    |   4 +-
 .../TestOneReplicaPipelineSafeModeRule.java        |   8 +-
 .../hdds/scm/safemode/TestSCMSafeModeManager.java  |   8 +-
 .../scm/server/TestSCMBlockProtocolServer.java     |   4 +-
 .../hadoop/hdds/scm/server/TestSCMCertStore.java   |   6 +-
 .../server/TestSCMUpdateServiceGrpcServer.java     |  24 +-
 .../TestSCMHAUnfinalizedStateValidationAction.java |   5 +-
 .../scm/upgrade/TestScmStartupSlvLessThanMlv.java  |   6 +-
 .../ozone/container/common/TestEndPoint.java       |  23 +-
 .../testutils/ReplicationNodeManagerMock.java      |   2 +-
 .../hadoop/ozone/scm/node/TestSCMNodeMetrics.java  |  11 +-
 .../org/apache/ozone/test/LambdaTestUtils.java     |   4 +-
 hadoop-hdds/tools/pom.xml                          |   9 +
 .../hdds/scm/cli/ContainerBalancerCommands.java    |  19 +-
 .../scm/cli/ContainerBalancerStartSubcommand.java  |  50 +-
 .../scm/cli/ContainerBalancerStatusSubcommand.java |   2 +-
 .../hdds/scm/cli/ContainerOperationClient.java     |  34 +-
 .../cli/ReplicationManagerStatusSubcommand.java    |   2 +-
 .../hdds/scm/cli/SafeModeCheckSubcommand.java      |   2 +-
 .../hdds/scm/cli/SafeModeExitSubcommand.java       |   2 +-
 .../hdds/scm/cli/container/ContainerCommands.java  |   3 +-
 .../hdds/scm/cli/container/InfoSubcommand.java     |  61 +-
 .../hdds/scm/cli/container/ReportSubcommand.java   | 116 +++
 .../scm/cli/datanode/DecommissionSubCommand.java   |   2 +-
 .../scm/cli/datanode/MaintenanceSubCommand.java    |   4 +-
 .../scm/cli/datanode/RecommissionSubCommand.java   |   2 +-
 .../hdds/scm/cli/container/TestInfoSubCommand.java | 249 ++++++
 .../scm/cli/container/TestReportSubCommand.java    | 159 ++++
 .../datanode/TestContainerBalancerSubCommand.java  |   8 +-
 .../scm/cli/datanode/TestListInfoSubcommand.java   |   6 +-
 hadoop-ozone/client/pom.xml                        |   4 +
 .../apache/hadoop/ozone/client/ObjectStore.java    |   6 +-
 .../apache/hadoop/ozone/client/OzoneBucket.java    |  21 +-
 .../hadoop/ozone/client/OzoneClientFactory.java    |   2 +-
 .../org/apache/hadoop/ozone/client/OzoneKey.java   |   2 +-
 .../apache/hadoop/ozone/client/OzoneVolume.java    |   2 +-
 .../checksum/AbstractBlockChecksumComputer.java}   |  52 +-
 .../client/checksum/BaseFileChecksumHelper.java    | 200 +++++
 .../checksum/ReplicatedBlockChecksumComputer.java  |  72 ++
 .../checksum/ReplicatedFileChecksumHelper.java     | 187 +++++
 .../hadoop/ozone/client/checksum/package-info.java |  23 +
 .../ozone/client/io/BlockOutputStreamEntry.java    |   6 +-
 .../hadoop/ozone/client/io/KeyInputStream.java     |   8 +-
 .../ozone/client/protocol/ClientProtocol.java      |  27 +
 .../hadoop/ozone/client/rpc/OzoneKMSUtil.java      |   4 +-
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  | 116 ++-
 .../hadoop/ozone/client/TestHddsClientUtils.java   |   4 +-
 .../TestReplicatedBlockChecksumComputer.java       |  68 ++
 .../checksum/TestReplicatedFileChecksumHelper.java | 323 ++++++++
 .../hadoop/ozone/client/checksum/package-info.java |  23 +
 .../client/src/test/resources/log4j.properties     |  23 -
 .../main/java/org/apache/hadoop/ozone/OmUtils.java |   4 +-
 .../java/org/apache/hadoop/ozone/OzoneAcl.java     |   8 +-
 .../org/apache/hadoop/ozone/om/OMConfigKeys.java   |  11 +-
 .../hadoop/ozone/om/helpers/OMNodeDetails.java     |   6 +-
 .../hadoop/ozone/om/helpers/OmBucketArgs.java      |  46 +-
 .../hadoop/ozone/om/helpers/OmBucketInfo.java      |  11 +-
 .../apache/hadoop/ozone/om/helpers/OmKeyInfo.java  |   2 +-
 .../hadoop/ozone/om/helpers/OmKeyLocationInfo.java |   2 +-
 .../ozone/om/helpers/OmKeyLocationInfoGroup.java   |   4 +-
 .../hadoop/ozone/om/helpers/OmVolumeArgs.java      |   2 +-
 .../hadoop/ozone/om/helpers/OzoneAclUtil.java      |   4 +-
 .../hadoop/ozone/om/helpers/OzoneFSUtils.java      |   2 +-
 .../hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java |   6 +-
 .../hadoop/ozone/om/helpers/ServiceInfo.java       |   2 +-
 .../hadoop/ozone/om/helpers/WithObjectID.java      |   2 +-
 .../ozone/om/protocol/OzoneManagerProtocol.java    |  12 +
 .../ozone/om/protocolPB/OmTransportFactory.java    |  30 +-
 ...OzoneManagerProtocolClientSideTranslatorPB.java |  30 +-
 .../apache/hadoop/ozone/protocolPB/OMPBHelper.java |  14 +-
 .../hadoop/ozone/security/acl/OzoneObjInfo.java    |   2 +-
 .../apache/hadoop/ozone/util/OzoneVersionInfo.java |   2 +-
 .../org/apache/hadoop/ozone/util/RadixTree.java    |   2 +-
 .../ozone/om/ha/TestOMFailoverProxyProvider.java   |   2 +-
 .../hadoop/ozone/om/lock/TestOzoneManagerLock.java |   2 +-
 .../ozone/security/TestGDPRSymmetricKey.java       |   2 +-
 .../ozone/security/acl/TestOzoneObjInfo.java       |   4 +-
 .../apache/hadoop/ozone/util/TestRadixTree.java    |   4 +-
 hadoop-ozone/dev-support/checks/bats.sh            |   8 +-
 hadoop-ozone/dev-support/checks/checkstyle.sh      |   3 +
 hadoop-ozone/dev-support/checks/coverage.sh        |   1 -
 hadoop-ozone/dist/pom.xml                          |   2 +-
 .../dist/src/main/compose/ozone-csi/docker-config  |   1 +
 .../dist/src/main/compose/ozone-ha/docker-config   |   4 +
 .../dist/src/main/compose/ozone/docker-config      |   1 +
 hadoop-ozone/dist/src/main/compose/ozone/test.sh   |   6 -
 .../compose/ozonesecure-ha/keytabs/HTTP.keytab     | Bin 144 -> 0 bytes
 .../main/compose/ozonesecure-ha/keytabs/dn.keytab  | Bin 278 -> 0 bytes
 .../main/compose/ozonesecure-ha/keytabs/om.keytab  | Bin 278 -> 0 bytes
 .../compose/ozonesecure-ha/keytabs/recon.keytab    | Bin 296 -> 0 bytes
 .../main/compose/ozonesecure-ha/keytabs/s3g.keytab | Bin 434 -> 0 bytes
 .../main/compose/ozonesecure-ha/keytabs/scm.keytab | Bin 586 -> 0 bytes
 .../compose/ozonesecure-ha/keytabs/testuser.keytab | Bin 152 -> 0 bytes
 .../ozonesecure-ha/keytabs/testuser2.keytab        | Bin 154 -> 0 bytes
 .../src/main/compose/ozonesecure-mr/docker-config  |   1 +
 .../compose/ozonesecure-mr/keytabs/HTTP.keytab     | Bin 144 -> 0 bytes
 .../main/compose/ozonesecure-mr/keytabs/dn.keytab  | Bin 278 -> 0 bytes
 .../main/compose/ozonesecure-mr/keytabs/om.keytab  | Bin 278 -> 0 bytes
 .../compose/ozonesecure-mr/keytabs/recon.keytab    | Bin 296 -> 0 bytes
 .../main/compose/ozonesecure-mr/keytabs/s3g.keytab | Bin 434 -> 0 bytes
 .../main/compose/ozonesecure-mr/keytabs/scm.keytab | Bin 586 -> 0 bytes
 .../compose/ozonesecure-mr/keytabs/testuser.keytab | Bin 152 -> 0 bytes
 .../ozonesecure-mr/keytabs/testuser2.keytab        | Bin 154 -> 0 bytes
 .../dist/src/main/compose/ozonesecure-mr/test.sh   |   4 +-
 .../src/main/compose/ozonesecure/docker-config     |   2 +
 .../main/compose/ozonesecure/keytabs/HTTP.keytab   | Bin 144 -> 0 bytes
 .../src/main/compose/ozonesecure/keytabs/dn.keytab | Bin 278 -> 0 bytes
 .../src/main/compose/ozonesecure/keytabs/om.keytab | Bin 278 -> 0 bytes
 .../main/compose/ozonesecure/keytabs/recon.keytab  | Bin 296 -> 0 bytes
 .../main/compose/ozonesecure/keytabs/s3g.keytab    | Bin 434 -> 0 bytes
 .../main/compose/ozonesecure/keytabs/scm.keytab    | Bin 586 -> 0 bytes
 .../compose/ozonesecure/keytabs/testuser.keytab    | Bin 152 -> 0 bytes
 .../compose/ozonesecure/keytabs/testuser2.keytab   | Bin 154 -> 0 bytes
 .../main/compose/upgrade/compose/ha/docker-config  |   4 +-
 .../compose/upgrade/compose/non-ha/docker-config   |   2 +-
 hadoop-ozone/dist/src/main/docker/Dockerfile       |   1 -
 .../dist/src/main/dockerlibexec/entrypoint.sh      |   2 +-
 hadoop-ozone/dist/src/main/k8s/examples/testlib.sh |   5 +-
 .../src/main/smoketest/compatibility/read.robot    |   9 +-
 .../compatibility/{write.robot => setup.robot}     |  16 +-
 .../src/main/smoketest/compatibility/write.robot   |   7 +-
 hadoop-ozone/dist/src/shell/ozone/ozone            |  17 -
 .../apache/hadoop/ozone/MiniOzoneChaosCluster.java |  49 +-
 .../hadoop/ozone/MiniOzoneLoadGenerator.java       |   2 +-
 .../hadoop/ozone/loadgenerators/LoadBucket.java    |   2 +-
 hadoop-ozone/insight/pom.xml                       |   1 -
 hadoop-ozone/integration-test/pom.xml              |  10 -
 .../fs/ozone/TestOzoneFSWithObjectStoreCreate.java |  14 +-
 .../hadoop/fs/ozone/TestOzoneFileInterfaces.java   |   2 +-
 .../hadoop/fs/ozone/TestOzoneFileSystem.java       |  44 +-
 .../apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java  |   8 +-
 .../hadoop/fs/ozone/TestRootedOzoneFileSystem.java |  34 +-
 .../contract/ITestOzoneContractDistCpWithFSO.java  |  65 ++
 .../fs/ozone/contract/ITestOzoneContractUtils.java |   4 +-
 .../hadoop/fs/ozone/contract/OzoneContract.java    |  22 +-
 .../metrics/TestSCMContainerManagerMetrics.java    |   2 +
 .../hdds/scm/pipeline/TestLeaderChoosePolicy.java  |   2 +-
 .../hdds/scm/pipeline/TestMultiRaftSetup.java      |   3 +-
 .../hdds/scm/pipeline/TestPipelineClose.java       |   4 +-
 .../TestRatisPipelineCreateAndDestroy.java         |   2 +-
 .../safemode/TestSCMSafeModeWithPipelineRules.java |   6 +-
 .../hadoop/hdds/upgrade/TestHDDSUpgrade.java       |   8 +-
 .../org/apache/hadoop/ozone/MiniOzoneCluster.java  |   2 +-
 .../apache/hadoop/ozone/MiniOzoneClusterImpl.java  |  24 +-
 .../hadoop/ozone/MiniOzoneClusterProvider.java     |   4 +-
 .../hadoop/ozone/MiniOzoneHAClusterImpl.java       | 378 ++++++---
 .../hadoop/ozone/MiniOzoneOMHAClusterImpl.java     | 116 ---
 .../org/apache/hadoop/ozone/OzoneTestUtils.java    |  25 +-
 .../ozone/TestContainerBalancerOperations.java     |  23 +-
 .../hadoop/ozone/TestContainerOperations.java      |   2 +-
 .../java/org/apache/hadoop/ozone/TestDataUtil.java |   2 +-
 .../apache/hadoop/ozone/TestMiniOzoneCluster.java  |   6 +-
 .../hadoop/ozone/TestMiniOzoneOMHACluster.java     |   6 +-
 .../hadoop/ozone/TestOzoneConfigurationFields.java |   3 +-
 .../hadoop/ozone/TestSecureOzoneCluster.java       |  37 +-
 .../hadoop/ozone/TestStorageContainerManager.java  |  11 +-
 .../ozone/client/CertificateClientTestImpl.java    |   4 +-
 .../apache/hadoop/ozone/client/rpc/TestBCSID.java  |   7 +-
 .../hadoop/ozone/client/rpc/TestCommitWatcher.java |   2 +-
 .../client/rpc/TestContainerStateMachine.java      |   5 +-
 .../rpc/TestContainerStateMachineFailures.java     | 161 +++-
 .../rpc/TestContainerStateMachineFlushDelay.java   |   4 +-
 .../client/rpc/TestDeleteWithSlowFollower.java     |  11 +-
 .../client/rpc/TestDiscardPreallocatedBlocks.java  |   2 +-
 .../client/rpc/TestFailureHandlingByClient.java    |   2 +-
 .../client/rpc/TestOzoneAtRestEncryption.java      |  22 +-
 .../rpc/TestOzoneClientMultipartUploadWithFSO.java |  56 +-
 .../rpc/TestOzoneClientRetriesOnExceptions.java    |   4 +-
 .../client/rpc/TestOzoneRpcClientAbstract.java     | 175 ++--
 .../rpc/TestOzoneRpcClientForAclAuditLog.java      |  18 +-
 .../TestOzoneRpcClientWithKeyLatestVersion.java    |   2 +-
 .../hadoop/ozone/client/rpc/TestReadRetries.java   |   6 +-
 .../ozone/client/rpc/TestSecureOzoneRpcClient.java |  12 +-
 .../ozone/client/rpc/TestWatchForCommit.java       |   2 +-
 .../client/rpc/read/TestChunkInputStream.java      |   4 +-
 .../ozone/client/rpc/read/TestInputStreamBase.java |  17 +-
 .../ozone/client/rpc/read/TestKeyInputStream.java  |  12 +-
 .../apache/hadoop/ozone/container/TestHelper.java  |   7 +-
 .../commandhandler/TestBlockDeletion.java          |   1 +
 .../TestCloseContainerByPipeline.java              |   2 +-
 .../commandhandler/TestCloseContainerHandler.java  |   4 +-
 .../commandhandler/TestDeleteContainerHandler.java |   5 +-
 .../container/metrics/TestContainerMetrics.java    |   5 +-
 .../container/ozoneimpl/TestOzoneContainer.java    |   2 +-
 .../container/server/TestContainerServer.java      |   3 +-
 .../server/TestSecureContainerServer.java          |   7 +-
 .../ozone/dn/ratis/TestDnRatisLogParser.java       |   2 +
 .../hadoop/ozone/dn/scrubber/TestDataScrubber.java |   7 +-
 .../TestDatanodeHddsVolumeFailureDetection.java    |   9 +-
 .../freon/TestHadoopDirTreeGeneratorWithFSO.java   |   4 +-
 .../ozone/freon/TestHadoopNestedDirGenerator.java  |  26 +-
 .../ozone/om/TestContainerReportWithKeys.java      |   2 +-
 .../apache/hadoop/ozone/om/TestKeyManagerImpl.java | 387 ++++-----
 .../org/apache/hadoop/ozone/om/TestKeyPurging.java |   2 +-
 .../hadoop/ozone/om/TestOMRatisSnapshots.java      |   6 +-
 .../hadoop/ozone/om/TestOMUpgradeFinalization.java |   6 +-
 .../hadoop/ozone/om/TestObjectStoreWithFSO.java    |   8 +-
 .../org/apache/hadoop/ozone/om/TestOmLDBCli.java   |  18 +-
 .../org/apache/hadoop/ozone/om/TestOmMetrics.java  |   2 +-
 .../ozone/om/TestOmStartupSlvLessThanMlv.java      |   6 +-
 .../ozone/om/TestOzoneManagerConfiguration.java    |   4 +-
 .../apache/hadoop/ozone/om/TestOzoneManagerHA.java |  12 +-
 .../ozone/om/TestOzoneManagerHAMetadataOnly.java   |   2 +-
 .../hadoop/ozone/om/TestOzoneManagerHAWithACL.java |   8 +-
 .../ozone/om/TestOzoneManagerHAWithData.java       |   4 +-
 .../ozone/om/TestOzoneManagerHAWithFailover.java   |   2 +-
 .../hadoop/ozone/om/TestOzoneManagerPrepare.java   |   2 +-
 .../ozone/om/TestOzoneManagerRestInterface.java    |   2 +-
 .../hadoop/ozone/om/TestRecursiveAclWithFSO.java   |   4 +-
 .../om/ratis/TestOzoneManagerRatisRequest.java     |   4 +-
 .../snapshot/TestOzoneManagerSnapshotProvider.java |   8 +-
 .../hadoop/ozone/recon/TestReconScmHASnapshot.java |  70 ++
 .../hadoop/ozone/recon/TestReconScmSnapshot.java   | 133 +++
 .../ozone/recon/TestReconWithOzoneManager.java     |  10 +-
 .../ozone/recon/TestReconWithOzoneManagerFSO.java  |  14 +-
 .../ozone/recon/TestReconWithOzoneManagerHA.java   |  12 +-
 .../hadoop/ozone/scm/TestAllocateContainer.java    |   2 +-
 .../hadoop/ozone/scm/TestCloseContainer.java       |   4 +-
 .../hadoop/ozone/scm/TestFailoverWithSCMHA.java    |   2 +-
 .../ozone/scm/TestSCMInstallSnapshotWithHA.java    |  18 +-
 .../org/apache/hadoop/ozone/scm/TestSCMMXBean.java |   6 +-
 .../ozone/scm/TestStorageContainerManagerHA.java   |   4 +-
 .../hadoop/ozone/scm/TestXceiverClientGrpc.java    |  10 +-
 .../scm/node/TestDecommissionAndMaintenance.java   |  24 +-
 .../hadoop/ozone/scm/node/TestQueryNode.java       |   2 +-
 .../hadoop/ozone/shell/TestNSSummaryAdmin.java     |   4 +-
 .../hadoop/ozone/shell/TestOzoneShellHA.java       |  16 +-
 .../src/main/proto/OmClientProtocol.proto          |   4 +-
 .../hadoop/ozone/om/codec/TestOmKeyInfoCodec.java  |   4 +-
 .../ozone/om/codec/TestRepeatedOmKeyInfoCodec.java |   4 +-
 .../ozone/om/helpers/TestInstanceHelper.java       |  68 --
 .../hadoop/ozone/om/helpers/TestOmPrefixInfo.java  |  39 +-
 .../apache/hadoop/ozone/om/BucketManagerImpl.java  |   2 +-
 .../org/apache/hadoop/ozone/om/KeyManager.java     | 111 ---
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 922 +--------------------
 .../java/org/apache/hadoop/ozone/om/OMMetrics.java |   8 +-
 .../hadoop/ozone/om/OmMetadataManagerImpl.java     |  22 +-
 .../org/apache/hadoop/ozone/om/OzoneAclUtils.java  |   2 +-
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |  18 +-
 .../hadoop/ozone/om/OzoneManagerHttpServer.java    |   4 +-
 .../hadoop/ozone/om/OzoneManagerPrepareState.java  |   4 +-
 .../hadoop/ozone/om/OzoneManagerStarter.java       |   2 +-
 .../hadoop/ozone/om/S3SecretManagerImpl.java       |   2 +-
 .../hadoop/ozone/om/TrashOzoneFileSystem.java      |  12 +-
 .../apache/hadoop/ozone/om/TrashPolicyOzone.java   |  12 +-
 .../apache/hadoop/ozone/om/fs/OzoneManagerFS.java  |   6 -
 .../ozone/om/ratis/OzoneManagerDoubleBuffer.java   |   4 +-
 .../ozone/om/ratis/OzoneManagerStateMachine.java   |   6 +-
 .../om/ratis/utils/OzoneManagerRatisUtils.java     |   9 +-
 .../hadoop/ozone/om/request/OMClientRequest.java   |   6 +-
 .../om/request/bucket/OMBucketCreateRequest.java   |   6 +-
 ...tyRequest.java => OMBucketSetOwnerRequest.java} | 223 ++---
 .../request/bucket/OMBucketSetPropertyRequest.java |   6 +-
 .../request/bucket/acl/OMBucketSetAclRequest.java  |   2 +-
 .../om/request/file/OMDirectoryCreateRequest.java  |   2 +-
 .../ozone/om/request/file/OMFileCreateRequest.java |   2 +-
 .../ozone/om/request/file/OMFileRequest.java       |  10 +-
 .../ozone/om/request/key/OMKeyCommitRequest.java   |   4 +-
 .../ozone/om/request/key/OMKeyCreateRequest.java   |   2 +-
 .../ozone/om/request/key/OMKeyRenameRequest.java   |   2 +-
 .../om/request/key/OMKeyRenameRequestWithFSO.java  |   2 +-
 .../hadoop/ozone/om/request/key/OMKeyRequest.java  |   8 +-
 .../key/OMOpenKeysDeleteRequest.java               |   4 +-
 .../ozone/om/request/key/acl/OMKeyAclRequest.java  |   3 +-
 .../multipart/S3MultipartUploadAbortRequest.java   |   2 +-
 .../om/request/volume/OMVolumeSetQuotaRequest.java |   6 +-
 .../om/request/volume/acl/OMVolumeAclRequest.java  |   2 +-
 .../request/volume/acl/OMVolumeAddAclRequest.java  |   2 +-
 .../volume/acl/OMVolumeRemoveAclRequest.java       |   2 +-
 .../request/volume/acl/OMVolumeSetAclRequest.java  |   2 +-
 .../response/bucket/OMBucketSetOwnerResponse.java  |  80 ++
 .../OzoneDelegationTokenSecretManager.java         |   2 +-
 .../hadoop/ozone/security/OzoneSecretStore.java    |   4 +-
 .../org/apache/hadoop/ozone/om/OmTestManagers.java | 122 +++
 .../hadoop/ozone/om/TestBucketManagerImpl.java     |   6 +-
 .../hadoop/ozone/om/TestKeyDeletingService.java    | 106 ++-
 .../apache/hadoop/ozone/om/TestKeyManagerUnit.java |  72 +-
 .../apache/hadoop/ozone/om/TestOMDBDefinition.java |   2 +-
 .../hadoop/ozone/om/TestOmMetadataManager.java     |  50 +-
 .../apache/hadoop/ozone/om/TestTrashService.java   |  40 +-
 .../hadoop/ozone/om/failover/TestOMFailovers.java  |   2 +-
 ...tOzoneManagerDoubleBufferWithDummyResponse.java |   2 +-
 ...TestOzoneManagerDoubleBufferWithOMResponse.java |  12 +-
 .../om/ratis/TestOzoneManagerRatisServer.java      |   2 +-
 .../om/ratis/TestOzoneManagerStateMachine.java     |   2 +-
 ...OMRequestUtils.java => OMRequestTestUtils.java} |  10 +-
 .../request/TestOMClientRequestWithUserInfo.java   |   2 +-
 .../request/bucket/TestOMBucketCreateRequest.java  |   8 +-
 .../bucket/TestOMBucketCreateRequestWithFSO.java   |   4 +-
 .../request/bucket/TestOMBucketDeleteRequest.java  |   6 +-
 .../bucket/TestOMBucketSetPropertyRequest.java     |   8 +-
 .../bucket/acl/TestOMBucketAddAclRequest.java      |  12 +-
 .../bucket/acl/TestOMBucketRemoveAclRequest.java   |  14 +-
 .../bucket/acl/TestOMBucketSetAclRequest.java      |  12 +-
 .../request/file/TestOMDirectoryCreateRequest.java |  24 +-
 .../file/TestOMDirectoryCreateRequestWithFSO.java  |  48 +-
 .../om/request/file/TestOMFileCreateRequest.java   |  36 +-
 .../file/TestOMFileCreateRequestWithFSO.java       |  22 +-
 .../om/request/key/TestOMAllocateBlockRequest.java |  12 +-
 .../key/TestOMAllocateBlockRequestWithFSO.java     |  10 +-
 .../ozone/om/request/key/TestOMKeyAclRequest.java  |  10 +-
 .../om/request/key/TestOMKeyAclRequestWithFSO.java |   8 +-
 .../om/request/key/TestOMKeyCommitRequest.java     |  16 +-
 .../request/key/TestOMKeyCommitRequestWithFSO.java |  10 +-
 .../om/request/key/TestOMKeyCreateRequest.java     |  10 +-
 .../request/key/TestOMKeyCreateRequestWithFSO.java |   6 +-
 .../om/request/key/TestOMKeyDeleteRequest.java     |  10 +-
 .../request/key/TestOMKeyDeleteRequestWithFSO.java |  14 +-
 .../key/TestOMKeyPurgeRequestAndResponse.java      |  10 +-
 .../om/request/key/TestOMKeyRenameRequest.java     |  15 +-
 .../om/request/key/TestOMKeysDeleteRequest.java    |   6 +-
 .../om/request/key/TestOMKeysRenameRequest.java    |   6 +-
 .../request/key/TestOMOpenKeysDeleteRequest.java   |  13 +-
 .../om/request/key/TestOMPrefixAclRequest.java     |   6 +-
 .../TestS3InitiateMultipartUploadRequest.java      |   6 +-
 ...estS3InitiateMultipartUploadRequestWithFSO.java |   4 +-
 .../s3/multipart/TestS3MultipartRequest.java       |  12 +-
 .../TestS3MultipartUploadAbortRequest.java         |   8 +-
 .../TestS3MultipartUploadAbortRequestWithFSO.java  |   4 +-
 .../TestS3MultipartUploadCommitPartRequest.java    |  12 +-
 ...tS3MultipartUploadCommitPartRequestWithFSO.java |  10 +-
 .../TestS3MultipartUploadCompleteRequest.java      |  14 +-
 ...estS3MultipartUploadCompleteRequestWithFSO.java |   8 +-
 .../upgrade/TestOMCancelPrepareRequest.java        |   2 +-
 .../request/volume/TestOMVolumeCreateRequest.java  |   6 +-
 .../request/volume/TestOMVolumeDeleteRequest.java  |  12 +-
 .../volume/TestOMVolumeSetOwnerRequest.java        |  27 +-
 .../volume/TestOMVolumeSetQuotaRequest.java        |  29 +-
 .../volume/acl/TestOMVolumeAddAclRequest.java      |  12 +-
 .../volume/acl/TestOMVolumeRemoveAclRequest.java   |  14 +-
 .../volume/acl/TestOMVolumeSetAclRequest.java      |  12 +-
 .../ozone/om/response/TestCleanupTableInfo.java    |   4 +-
 .../file/TestOMDirectoryCreateResponse.java        |   4 +-
 .../file/TestOMDirectoryCreateResponseWithFSO.java |   4 +-
 .../file/TestOMFileCreateResponseWithFSO.java      |   4 +-
 .../response/key/TestOMAllocateBlockResponse.java  |   4 +-
 .../key/TestOMAllocateBlockResponseWithFSO.java    |   4 +-
 .../om/response/key/TestOMKeyCommitResponse.java   |   6 +-
 .../key/TestOMKeyCommitResponseWithFSO.java        |   8 +-
 .../key/TestOMKeyCreateResponseWithFSO.java        |   4 +-
 .../om/response/key/TestOMKeyDeleteResponse.java   |   4 +-
 .../key/TestOMKeyDeleteResponseWithFSO.java        |  12 +-
 .../om/response/key/TestOMKeyRenameResponse.java   |  14 +-
 .../ozone/om/response/key/TestOMKeyResponse.java   |   4 +-
 .../om/response/key/TestOMKeysDeleteResponse.java  |   4 +-
 .../om/response/key/TestOMKeysRenameResponse.java  |   8 +-
 .../response/key/TestOMOpenKeysDeleteResponse.java |   8 +-
 ...S3MultipartUploadCommitPartResponseWithFSO.java |  10 +-
 ...stS3MultipartUploadCompleteResponseWithFSO.java |  18 +-
 .../ozone/om/upgrade/TestOMUpgradeFinalizer.java   |   2 +-
 .../om/upgrade/TestOzoneManagerPrepareState.java   |   2 +-
 .../TestOzoneDelegationTokenSecretManager.java     |   6 +-
 .../ozone/security/TestOzoneTokenIdentifier.java   |   6 +-
 .../security/acl/TestOzoneNativeAuthorizer.java    |  89 +-
 .../hadoop/ozone/security/acl/TestParentAcl.java   |  50 +-
 .../hadoop/ozone/security/acl/TestVolumeOwner.java |  51 +-
 .../fs/ozone/BasicOzoneClientAdapterImpl.java      |   2 +-
 .../hadoop/fs/ozone/BasicOzoneFileSystem.java      |   4 +-
 .../apache/hadoop/fs/ozone/OzoneClientUtils.java   |   2 +-
 .../apache/hadoop/fs/ozone/OzoneFSInputStream.java |   2 +-
 .../org/hadoop/ozone/recon/codegen/SqlDbUtils.java |   4 +-
 .../apache/hadoop/ozone/recon/ReconConstants.java  |   2 +
 .../hadoop/ozone/recon/ReconServerConfigKeys.java  |  23 +
 .../ozone/recon/api/MetricsProxyEndpoint.java      |   4 +-
 .../hadoop/ozone/recon/api/NSSummaryEndpoint.java  |   2 +-
 .../hadoop/ozone/recon/api/PipelineEndpoint.java   |   2 +-
 .../hadoop/ozone/recon/codec/NSSummaryCodec.java   |   2 +-
 .../ozone/recon/fsck/ContainerHealthTask.java      |   6 +-
 .../ozone/recon/scm/ReconContainerManager.java     |   4 +-
 .../hadoop/ozone/recon/scm/ReconNodeManager.java   |  11 +
 .../ozone/recon/scm/ReconPipelineFactory.java      |   2 +-
 .../scm/ReconStorageContainerManagerFacade.java    | 129 ++-
 .../recon/spi/StorageContainerServiceProvider.java |  12 +
 .../recon/spi/impl/ContainerKeyPrefixCodec.java    |   2 +-
 .../spi/impl/OzoneManagerServiceProviderImpl.java  |  61 +-
 .../impl/StorageContainerServiceProviderImpl.java  | 139 +++-
 .../ozone/recon/tasks/FileSizeCountTask.java       |   4 +-
 .../hadoop/ozone/recon/tasks/TableCountTask.java   |   2 +-
 .../ozone/recon/api/TestTaskStatusService.java     |   2 +-
 .../ozone/recon/fsck/TestContainerHealthTask.java  |   2 +-
 .../TestUtilizationSchemaDefinition.java           |   2 +-
 .../impl/TestOzoneManagerServiceProviderImpl.java  |  88 +-
 .../TestStorageContainerServiceProviderImpl.java   |  12 +
 .../recon/tasks/TestContainerKeyMapperTask.java    |   2 +-
 .../ozone/recon/tasks/TestOMDBUpdatesHandler.java  |   4 +-
 .../ozone/recon/tasks/TestTableCountTask.java      |   2 +-
 .../apache/hadoop/ozone/s3/OzoneClientCache.java   |  10 +-
 .../hadoop/ozone/s3/OzoneClientProducer.java       |  23 +-
 .../hadoop/ozone/s3/OzoneServiceProvider.java      |  78 --
 .../hadoop/ozone/s3/VirtualHostStyleFilter.java    |   8 +-
 .../hadoop/ozone/s3/endpoint/BucketEndpoint.java   |  19 +-
 .../hadoop/ozone/s3/endpoint/EndpointBase.java     |   2 +-
 .../hadoop/ozone/s3/endpoint/ObjectEndpoint.java   |  43 +-
 .../org/apache/hadoop/ozone/s3/endpoint/S3Acl.java |   6 +-
 .../hadoop/ozone/s3/endpoint/S3BucketAcl.java      |   4 +-
 .../hadoop/ozone/s3/exception/S3ErrorTable.java    |  12 +-
 .../hadoop/ozone/s3/signature/Credential.java      |   2 +-
 .../hadoop/ozone/client/OzoneBucketStub.java       |   2 +-
 .../ozone/protocolPB/TestGrpcOmTransport.java      |   5 +
 .../hadoop/ozone/s3/TestOzoneClientProducer.java   | 131 ++-
 .../ozone/s3/TestVirtualHostStyleFilter.java       |   2 +-
 .../s3/commontypes/TestObjectKeyNameAdapter.java   |   2 +-
 .../hadoop/ozone/s3/endpoint/TestObjectHead.java   |   2 +-
 .../hadoop/ozone/s3/endpoint/TestRootList.java     |   2 +-
 .../tools/dev-support/findbugsExcludeFile.xml      |   4 -
 hadoop-ozone/tools/pom.xml                         |  15 -
 .../ozone/admin/om/FinalizeUpgradeSubCommand.java  |   6 +-
 .../admin/scm/FinalizeScmUpgradeSubcommand.java    |   6 +-
 .../admin/scm/FinalizeUpgradeCommandUtil.java      |   4 +-
 .../hadoop/ozone/audit/parser/AuditParser.java     |   2 +-
 .../ozone/audit/parser/common/DatabaseHelper.java  |  16 +-
 .../parser/handler/TemplateCommandHandler.java     |   2 +-
 .../ozone/audit/parser/model/AuditEntry.java       |  22 +-
 .../apache/hadoop/ozone/debug/ChunkKeyHandler.java |  12 +-
 .../org/apache/hadoop/ozone/debug/DBScanner.java   |   8 +-
 .../apache/hadoop/ozone/debug/PrefixParser.java    |   2 +-
 .../apache/hadoop/ozone/debug/ReadReplicas.java    | 247 ++++++
 .../hadoop/ozone/freon/BaseFreonGenerator.java     |   2 +-
 .../hadoop/ozone/freon/ChunkManagerDiskWrite.java  |   8 +-
 .../hadoop/ozone/freon/DatanodeChunkGenerator.java |  12 +-
 .../hadoop/ozone/freon/HadoopDirTreeGenerator.java |   2 +-
 .../containergenerator/GeneratorDatanode.java      |   6 +-
 .../GenerateOzoneRequiredConfigurations.java       |   2 +-
 .../hadoop/ozone/genesis/BenchMarkCRCBatch.java    | 141 ----
 .../ozone/genesis/BenchMarkCRCStreaming.java       | 173 ----
 .../ozone/genesis/BenchMarkContainerStateMap.java  | 199 -----
 .../ozone/genesis/BenchMarkDatanodeDispatcher.java | 339 --------
 .../ozone/genesis/BenchMarkOzoneManager.java       | 193 -----
 .../apache/hadoop/ozone/genesis/BenchMarkSCM.java  | 126 ---
 .../ozone/genesis/BenchmarkBlockDataToString.java  | 166 ----
 .../ozone/genesis/BenchmarkChunkManager.java       | 180 ----
 .../org/apache/hadoop/ozone/genesis/Genesis.java   | 108 ---
 .../ozone/genesis/GenesisMemoryProfiler.java       |  61 --
 .../apache/hadoop/ozone/genesis/GenesisUtil.java   | 162 ----
 .../apache/hadoop/ozone/genesis/package-info.java  |  25 -
 .../apache/hadoop/ozone/shell/OzoneAddress.java    |   4 +-
 .../hadoop/ozone/shell/bucket/BucketCommands.java  |   3 +-
 .../ozone/shell/bucket/CreateBucketHandler.java    |   2 +-
 .../ozone/shell/bucket/UpdateBucketHandler.java    |  62 ++
 .../hadoop/ozone/audit/parser/TestAuditParser.java |   2 +-
 .../hadoop/ozone/conf/TestGetConfOptions.java      |   4 +-
 .../TestGenerateOzoneRequiredConfigurations.java   |   4 +-
 .../org/apache/hadoop/test/OzoneTestDriver.java    |   6 +-
 pom.xml                                            |  13 -
 744 files changed, 9071 insertions(+), 7635 deletions(-)


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 01/08: Revert "HDDS-5648. Track CI workflow tests temporarily disabled for feature branch (#3257)"

Posted by ad...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch HDDS-4440-s3-performance
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit d5ade578f41526bde3e6244047ba2668f122129f
Author: Doroszlai, Attila <ad...@apache.org>
AuthorDate: Mon Apr 11 19:12:00 2022 +0200

    Revert "HDDS-5648. Track CI workflow tests temporarily disabled for feature branch (#3257)"
    
    This reverts commit 3f585ab05de0f11a33bb650002c694f59b38d865.
---
 hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config | 1 -
 hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh       | 2 +-
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config
index c22505ff9a..fa38aad00d 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config
@@ -38,7 +38,6 @@ OZONE-SITE.XML_ozone.datanode.pipeline.limit=1
 OZONE-SITE.XML_hdds.scmclient.max.retry.timeout=30s
 OZONE-SITE.XML_ozone.scm.primordial.node.id=scm1
 OZONE-SITE.XML_hdds.container.report.interval=60s
-OZONE-SITE.XML_ozone.om.s3.grpc.server_enabled=true
 OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon
 OZONE-SITE.XML_ozone.recon.address=recon:9891
 OZONE-SITE.XML_ozone.recon.http-address=0.0.0.0:9888
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh
index 3a21ef475d..16ba1c20fd 100755
--- a/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh
@@ -32,7 +32,7 @@ start_docker_env
 
 execute_robot_test ${SCM} basic/ozone-shell-single.robot
 execute_robot_test ${SCM} basic/links.robot
-execute_robot_test ${SCM} s3
+#execute_robot_test ${SCM} s3
 execute_robot_test ${SCM} freon
 
 stop_docker_env
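
For readers unfamiliar with the compose setup: the "OZONE-SITE.XML_" prefix in docker-config entries such as the one removed above is expanded into ozone-site.xml properties when the container starts. The sketch below illustrates that mapping only; the class name and parsing logic are assumptions for illustration, not the runner image's actual startup script.

    // Illustrative sketch only: shows how an OZONE-SITE.XML_-prefixed
    // environment entry maps to an ozone-site.xml property. The real
    // expansion is done by the ozone-runner image's startup scripts;
    // this class name and logic are assumptions for the example.
    public final class EnvToConfSketch {
      static String toXmlProperty(String line) {
        final String prefix = "OZONE-SITE.XML_";
        int eq = line.indexOf('=');
        if (!line.startsWith(prefix) || eq < 0) {
          return null; // not an ozone-site.xml entry
        }
        String name = line.substring(prefix.length(), eq);
        String value = line.substring(eq + 1);
        return "<property><name>" + name
            + "</name><value>" + value + "</value></property>";
      }

      public static void main(String[] args) {
        System.out.println(toXmlProperty(
            "OZONE-SITE.XML_ozone.om.s3.grpc.server_enabled=true"));
      }
    }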




[ozone] 08/08: HDDS-5648. Track CI workflow tests temporarily disabled for feature branch (#3257)

Posted by ad...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch HDDS-4440-s3-performance
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 062890ee4b9814af35e8d29c86f02cca0e226f3c
Author: Neil Joshi <ne...@gmail.com>
AuthorDate: Mon Apr 4 09:55:46 2022 -0600

    HDDS-5648. Track CI workflow tests temporarily disabled for feature branch (#3257)
---
 hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config | 1 +
 hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh       | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config
index fa38aad00d..c22505ff9a 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config
@@ -38,6 +38,7 @@ OZONE-SITE.XML_ozone.datanode.pipeline.limit=1
 OZONE-SITE.XML_hdds.scmclient.max.retry.timeout=30s
 OZONE-SITE.XML_ozone.scm.primordial.node.id=scm1
 OZONE-SITE.XML_hdds.container.report.interval=60s
+OZONE-SITE.XML_ozone.om.s3.grpc.server_enabled=true
 OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon
 OZONE-SITE.XML_ozone.recon.address=recon:9891
 OZONE-SITE.XML_ozone.recon.http-address=0.0.0.0:9888
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh
index 16ba1c20fd..3a21ef475d 100755
--- a/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh
@@ -32,7 +32,7 @@ start_docker_env
 
 execute_robot_test ${SCM} basic/ozone-shell-single.robot
 execute_robot_test ${SCM} basic/links.robot
-#execute_robot_test ${SCM} s3
+execute_robot_test ${SCM} s3
 execute_robot_test ${SCM} freon
 
 stop_docker_env




[ozone] 06/08: HDDS-5544. Update GRPC OmTransport implementation for HA (#2901)

Posted by ad...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch HDDS-4440-s3-performance
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 76e238118de06bfa6f4c997b92ae57baa79f5a1b
Author: Neil Joshi <ne...@gmail.com>
AuthorDate: Wed Mar 9 12:19:01 2022 -0700

    HDDS-5544. Update GRPC OmTransport implementation for HA (#2901)
---
 .../java/org/apache/hadoop/hdds/HddsUtils.java     |  20 +++
 .../java/org/apache/hadoop/hdds/TestHddsUtils.java |  39 +++-
 .../org/apache/hadoop/ozone/om/OMConfigKeys.java   |   3 +-
 .../ozone/om/ha/GrpcOMFailoverProxyProvider.java   | 143 +++++++++++++++
 .../ozone/om/ha/OMFailoverProxyProvider.java       |  22 +--
 .../ozone/om/protocolPB/GrpcOmTransport.java       | 196 +++++++++++++++++----
 .../ozone/om/protocolPB/TestS3GrpcOmTransport.java | 119 +++++++++++--
 .../src/main/compose/ozone-om-ha/docker-config     |   1 +
 .../src/main/compose/ozonesecure-ha/docker-config  |   1 +
 .../dist/src/main/compose/ozonesecure-ha/test.sh   |   2 +-
 .../hadoop/ozone/TestOzoneConfigurationFields.java |   3 +-
 .../hadoop/ozone/om/GrpcOzoneManagerServer.java    |  20 ++-
 .../hadoop/ozone/om/OzoneManagerServiceGrpc.java   |  43 +----
 .../hadoop/ozone/om/failover/TestOMFailovers.java  |   2 +-
 14 files changed, 516 insertions(+), 98 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index ffbb3e3340..364377d396 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -227,6 +227,26 @@ public final class HddsUtils {
     }
   }
 
+  /**
+   * Retrieve a number, trying the supplied config keys in order.
+   * Each config value may be absent.
+   *
+   * @param conf Conf
+   * @param keys a list of configuration key names.
+   *
+   * @return first number found from the given keys, or absent.
+   */
+  public static OptionalInt getNumberFromConfigKeys(
+      ConfigurationSource conf, String... keys) {
+    for (final String key : keys) {
+      final String value = conf.getTrimmed(key);
+      if (value != null) {
+        return OptionalInt.of(Integer.parseInt(value));
+      }
+    }
+    return OptionalInt.empty();
+  }
+
   /**
    * Retrieve the port number, trying the supplied config keys in order.
    * Each config value may be absent, or if present in the format
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java
index fd8aa28e63..67001010d5 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java
@@ -36,6 +36,8 @@ import static org.apache.hadoop.hdds.HddsUtils.getSCMAddressForDatanodes;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_ADDRESS_KEY;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT;
+
 import static org.hamcrest.core.Is.is;
 import org.junit.Assert;
 import static org.junit.Assert.assertThat;
@@ -216,4 +218,39 @@ public class TestHddsUtils {
 
   }
 
-}
\ No newline at end of file
+  @Test
+  public void testGetNumberFromConfigKeys() {
+    final String testnum1 = "8";
+    final String testnum2 = "7";
+    final String serviceId = "id1";
+    final String nodeId = "scm1";
+
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT,
+        testnum1);
+    Assert.assertTrue(Integer.parseInt(testnum1) ==
+        HddsUtils.getNumberFromConfigKeys(
+            conf,
+            OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT).orElse(0));
+
+    /* Test that the first non-empty key in the list is returned */
+    /* (the first key is absent here) */
+    Assert.assertTrue(Integer.parseInt(testnum1) ==
+        HddsUtils.getNumberFromConfigKeys(
+            conf,
+            ConfUtils.addKeySuffixes(OZONE_SCM_DATANODE_PORT_KEY,
+                serviceId, nodeId),
+            OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT).orElse(0));
+
+    /* now set the previously absent key and ensure its value is returned */
+    conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_DATANODE_PORT_KEY,
+            serviceId, nodeId),
+        testnum2);
+    Assert.assertTrue(Integer.parseInt(testnum2) ==
+        HddsUtils.getNumberFromConfigKeys(
+            conf,
+            ConfUtils.addKeySuffixes(OZONE_SCM_DATANODE_PORT_KEY,
+                serviceId, nodeId),
+            OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT).orElse(0));
+  }
+}
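
As additional context for the helper introduced above, a minimal usage sketch, assuming an in-memory OzoneConfiguration; the suffixed key name below is illustrative:

    import java.util.OptionalInt;
    import org.apache.hadoop.hdds.HddsUtils;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    public final class ConfigKeyLookupSketch {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        // Only the generic key is set; the suffixed key stays absent.
        conf.set("ozone.om.grpc.port", "8981");

        // Keys are tried in order; the first one with a value wins.
        OptionalInt port = HddsUtils.getNumberFromConfigKeys(conf,
            "ozone.om.grpc.port.omservice1.om1", // absent, skipped
            "ozone.om.grpc.port");               // present, returned
        System.out.println(port.orElse(0));      // prints 8981
      }
    }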
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index cdd9e52667..6ebd7e11ad 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -57,7 +57,8 @@ public final class OMConfigKeys {
   public static final String OZONE_OM_BIND_HOST_DEFAULT =
       "0.0.0.0";
   public static final int OZONE_OM_PORT_DEFAULT = 9862;
-
+  public static final String OZONE_OM_GRPC_PORT_KEY =
+      "ozone.om.grpc.port";
   public static final String OZONE_OM_HTTP_ENABLED_KEY =
       "ozone.om.http.enabled";
   public static final String OZONE_OM_HTTP_BIND_HOST_KEY =
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/GrpcOMFailoverProxyProvider.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/GrpcOMFailoverProxyProvider.java
new file mode 100644
index 0000000000..498f935974
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/GrpcOMFailoverProxyProvider.java
@@ -0,0 +1,143 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.ha;
+
+import org.apache.hadoop.hdds.conf.ConfigurationException;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.HddsUtils;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ozone.OmUtils;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.ha.ConfUtils;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.protocolPB.GrpcOmTransport;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.OptionalInt;
+
+import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
+
+/**
+ * The Grpc s3gateway om transport failover proxy provider implementation
+ * extending the ozone client OM failover proxy provider.  This implementation
+ * allows the Grpc OMTransport to reuse OM failover retry policies and
+ * getRetryAction methods.  In case of OM failover, the client can try
+ * connecting to another OM node from the list of proxies.
+ */
+public class GrpcOMFailoverProxyProvider<T> extends
+    OMFailoverProxyProvider<T> {
+
+  private Map<String, String> omAddresses;
+
+  public GrpcOMFailoverProxyProvider(ConfigurationSource configuration,
+                                     UserGroupInformation ugi,
+                                     String omServiceId,
+                                     Class<T> protocol) throws IOException {
+    super(configuration, ugi, omServiceId, protocol);
+  }
+
+  @Override
+  protected void loadOMClientConfigs(ConfigurationSource config, String omSvcId)
+      throws IOException {
+    // to be used for base class omProxies,
+    // ProxyInfo not applicable for gRPC, just need key set
+    Map<String, ProxyInfo<T>> omProxiesNodeIdKeyset = new HashMap<>();
+    // to be used for base class omProxyInfos
+    // OMProxyInfo not applicable for gRPC, just need key set
+    Map<String, OMProxyInfo> omProxyInfosNodeIdKeyset = new HashMap<>();
+    List<String> omNodeIDList = new ArrayList<>();
+    omAddresses = new HashMap<>();
+
+    Collection<String> omNodeIds = OmUtils.getActiveOMNodeIds(config, omSvcId);
+
+    for (String nodeId : OmUtils.emptyAsSingletonNull(omNodeIds)) {
+
+      String rpcAddrKey = ConfUtils.addKeySuffixes(OZONE_OM_ADDRESS_KEY,
+          omSvcId, nodeId);
+
+      Optional<String> hostaddr = getHostNameFromConfigKeys(config,
+          rpcAddrKey);
+
+      OptionalInt hostport = HddsUtils.getNumberFromConfigKeys(config,
+          ConfUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_GRPC_PORT_KEY,
+              omSvcId, nodeId),
+          OMConfigKeys.OZONE_OM_GRPC_PORT_KEY);
+      if (nodeId == null) {
+        nodeId = OzoneConsts.OM_DEFAULT_NODE_ID;
+      }
+      omProxiesNodeIdKeyset.put(nodeId, null);
+      omProxyInfosNodeIdKeyset.put(nodeId, null);
+      if (hostaddr.isPresent()) {
+        omAddresses.put(nodeId,
+            hostaddr.get() + ":"
+                + hostport.orElse(config
+                .getObject(GrpcOmTransport
+                    .GrpcOmTransportConfig.class)
+                .getPort()));
+      } else {
+        LOG.error("expected host address not defined for: {}", rpcAddrKey);
+        throw new ConfigurationException(rpcAddrKey + " is not defined");
+      }
+      omNodeIDList.add(nodeId);
+    }
+
+    if (omProxiesNodeIdKeyset.isEmpty()) {
+      throw new IllegalArgumentException("Could not find any configured " +
+          "addresses for OM. Please configure the system with "
+          + OZONE_OM_ADDRESS_KEY);
+    }
+
+    // set base class omProxies, omProxyInfos, omNodeIDList
+
+    // omProxies needed in base class
+    // omProxies.size == number of om nodes
+    // omProxies key needs to be valid nodeid
+    // omProxyInfos keyset needed in base class
+    setProxies(omProxiesNodeIdKeyset, omProxyInfosNodeIdKeyset, omNodeIDList);
+  }
+
+  @Override
+  protected Text computeDelegationTokenService() {
+    return new Text();
+  }
+
+  // need to throw if nodeID not in omAddresses
+  public String getGrpcProxyAddress(String nodeId) throws IOException {
+    if (omAddresses.containsKey(nodeId)) {
+      return omAddresses.get(nodeId);
+    } else {
+      LOG.error("expected nodeId not found in omAddresses for proxyhost {}",
+          nodeId);
+      throw new IOException(
+          "expected nodeId not found in omAddresses for proxyhost");
+    }
+
+  }
+
+  public List<String> getGrpcOmNodeIDList() {
+    return getOmNodeIDList();
+  }
+}
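
To make the address resolution above concrete, a minimal sketch of how loadOMClientConfigs is expected to resolve per-node gRPC endpoints; the service id, node ids, hosts, and port below are assumptions for illustration, not values from the patch:

    import java.io.IOException;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.om.ha.GrpcOMFailoverProxyProvider;
    import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
    import org.apache.hadoop.security.UserGroupInformation;

    public final class GrpcProxyAddressSketch {
      public static void main(String[] args) throws IOException {
        OzoneConfiguration conf = new OzoneConfiguration();
        conf.set("ozone.om.service.ids", "omservice");
        conf.set("ozone.om.nodes.omservice", "om1,om2");
        conf.set("ozone.om.address.omservice.om1", "om1-host");
        conf.set("ozone.om.grpc.port.omservice.om1", "8981");
        conf.set("ozone.om.address.omservice.om2", "om2-host");
        // om2 has no per-node grpc port, so the provider falls back to
        // ozone.om.grpc.port and finally to the GrpcOmTransportConfig default.

        GrpcOMFailoverProxyProvider<OzoneManagerProtocolPB> provider =
            new GrpcOMFailoverProxyProvider<>(conf,
                UserGroupInformation.getCurrentUser(), "omservice",
                OzoneManagerProtocolPB.class);
        System.out.println(provider.getGrpcProxyAddress("om1")); // om1-host:8981
      }
    }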
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java
index 5432468452..9fb690e760 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java
@@ -148,8 +148,6 @@ public class OMFailoverProxyProvider<T> implements
             rpcAddrStr);
 
         if (omProxyInfo.getAddress() != null) {
-
-
           // For a non-HA OM setup, nodeId might be null. If so, we assign it
           // the default value
           if (nodeId == null) {
@@ -551,14 +549,18 @@ public class OMFailoverProxyProvider<T> implements
     return null;
   }
 
-  @VisibleForTesting
-  protected void setProxiesForTesting(
-      Map<String, ProxyInfo<T>> testOMProxies,
-      Map<String, OMProxyInfo> testOMProxyInfos,
-      List<String> testOMNodeIDList) {
-    this.omProxies = testOMProxies;
-    this.omProxyInfos = testOMProxyInfos;
-    this.omNodeIDList = testOMNodeIDList;
+  protected void setProxies(
+      Map<String, ProxyInfo<T>> setOMProxies,
+      Map<String, OMProxyInfo> setOMProxyInfos,
+      List<String> setOMNodeIDList) {
+    this.omProxies = setOMProxies;
+    this.omProxyInfos = setOMProxyInfos;
+    this.omNodeIDList = setOMNodeIDList;
   }
+
+  protected List<String> getOmNodeIDList() {
+    return omNodeIDList;
+  }
+
 }
 
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java
index 3607429e52..72c29f0cc6 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java
@@ -18,22 +18,34 @@
 package org.apache.hadoop.ozone.om.protocolPB;
 
 import java.io.IOException;
-import java.util.Optional;
+import java.lang.reflect.Constructor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 
+import com.google.common.net.HostAndPort;
 import io.grpc.Status;
+import io.grpc.StatusRuntimeException;
+import org.apache.hadoop.ipc.RemoteException;
+
 import org.apache.hadoop.hdds.conf.Config;
 import org.apache.hadoop.hdds.conf.ConfigGroup;
 import org.apache.hadoop.hdds.conf.ConfigTag;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.apache.hadoop.security.UserGroupInformation;
 
+import org.apache.hadoop.ozone.om.ha.GrpcOMFailoverProxyProvider;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerServiceGrpc;
 import io.grpc.ManagedChannel;
 import io.grpc.netty.NettyChannelBuilder;
@@ -42,12 +54,10 @@ import com.google.common.annotations.VisibleForTesting;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
 import static org.apache.hadoop.ozone.om.OMConfigKeys
     .OZONE_OM_GRPC_MAXIMUM_RESPONSE_LENGTH;
 import static org.apache.hadoop.ozone.om.OMConfigKeys
     .OZONE_OM_GRPC_MAXIMUM_RESPONSE_LENGTH_DEFAULT;
-import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys;
 
 /**
  * Grpc transport for grpc between s3g and om.
@@ -60,60 +70,171 @@ public class GrpcOmTransport implements OmTransport {
   private final AtomicBoolean isRunning = new AtomicBoolean(false);
 
   // gRPC specific
-  private ManagedChannel channel;
-
   private OzoneManagerServiceGrpc.OzoneManagerServiceBlockingStub client;
+  private Map<String,
+      OzoneManagerServiceGrpc.OzoneManagerServiceBlockingStub> clients;
+  private Map<String, ManagedChannel> channels;
+  private int lastVisited = -1;
+  private ConfigurationSource conf;
 
-  private String host = "om";
-  private int port = 8981;
+  //private String host = "om";
+  private AtomicReference<String> host;
   private int maxSize;
 
+  private List<String> oms;
+  private RetryPolicy retryPolicy;
+  private int failoverCount = 0;
+  private GrpcOMFailoverProxyProvider<OzoneManagerProtocolPB>
+      omFailoverProxyProvider;
+
   public GrpcOmTransport(ConfigurationSource conf,
                           UserGroupInformation ugi, String omServiceId)
       throws IOException {
-    Optional<String> omHost = getHostNameFromConfigKeys(conf,
-        OZONE_OM_ADDRESS_KEY);
-    this.host = omHost.orElse("0.0.0.0");
 
-    port = conf.getObject(GrpcOmTransportConfig.class).getPort();
+    this.channels = new HashMap<>();
+    this.clients = new HashMap<>();
+    this.conf = conf;
+    this.host = new AtomicReference();
 
     maxSize = conf.getInt(OZONE_OM_GRPC_MAXIMUM_RESPONSE_LENGTH,
         OZONE_OM_GRPC_MAXIMUM_RESPONSE_LENGTH_DEFAULT);
 
+    omFailoverProxyProvider = new GrpcOMFailoverProxyProvider(
+        conf,
+        ugi,
+        omServiceId,
+        OzoneManagerProtocolPB.class);
+
     start();
   }
 
-  public void start() {
+  public void start() throws IOException {
+    host.set(omFailoverProxyProvider
+        .getGrpcProxyAddress(
+            omFailoverProxyProvider.getCurrentProxyOMNodeId()));
+
     if (!isRunning.compareAndSet(false, true)) {
       LOG.info("Ignore. already started.");
       return;
     }
-    NettyChannelBuilder channelBuilder =
-        NettyChannelBuilder.forAddress(host, port)
-            .usePlaintext()
-            .maxInboundMessageSize(maxSize);
 
-    channel = channelBuilder.build();
-    client = OzoneManagerServiceGrpc.newBlockingStub(channel);
+    List<String> nodes = omFailoverProxyProvider.getGrpcOmNodeIDList();
+    for (String nodeId : nodes) {
+      String hostaddr = omFailoverProxyProvider.getGrpcProxyAddress(nodeId);
+      HostAndPort hp = HostAndPort.fromString(hostaddr);
+
+      NettyChannelBuilder channelBuilder =
+          NettyChannelBuilder.forAddress(hp.getHost(), hp.getPort())
+              .usePlaintext()
+              .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE);
+      channels.put(hostaddr, channelBuilder.build());
+      clients.put(hostaddr,
+          OzoneManagerServiceGrpc
+              .newBlockingStub(channels.get(hostaddr)));
+    }
+    int maxFailovers = conf.getInt(
+        OzoneConfigKeys.OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
+        OzoneConfigKeys.OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
 
+
+    retryPolicy = omFailoverProxyProvider.getRetryPolicy(maxFailovers);
     LOG.info("{}: started", CLIENT_NAME);
   }
 
   @Override
   public OMResponse submitRequest(OMRequest payload) throws IOException {
     OMResponse resp = null;
-    try {
-      resp = client.submitRequest(payload);
-    } catch (io.grpc.StatusRuntimeException e) {
-      ResultCodes resultCode = ResultCodes.INTERNAL_ERROR;
-      if (e.getStatus().getCode() == Status.Code.UNAVAILABLE) {
-        resultCode = ResultCodes.TIMEOUT;
+    boolean tryOtherHost = true;
+    ResultCodes resultCode = ResultCodes.INTERNAL_ERROR;
+    while (tryOtherHost) {
+      tryOtherHost = false;
+      try {
+        resp = clients.get(host.get()).submitRequest(payload);
+      } catch (StatusRuntimeException e) {
+        if (e.getStatus().getCode() == Status.Code.UNAVAILABLE) {
+          resultCode = ResultCodes.TIMEOUT;
+        }
+        Exception exp = new Exception(e);
+        tryOtherHost = shouldRetry(unwrapException(exp));
+        if (!tryOtherHost) {
+          throw new OMException(resultCode);
+        }
       }
-      throw new OMException(e.getCause(), resultCode);
     }
     return resp;
   }
 
+  private Exception unwrapException(Exception ex) {
+    Exception grpcException = null;
+    try {
+      StatusRuntimeException srexp =
+          (StatusRuntimeException)ex.getCause();
+      Status status = srexp.getStatus();
+      LOG.debug("GRPC exception wrapped: {}", status.getDescription());
+      if (status.getCode() == Status.Code.INTERNAL) {
+        // exception potentially generated by OzoneManagerServiceGrpc
+        Class<?> realClass = Class.forName(status.getDescription()
+            .substring(0, status.getDescription()
+                .indexOf(":")));
+        Class<? extends Exception> cls = realClass
+            .asSubclass(Exception.class);
+        Constructor<? extends Exception> cn = cls.getConstructor(String.class);
+        cn.setAccessible(true);
+        grpcException = cn.newInstance(status.getDescription());
+        IOException remote = null;
+        try {
+          String cause = status.getDescription();
+          cause = cause.substring(cause.indexOf(":") + 2);
+          remote = new RemoteException(cause.substring(0, cause.indexOf(":")),
+              cause.substring(cause.indexOf(":") + 1));
+          grpcException.initCause(remote);
+        } catch (Exception e) {
+          LOG.error("cannot get cause for remote exception");
+        }
+      } else {
+        // exception generated by connection failure, gRPC
+        grpcException = ex;
+      }
+    } catch (Exception e) {
+      grpcException = new IOException(e);
+      LOG.error("error unwrapping exception from OMResponse {}");
+    }
+    return grpcException;
+  }
+
+  private boolean shouldRetry(Exception ex) {
+    boolean retry = false;
+    RetryPolicy.RetryAction action = null;
+    try {
+      action = retryPolicy.shouldRetry((Exception)ex, 0, failoverCount++, true);
+      LOG.debug("grpc failover retry action {}", action.action);
+      if (action.action == RetryPolicy.RetryAction.RetryDecision.FAIL) {
+        retry = false;
+        LOG.error("Retry request failed. " + action.reason, ex);
+      } else {
+        if (action.action == RetryPolicy.RetryAction.RetryDecision.RETRY ||
+            (action.action == RetryPolicy.RetryAction.RetryDecision
+                .FAILOVER_AND_RETRY)) {
+          if (action.delayMillis > 0) {
+            try {
+              Thread.sleep(action.delayMillis);
+            } catch (Exception e) {
+              LOG.error("Error trying sleep thread for {}", action.delayMillis);
+            }
+          }
+          // switch om host to current proxy OMNodeId
+          host.set(omFailoverProxyProvider
+              .getGrpcProxyAddress(
+                  omFailoverProxyProvider.getCurrentProxyOMNodeId()));
+          retry = true;
+        }
+      }
+    } catch (Exception e) {
+      LOG.error("Failed failover exception {}", e);
+    }
+    return retry;
+  }
+
   // stub implementation for interface
   @Override
   public Text getDelegationTokenService() {
@@ -121,11 +242,15 @@ public class GrpcOmTransport implements OmTransport {
   }
 
   public void shutdown() {
-    channel.shutdown();
-    try {
-      channel.awaitTermination(5, TimeUnit.SECONDS);
-    } catch (Exception e) {
-      LOG.error("failed to shutdown OzoneManagerServiceGrpc channel", e);
+    for (Map.Entry<String, ManagedChannel> entry : channels.entrySet()) {
+      ManagedChannel channel = entry.getValue();
+      channel.shutdown();
+      try {
+        channel.awaitTermination(5, TimeUnit.SECONDS);
+      } catch (Exception e) {
+        LOG.error("failed to shutdown OzoneManagerServiceGrpc channel {} : {}",
+            entry.getKey(), e);
+      }
     }
   }
 
@@ -156,9 +281,16 @@ public class GrpcOmTransport implements OmTransport {
   }
 
   @VisibleForTesting
-  public void startClient(ManagedChannel testChannel) {
-    client = OzoneManagerServiceGrpc.newBlockingStub(testChannel);
+  public void startClient(ManagedChannel testChannel) throws IOException {
+    List<String> nodes = omFailoverProxyProvider.getGrpcOmNodeIDList();
+    for (String nodeId : nodes) {
+      String hostaddr = omFailoverProxyProvider.getGrpcProxyAddress(nodeId);
 
+      clients.put(hostaddr,
+          OzoneManagerServiceGrpc
+              .newBlockingStub(testChannel));
+    }
     LOG.info("{}: started", CLIENT_NAME);
   }
+
 }
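
For illustration, a minimal, self-contained sketch of the
failover-retry pattern the reworked submitRequest loop follows; the
Transport interface and send() are stand-ins for the gRPC blocking stub
call, and the round-robin host switch stands in for the proxy
provider's failover decision:

    import java.io.IOException;
    import java.util.List;
    import java.util.concurrent.atomic.AtomicReference;

    final class FailoverLoopSketch {
      interface Transport {
        String send(String host, String request) throws IOException;
      }

      static String submitWithFailover(Transport transport,
          List<String> hosts, String request, int maxFailovers)
          throws IOException {
        AtomicReference<String> host = new AtomicReference<>(hosts.get(0));
        int failovers = 0;
        while (true) {
          try {
            return transport.send(host.get(), request);  // happy path
          } catch (IOException e) {
            if (failovers++ >= maxFailovers) {
              throw e;  // retries exhausted, surface the last failure
            }
            // fail over to the next OM host and retry
            int next = (hosts.indexOf(host.get()) + 1) % hosts.size();
            host.set(hosts.get(next));
          }
        }
      }

      private FailoverLoopSketch() { }
    }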
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestS3GrpcOmTransport.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestS3GrpcOmTransport.java
index 323bb0eeb3..b427db5562 100644
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestS3GrpcOmTransport.java
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestS3GrpcOmTransport.java
@@ -25,25 +25,29 @@ import static org.mockito.Mockito.mock;
 import io.grpc.inprocess.InProcessChannelBuilder;
 import io.grpc.inprocess.InProcessServerBuilder;
 import io.grpc.testing.GrpcCleanupRule;
+import io.grpc.ManagedChannel;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.om.exceptions.OMNotLeaderException;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerServiceGrpc;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServiceListRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.Before;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import java.io.IOException;
 
-import io.grpc.ManagedChannel;
+import com.google.protobuf.ServiceException;
+import org.apache.ratis.protocol.RaftPeerId;
 
-import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK;
+import static org.junit.Assert.fail;
 
 /**
  * Tests for GrpcOmTransport client.
@@ -59,11 +63,32 @@ public class TestS3GrpcOmTransport {
 
   private final OMResponse omResponse = OMResponse.newBuilder()
                   .setSuccess(true)
-                  .setStatus(Status.OK)
+                  .setStatus(org.apache.hadoop.ozone.protocol
+                      .proto.OzoneManagerProtocolProtos.Status.OK)
                   .setLeaderOMNodeId(leaderOMNodeId)
                   .setCmdType(Type.AllocateBlock)
                   .build();
 
+  private boolean doFailover = false;
+
+  private OzoneConfiguration conf;
+
+  private String omServiceId;
+  private UserGroupInformation ugi;
+  private ManagedChannel channel;
+
+
+  private ServiceException createNotLeaderException() {
+    RaftPeerId raftPeerId = RaftPeerId.getRaftPeerId("testNodeId");
+
+    // TODO: Set the suggested leader ID. Right now the client does not
+    // use the suggested leader ID; this needs to be fixed.
+    OMNotLeaderException notLeaderException =
+        new OMNotLeaderException(raftPeerId);
+    LOG.debug(notLeaderException.getMessage());
+    return new ServiceException(notLeaderException);
+  }
+
   private final OzoneManagerServiceGrpc.OzoneManagerServiceImplBase
       serviceImpl =
         mock(OzoneManagerServiceGrpc.OzoneManagerServiceImplBase.class,
@@ -78,10 +103,22 @@ public class TestS3GrpcOmTransport {
                                               .OzoneManagerProtocolProtos
                                               .OMResponse>
                                           responseObserver) {
-                  responseObserver.onNext(omResponse);
-                  responseObserver.onCompleted();
+                  try {
+                    if (doFailover) {
+                      doFailover = false;
+                      throw createNotLeaderException();
+                    } else {
+                      responseObserver.onNext(omResponse);
+                      responseObserver.onCompleted();
+                    }
+                  } catch (Throwable e) {
+                    IOException ex = new IOException(e.getCause());
+                    responseObserver.onError(io.grpc.Status
+                        .INTERNAL
+                        .withDescription(ex.getMessage())
+                        .asRuntimeException());
+                  }
                 }
-
               }));
 
   private GrpcOmTransport client;
@@ -101,18 +138,37 @@ public class TestS3GrpcOmTransport {
         .start());
 
     // Create a client channel and register for automatic graceful shutdown.
-    ManagedChannel channel = grpcCleanup.register(
+    channel = grpcCleanup.register(
         InProcessChannelBuilder.forName(serverName).directExecutor().build());
 
-    String omServiceId = "";
-    OzoneConfiguration conf = new OzoneConfiguration();
-    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+    omServiceId = "";
+    conf = new OzoneConfiguration();
+    ugi = UserGroupInformation.getCurrentUser();
+    doFailover = false;
+  }
+
+  @Test
+  public void testSubmitRequestToServer() throws Exception {
+    ServiceListRequest req = ServiceListRequest.newBuilder().build();
+
+    final OMRequest omRequest = OMRequest.newBuilder()
+        .setCmdType(Type.ServiceList)
+        .setVersion(CURRENT_VERSION)
+        .setClientId("test")
+        .setServiceListRequest(req)
+        .build();
+
     client = new GrpcOmTransport(conf, ugi, omServiceId);
     client.startClient(channel);
+
+    final OMResponse resp = client.submitRequest(omRequest);
+    Assert.assertEquals(resp.getStatus(), org.apache.hadoop.ozone.protocol
+        .proto.OzoneManagerProtocolProtos.Status.OK);
+    Assert.assertEquals(resp.getLeaderOMNodeId(), leaderOMNodeId);
   }
 
   @Test
-  public void testSubmitRequestToServer() throws Exception {
+  public void testGrpcFailoverProxy() throws Exception {
     ServiceListRequest req = ServiceListRequest.newBuilder().build();
 
     final OMRequest omRequest = OMRequest.newBuilder()
@@ -122,8 +178,45 @@ public class TestS3GrpcOmTransport {
         .setServiceListRequest(req)
         .build();
 
+    client = new GrpcOmTransport(conf, ugi, omServiceId);
+    client.startClient(channel);
+
+    doFailover = true;
+    // The first invocation raises an OMNotLeaderException; failover is
+    // performed and the request is retried internally, so the second
+    // invocation against the server succeeds.
     final OMResponse resp = client.submitRequest(omRequest);
-    Assert.assertEquals(resp.getStatus(), OK);
+    Assert.assertEquals(resp.getStatus(), org.apache.hadoop.ozone.protocol
+        .proto.OzoneManagerProtocolProtos.Status.OK);
     Assert.assertEquals(resp.getLeaderOMNodeId(), leaderOMNodeId);
   }
+
+  @Test
+  public void testGrpcFailoverProxyExhaustRetry() throws Exception {
+    ServiceListRequest req = ServiceListRequest.newBuilder().build();
+
+    final OMRequest omRequest = OMRequest.newBuilder()
+        .setCmdType(Type.ServiceList)
+        .setVersion(CURRENT_VERSION)
+        .setClientId("test")
+        .setServiceListRequest(req)
+        .build();
+
+    conf.setInt(OzoneConfigKeys.OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY, 0);
+    client = new GrpcOmTransport(conf, ugi, omServiceId);
+    client.startClient(channel);
+
+    doFailover = true;
+    // The first invocation raises an OMNotLeaderException and failover
+    // is attempted, but the failover proxy provider returns a FAIL
+    // retry action because the attempt count exceeds the configured
+    // maximum of zero.
+
+    try {
+      client.submitRequest(omRequest);
+      fail("expected submitRequest to fail once retries are exhausted");
+    } catch (Exception e) {
+      // expected: retry attempts exceeded the configured maximum
+    }
+  }
 }
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config
index 69f4e52eae..4642680394 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config
@@ -36,6 +36,7 @@ OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
 OZONE-SITE.XML_hdds.profiler.endpoint.enabled=true
 OZONE-SITE.XML_hdds.scmclient.max.retry.timeout=30s
 OZONE-SITE.XML_hdds.container.report.interval=60s
+OZONE-SITE.XML_ozone.om.s3.grpc.server_enabled=true
 HDFS-SITE.XML_rpc.metrics.quantile.enable=true
 HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
 ASYNC_PROFILER_HOME=/opt/profiler
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config
index 498d02efae..be93d0a6ec 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config
@@ -51,6 +51,7 @@ OZONE-SITE.XML_hdds.grpc.tls.enabled=true
 OZONE-SITE.XML_ozone.replication=3
 OZONE-SITE.XML_hdds.scmclient.max.retry.timeout=30s
 OZONE-SITE.XML_hdds.container.report.interval=60s
+OZONE-SITE.XML_ozone.om.s3.grpc.server_enabled=true
 
 OZONE-SITE.XML_ozone.recon.om.snapshot.task.interval.delay=1m
 OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh
index 7410822cfa..252f953163 100755
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh
@@ -35,7 +35,7 @@ execute_robot_test ${SCM} freon
 
 execute_robot_test ${SCM} basic/links.robot
 
-#execute_robot_test ${SCM} s3
+execute_robot_test ${SCM} s3
 
 execute_robot_test ${SCM} admincli
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
index 3269c394f7..1c772cf46b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
@@ -111,7 +111,8 @@ public class TestOzoneConfigurationFields extends TestConfigurationFieldsBase {
         ReconServerConfigKeys.RECON_OM_SNAPSHOT_TASK_FLUSH_PARAM,
         OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_AUTO_TRIGGER_THRESHOLD_KEY,
         OMConfigKeys.OZONE_OM_HA_PREFIX,
-        OMConfigKeys.OZONE_OM_TRANSPORT_CLASS
+        OMConfigKeys.OZONE_OM_TRANSPORT_CLASS,
+        OMConfigKeys.OZONE_OM_GRPC_PORT_KEY
         // TODO HDDS-2856
     ));
   }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java
index 60942f971b..7fe338c83e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java
@@ -18,13 +18,16 @@
 package org.apache.hadoop.ozone.om;
 
 import java.io.IOException;
+import java.util.OptionalInt;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.conf.Config;
 import org.apache.hadoop.hdds.conf.ConfigGroup;
 import org.apache.hadoop.hdds.conf.ConfigTag;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.ha.ConfUtils;
 import org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB;
 import org.apache.hadoop.ozone.security.OzoneDelegationTokenSecretManager;
 import io.grpc.Server;
@@ -47,9 +50,20 @@ public class GrpcOzoneManagerServer {
                                     omTranslator,
                                 OzoneDelegationTokenSecretManager
                                     delegationTokenMgr) {
-    this.port = config.getObject(
-        GrpcOzoneManagerServerConfig.class).
-        getPort();
+    OptionalInt haPort = HddsUtils.getNumberFromConfigKeys(config,
+        ConfUtils.addKeySuffixes(
+            OMConfigKeys.OZONE_OM_GRPC_PORT_KEY,
+            config.get(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY),
+            config.get(OMConfigKeys.OZONE_OM_NODE_ID_KEY)),
+        OMConfigKeys.OZONE_OM_GRPC_PORT_KEY);
+    if (haPort.isPresent()) {
+      this.port = haPort.getAsInt();
+    } else {
+      this.port = config.getObject(
+              GrpcOzoneManagerServerConfig.class).
+          getPort();
+    }
+
     init(omTranslator,
         delegationTokenMgr,
         config);
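
The port selection above first looks for a per-node key built by
suffixing ozone.om.grpc.port with the service ID and node ID, then
falls back to the plain config object. A rough sketch of that lookup
order, with a plain map standing in for OzoneConfiguration:

    import java.util.Map;
    import java.util.OptionalInt;

    final class HaPortLookupSketch {
      // Try the fully qualified per-node key first, then the base key.
      static OptionalInt grpcPort(Map<String, String> conf,
          String serviceId, String nodeId) {
        String base = "ozone.om.grpc.port";
        String suffixed = base + "." + serviceId + "." + nodeId;
        for (String key : new String[] {suffixed, base}) {
          String value = conf.get(key);
          if (value != null) {
            return OptionalInt.of(Integer.parseInt(value));
          }
        }
        return OptionalInt.empty();
      }

      private HaPortLookupSketch() { }
    }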
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerServiceGrpc.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerServiceGrpc.java
index de11608703..a88e259a28 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerServiceGrpc.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerServiceGrpc.java
@@ -17,13 +17,13 @@
  */
 package org.apache.hadoop.ozone.om;
 
+import io.grpc.Status;
 import com.google.protobuf.RpcController;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
 import org.apache.hadoop.ipc.ClientId;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerServiceGrpc.OzoneManagerServiceImplBase;
 import org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB;
 import org.apache.hadoop.ozone.protocol.proto
@@ -68,7 +68,6 @@ public class OzoneManagerServiceGrpc extends OzoneManagerServiceImplBase {
         "processing s3g client submit request - for command {}",
         request.getCmdType().name());
     AtomicInteger callCount = new AtomicInteger(0);
-    OMResponse omResponse = null;
 
     org.apache.hadoop.ipc.Server.getCurCall().set(new Server.Call(1,
         callCount.incrementAndGet(),
@@ -84,42 +83,16 @@ public class OzoneManagerServiceGrpc extends OzoneManagerServiceImplBase {
     // for OMRequests.  Test through successful ratis-enabled OMRequest
     // handling without dependency on hadoop IPC based Server.
     try {
-      omResponse = this.omTranslator.
+      OMResponse omResponse = this.omTranslator.
           submitRequest(NULL_RPC_CONTROLLER, request);
+      responseObserver.onNext(omResponse);
     } catch (Throwable e) {
-      IOException ioe = null;
-      Throwable se = e.getCause();
-      if (se == null) {
-        ioe = new IOException(e);
-      } else {
-        ioe = se instanceof IOException ?
-            (IOException) se : new IOException(e);
-      }
-      omResponse = createErrorResponse(
-          request,
-          ioe);
+      IOException ex = new IOException(e.getCause());
+      responseObserver.onError(Status
+          .INTERNAL
+          .withDescription(ex.getMessage())
+          .asRuntimeException());
     }
-    responseObserver.onNext(omResponse);
     responseObserver.onCompleted();
   }
-
-  /**
-   * Create OMResponse from the specified OMRequest and exception.
-   *
-   * @param omRequest
-   * @param exception
-   * @return OMResponse
-   */
-  private OMResponse createErrorResponse(
-      OMRequest omRequest, IOException exception) {
-    OMResponse.Builder omResponse = OMResponse.newBuilder()
-        .setStatus(OzoneManagerRatisUtils.exceptionToResponseStatus(exception))
-        .setCmdType(omRequest.getCmdType())
-        .setTraceID(omRequest.getTraceID())
-        .setSuccess(false);
-    if (exception.getMessage() != null) {
-      omResponse.setMessage(exception.getMessage());
-    }
-    return omResponse.build();
-  }
 }
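
The hunk above replaces the hand-built error OMResponse with gRPC
status propagation: the exception class and message travel in the
INTERNAL status description, which the client-side unwrapException in
this same batch parses back. A generic sketch of the server-side
pattern (the handler Function is a stand-in for the protobuf
translator):

    import io.grpc.Status;
    import io.grpc.stub.StreamObserver;
    import java.util.function.Function;

    final class ErrorPropagationSketch {
      // Run the handler; emit the response and complete on success,
      // close the call with an INTERNAL status on failure.
      static <Q, R> void handle(Q request, StreamObserver<R> observer,
          Function<Q, R> handler) {
        try {
          observer.onNext(handler.apply(request));
        } catch (RuntimeException e) {
          // "ClassName: message" is the shape unwrapException expects
          observer.onError(Status.INTERNAL
              .withDescription(
                  e.getClass().getName() + ": " + e.getMessage())
              .asRuntimeException());
          return;  // the call is closed; onError must not be followed
                   // by onCompleted
        }
        observer.onCompleted();
      }

      private ErrorPropagationSketch() { }
    }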
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java
index 01601668b6..fe7f6f49ea 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java
@@ -143,7 +143,7 @@ public class TestOMFailovers {
         omProxyInfos.put(nodeId, null);
         omNodeIDList.add(nodeId);
       }
-      setProxiesForTesting(omProxies, omProxyInfos, omNodeIDList);
+      setProxies(omProxies, omProxyInfos, omNodeIDList);
     }
 
     @Override


---------------------------------------------------------------------


[ozone] 02/08: Revert "HDDS-5545. Enable TLS for GRPC OmTransport implementation (#2945)"

Posted by ad...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch HDDS-4440-s3-performance
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit d87a7461c1e4eb661bffa0d54d2c2faf0aa2c100
Author: Doroszlai, Attila <ad...@apache.org>
AuthorDate: Mon Apr 11 19:12:00 2022 +0200

    Revert "HDDS-5545. Enable TLS for GRPC OmTransport implementation (#2945)"
    
    This reverts commit 0b0c586fec7bf7931d09821a74bbe3cb64afc110.
---
 hadoop-ozone/common/pom.xml                        | 11 ---
 .../org/apache/hadoop/ozone/om/OMConfigKeys.java   |  1 +
 .../ozone/om/protocolPB/GrpcOmTransport.java       | 39 +--------
 .../ozone/om/protocolPB/OmTransportFactory.java    |  8 +-
 ...OzoneManagerProtocolClientSideTranslatorPB.java |  2 +-
 hadoop-ozone/dist/src/main/license/bin/LICENSE.txt |  2 -
 hadoop-ozone/dist/src/main/license/jar-report.txt  |  2 -
 .../src/main/proto/OmClientProtocol.proto          |  2 +-
 hadoop-ozone/ozone-manager/pom.xml                 | 11 ---
 .../hadoop/ozone/om/GrpcOzoneManagerServer.java    | 46 ++---------
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |  3 +-
 .../ozone/om/TestGrpcOzoneManagerServer.java       |  3 +-
 .../apache/hadoop/ozone/s3/OzoneClientCache.java   | 96 +---------------------
 .../hadoop/ozone/s3/endpoint/EndpointBase.java     |  2 +-
 .../ozone/protocolPB/TestGrpcOmTransport.java      | 13 ---
 pom.xml                                            |  3 -
 16 files changed, 22 insertions(+), 222 deletions(-)

diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml
index 701e6d5e6a..7c9518ab53 100644
--- a/hadoop-ozone/common/pom.xml
+++ b/hadoop-ozone/common/pom.xml
@@ -48,17 +48,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
     <dependency>
       <groupId>io.netty</groupId>
       <artifactId>netty-handler-proxy</artifactId>
-    </dependency>
-      <dependency>
-        <groupId>io.netty</groupId>
-        <artifactId>netty-tcnative-boringssl-static</artifactId>
-        <version>${tcnative.version}</version>
-        <scope>runtime</scope>
-      </dependency>
-    <dependency>
-      <groupId>io.netty</groupId>
-      <artifactId>netty-tcnative</artifactId>
-      <version>${tcnative.version}</version>
     </dependency>
     <dependency>
       <groupId>org.mockito</groupId>
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index e679404eba..6ebd7e11ad 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -307,4 +307,5 @@ public final class OMConfigKeys {
   public static final String OZONE_OM_TRANSPORT_CLASS_DEFAULT =
       "org.apache.hadoop.ozone.om.protocolPB"
           + ".Hadoop3OmTransportFactory";
+
 }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java
index 764f51b2f6..72c29f0cc6 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java
@@ -19,12 +19,11 @@ package org.apache.hadoop.ozone.om.protocolPB;
 
 import java.io.IOException;
 import java.lang.reflect.Constructor;
-import java.security.cert.X509Certificate;
-import java.util.List;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 
 import com.google.common.net.HostAndPort;
@@ -36,7 +35,6 @@ import org.apache.hadoop.hdds.conf.Config;
 import org.apache.hadoop.hdds.conf.ConfigGroup;
 import org.apache.hadoop.hdds.conf.ConfigTag;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -50,9 +48,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.ozone.om.ha.GrpcOMFailoverProxyProvider;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerServiceGrpc;
 import io.grpc.ManagedChannel;
-import io.grpc.netty.GrpcSslContexts;
 import io.grpc.netty.NettyChannelBuilder;
-import io.netty.handler.ssl.SslContextBuilder;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.slf4j.Logger;
@@ -74,8 +70,6 @@ public class GrpcOmTransport implements OmTransport {
   private final AtomicBoolean isRunning = new AtomicBoolean(false);
 
   // gRPC specific
-  private static List<X509Certificate> caCerts = null;
-
   private OzoneManagerServiceGrpc.OzoneManagerServiceBlockingStub client;
   private Map<String,
       OzoneManagerServiceGrpc.OzoneManagerServiceBlockingStub> clients;
@@ -83,13 +77,9 @@ public class GrpcOmTransport implements OmTransport {
   private int lastVisited = -1;
   private ConfigurationSource conf;
 
+  //private String host = "om";
   private AtomicReference<String> host;
   private int maxSize;
-  private SecurityConfig secConfig;
-
-  public static void setCaCerts(List<X509Certificate> x509Certificates) {
-    caCerts = x509Certificates;
-  }
 
   private List<String> oms;
   private RetryPolicy retryPolicy;
@@ -106,7 +96,6 @@ public class GrpcOmTransport implements OmTransport {
     this.conf = conf;
     this.host = new AtomicReference();
 
-    secConfig =  new SecurityConfig(conf);
     maxSize = conf.getInt(OZONE_OM_GRPC_MAXIMUM_RESPONSE_LENGTH,
         OZONE_OM_GRPC_MAXIMUM_RESPONSE_LENGTH_DEFAULT);
 
@@ -138,29 +127,6 @@ public class GrpcOmTransport implements OmTransport {
           NettyChannelBuilder.forAddress(hp.getHost(), hp.getPort())
               .usePlaintext()
               .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE);
-
-      if (secConfig.isGrpcTlsEnabled()) {
-        try {
-          SslContextBuilder sslContextBuilder = GrpcSslContexts.forClient();
-          if (secConfig.isSecurityEnabled()) {
-            if (caCerts != null) {
-              sslContextBuilder.trustManager(caCerts);
-            } else {
-              LOG.error("x509Certicates empty");
-            }
-            channelBuilder.useTransportSecurity().
-                sslContext(sslContextBuilder.build());
-          } else {
-            LOG.error("ozone.security not enabled when TLS specified," +
-                " using plaintext");
-          }
-        } catch (Exception ex) {
-          LOG.error("cannot establish TLS for grpc om transport client");
-        }
-      } else {
-        channelBuilder.usePlaintext();
-      }
-
       channels.put(hostaddr, channelBuilder.build());
       clients.put(hostaddr,
           OzoneManagerServiceGrpc
@@ -170,6 +136,7 @@ public class GrpcOmTransport implements OmTransport {
         OzoneConfigKeys.OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
         OzoneConfigKeys.OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
 
+
     retryPolicy = omFailoverProxyProvider.getRetryPolicy(maxFailovers);
     LOG.info("{}: started", CLIENT_NAME);
   }
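
For reference, the TLS wiring this revert strips out (and a later
commit in this batch restores) amounts to attaching a client SslContext
built from the OM CA certificates. A condensed sketch, with certificate
loading elided:

    import io.grpc.ManagedChannel;
    import io.grpc.netty.GrpcSslContexts;
    import io.grpc.netty.NettyChannelBuilder;
    import io.netty.handler.ssl.SslContext;
    import java.security.cert.X509Certificate;
    import java.util.List;

    final class TlsChannelSketch {
      // Build a TLS channel that trusts the given CA certificates.
      static ManagedChannel secureChannel(String host, int port,
          List<X509Certificate> caCerts) throws Exception {
        SslContext sslContext = GrpcSslContexts.forClient()
            .trustManager(caCerts.toArray(new X509Certificate[0]))
            .build();
        return NettyChannelBuilder.forAddress(host, port)
            .useTransportSecurity()
            .sslContext(sslContext)
            .build();
      }

      private TlsChannelSketch() { }
    }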
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OmTransportFactory.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OmTransportFactory.java
index 2ba8536e18..2eb11d0320 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OmTransportFactory.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OmTransportFactory.java
@@ -46,8 +46,8 @@ public interface OmTransportFactory {
       throws IOException {
     try {
       // if configured transport class is different than the default
-      // OmTransportFactory (Hadoop3OmTransportFactory), then
-      // check service loader for transport class and instantiate it
+      // Hadoop3OmTransportFactory, then check service loader for
+      // transport class and instantiate it
       if (conf
           .get(OZONE_OM_TRANSPORT_CLASS,
               OZONE_OM_TRANSPORT_CLASS_DEFAULT) !=
@@ -61,7 +61,9 @@ public interface OmTransportFactory {
         }
       }
       return OmTransportFactory.class.getClassLoader()
-          .loadClass(OZONE_OM_TRANSPORT_CLASS_DEFAULT)
+          .loadClass(
+              "org.apache.hadoop.ozone.om.protocolPB"
+                  + ".Hadoop3OmTransportFactory")
           .asSubclass(OmTransportFactory.class)
           .newInstance();
     } catch (Exception ex) {
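
The comment rewritten above describes the selection logic: when
ozone.om.transport.class names something other than the default
Hadoop3OmTransportFactory, the implementation is looked up through the
JDK ServiceLoader. A stripped-down sketch of that lookup (Factory
stands in for OmTransportFactory):

    import java.util.ServiceLoader;

    final class TransportFactoryLookupSketch {
      interface Factory { }  // stand-in for OmTransportFactory

      // Scan registered providers for the configured implementation.
      static Factory load(String configuredClassName) {
        for (Factory f : ServiceLoader.load(Factory.class)) {
          if (f.getClass().getName().equals(configuredClassName)) {
            return f;
          }
        }
        throw new IllegalStateException(
            "no factory registered for " + configuredClassName);
      }

      private TransportFactoryLookupSketch() { }
    }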
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
index e368f10732..be758a06d2 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
@@ -178,7 +178,7 @@ public final class OzoneManagerProtocolClientSideTranslatorPB
   private OmTransport transport;
   private ThreadLocal<S3Auth> threadLocalS3Auth
       = new ThreadLocal<>();
-    
+
   private boolean s3AuthCheck;
   public OzoneManagerProtocolClientSideTranslatorPB(OmTransport omTransport,
       String clientId) {
diff --git a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt
index 20281ea0c5..3439317598 100644
--- a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt
+++ b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt
@@ -309,8 +309,6 @@ Apache License
    io.netty:netty-handler
    io.netty:netty-handler-proxy
    io.netty:netty-resolver
-   io.netty:netty-tcnative-boringssl-static
-   io.netty:netty-tcnative
    io.netty:netty-transport
    io.netty:netty-transport-native-epoll
    io.netty:netty-transport-native-unix-common
diff --git a/hadoop-ozone/dist/src/main/license/jar-report.txt b/hadoop-ozone/dist/src/main/license/jar-report.txt
index a337955656..ec949fdf33 100644
--- a/hadoop-ozone/dist/src/main/license/jar-report.txt
+++ b/hadoop-ozone/dist/src/main/license/jar-report.txt
@@ -172,8 +172,6 @@ share/ozone/lib/netty-common.Final.jar
 share/ozone/lib/netty-handler.Final.jar
 share/ozone/lib/netty-handler-proxy.Final.jar
 share/ozone/lib/netty-resolver.Final.jar
-share/ozone/lib/netty-tcnative-boringssl-static.Final.jar
-share/ozone/lib/netty-tcnative.Final.jar
 share/ozone/lib/netty-transport.Final.jar
 share/ozone/lib/netty-transport-native-epoll.Final.jar
 share/ozone/lib/netty-transport-native-unix-common.Final.jar
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index bbb4267a12..694d7df6b3 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -1350,7 +1350,7 @@ message UpdateGetS3SecretRequest {
 }
 
 /**
-  This will be used by OM to authenticate S3 gateway requests on a per request basis.
+  This will be used by OM to authenicate S3 gateway requests on a per request basis.
 */
 message S3Authentication {
     required string stringToSign = 1;
diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml
index bc2909e686..cea140b122 100644
--- a/hadoop-ozone/ozone-manager/pom.xml
+++ b/hadoop-ozone/ozone-manager/pom.xml
@@ -87,17 +87,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <groupId>org.bouncycastle</groupId>
       <artifactId>bcprov-jdk15on</artifactId>
     </dependency>
-    <dependency>
-      <groupId>io.netty</groupId>
-      <artifactId>netty-tcnative</artifactId>
-      <version>${tcnative.version}</version>
-    </dependency>
-      <dependency>
-        <groupId>io.netty</groupId>
-        <artifactId>netty-tcnative-boringssl-static</artifactId>
-        <version>${tcnative.version}</version>
-        <scope>runtime</scope>
-      </dependency>
 
     <dependency>
       <groupId>org.mockito</groupId>
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java
index b083378fab..7fe338c83e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java
@@ -30,23 +30,11 @@ import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.ha.ConfUtils;
 import org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB;
 import org.apache.hadoop.ozone.security.OzoneDelegationTokenSecretManager;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
-import io.grpc.netty.GrpcSslContexts;
-import io.grpc.netty.NettyServerBuilder;
-import io.netty.handler.ssl.SslContextBuilder;
-import io.netty.handler.ssl.SslProvider;
 import io.grpc.Server;
-
+import io.grpc.netty.NettyServerBuilder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_GRPC_TLS_PROVIDER;
-import static org.apache.hadoop.hdds.HddsConfigKeys
-    .HDDS_GRPC_TLS_PROVIDER_DEFAULT;
-
 /**
  * Separated network server for gRPC transport OzoneManagerService s3g->OM.
  */
@@ -61,8 +49,7 @@ public class GrpcOzoneManagerServer {
                                 OzoneManagerProtocolServerSideTranslatorPB
                                     omTranslator,
                                 OzoneDelegationTokenSecretManager
-                                    delegationTokenMgr,
-                                CertificateClient caClient) {
+                                    delegationTokenMgr) {
     OptionalInt haPort = HddsUtils.getNumberFromConfigKeys(config,
         ConfUtils.addKeySuffixes(
             OMConfigKeys.OZONE_OM_GRPC_PORT_KEY,
@@ -76,44 +63,21 @@ public class GrpcOzoneManagerServer {
               GrpcOzoneManagerServerConfig.class).
           getPort();
     }
-    
+
     init(omTranslator,
         delegationTokenMgr,
-        config,
-        caClient);
+        config);
   }
 
   public void init(OzoneManagerProtocolServerSideTranslatorPB omTranslator,
                    OzoneDelegationTokenSecretManager delegationTokenMgr,
-                   OzoneConfiguration omServerConfig,
-                   CertificateClient caClient) {
+                   OzoneConfiguration omServerConfig) {
     NettyServerBuilder nettyServerBuilder = NettyServerBuilder.forPort(port)
         .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE)
         .addService(new OzoneManagerServiceGrpc(omTranslator,
             delegationTokenMgr,
             omServerConfig));
 
-    SecurityConfig secConf = new SecurityConfig(omServerConfig);
-    if (secConf.isGrpcTlsEnabled()) {
-      try {
-        if (secConf.isSecurityEnabled()) {
-          SslContextBuilder sslClientContextBuilder =
-              SslContextBuilder.forServer(caClient.getPrivateKey(),
-                  caClient.getCertificate());
-          SslContextBuilder sslContextBuilder = GrpcSslContexts.configure(
-              sslClientContextBuilder,
-              SslProvider.valueOf(omServerConfig.get(HDDS_GRPC_TLS_PROVIDER,
-                  HDDS_GRPC_TLS_PROVIDER_DEFAULT)));
-          nettyServerBuilder.sslContext(sslContextBuilder.build());
-        } else {
-          LOG.error("ozone.security not enabled when TLS specified," +
-                            " creating Om S3g GRPC channel using plaintext");
-        }
-      } catch (Exception ex) {
-        LOG.error("Unable to setup TLS for secure Om S3g GRPC channel.", ex);
-      }
-    }
-
     server = nettyServerBuilder.build();
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index a02b3197a0..6297199c27 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -1107,8 +1107,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
           throws IOException {
     return new GrpcOzoneManagerServer(conf,
             this.omServerProtocol,
-            this.delegationTokenMgr,
-            this.certClient);
+            this.delegationTokenMgr);
   }
 
   private static boolean isOzoneSecurityEnabled() {
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestGrpcOzoneManagerServer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestGrpcOzoneManagerServer.java
index e58b7a47ea..b75a651eea 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestGrpcOzoneManagerServer.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestGrpcOzoneManagerServer.java
@@ -49,8 +49,7 @@ public class TestGrpcOzoneManagerServer {
 
     server = new GrpcOzoneManagerServer(conf,
         omServerProtocol,
-        ozoneManager.getDelegationTokenMgr(),
-        ozoneManager.getCertificateClient());
+        ozoneManager.getDelegationTokenMgr());
 
     try {
       server.start();
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientCache.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientCache.java
index 80f0b643fc..ab88b18e7e 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientCache.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientCache.java
@@ -19,25 +19,15 @@ package org.apache.hadoop.ozone.s3;
 
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.om.protocol.S3Auth;
-import org.apache.hadoop.ozone.om.helpers.ServiceInfoEx;
-import org.apache.hadoop.ozone.om.protocolPB.GrpcOmTransport;
-import org.apache.hadoop.ozone.OzoneSecurityUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import javax.annotation.PreDestroy;
 import javax.enterprise.context.ApplicationScoped;
 import java.io.IOException;
-import java.security.cert.CertificateException;
-import java.util.Collections;
-import java.util.List;
-
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_TRANSPORT_CLASS;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_TRANSPORT_CLASS_DEFAULT;
 
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_CLIENT_PROTOCOL_VERSION;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_CLIENT_PROTOCOL_VERSION_KEY;
@@ -53,29 +43,16 @@ public final class OzoneClientCache {
   // for s3g gRPC OmTransport, OmRequest - OmResponse channel
   private static OzoneClientCache instance;
   private OzoneClient client;
-  private SecurityConfig secConfig;
 
   private OzoneClientCache(OzoneConfiguration ozoneConfiguration)
       throws IOException {
+    // S3 Gateway should always set the S3 Auth.
+    ozoneConfiguration.setBoolean(S3Auth.S3_AUTH_CHECK, true);
     // Set the expected OM version if not set via config.
     ozoneConfiguration.setIfUnset(OZONE_OM_CLIENT_PROTOCOL_VERSION_KEY,
         OZONE_OM_CLIENT_PROTOCOL_VERSION);
     String omServiceID = OmUtils.getOzoneManagerServiceId(ozoneConfiguration);
-    secConfig = new SecurityConfig(ozoneConfiguration);
-    client = null;
     try {
-      if (secConfig.isGrpcTlsEnabled()) {
-        if (ozoneConfiguration
-            .get(OZONE_OM_TRANSPORT_CLASS,
-                OZONE_OM_TRANSPORT_CLASS_DEFAULT) !=
-            OZONE_OM_TRANSPORT_CLASS_DEFAULT) {
-          // Grpc transport selected
-          // need to get certificate for TLS through
-          // hadoop rpc first via ServiceInfo
-          setCertificate(omServiceID,
-              ozoneConfiguration);
-        }
-      }
       if (omServiceID == null) {
         client = OzoneClientFactory.getRpcClient(ozoneConfiguration);
       } else {
@@ -87,8 +64,6 @@ public final class OzoneClientCache {
       LOG.warn("cannot create OzoneClient", e);
       throw e;
     }
-    // S3 Gateway should always set the S3 Auth.
-    ozoneConfiguration.setBoolean(S3Auth.S3_AUTH_CHECK, true);
   }
 
   public static OzoneClient getOzoneClientInstance(OzoneConfiguration
@@ -100,73 +75,8 @@ public final class OzoneClientCache {
     return instance.client;
   }
 
-  public static void closeClient() throws IOException {
-    if (instance != null) {
-      instance.client.close();
-      instance = null;
-    }
-  }
-
-  private void setCertificate(String omServiceID,
-                              OzoneConfiguration conf)
-      throws IOException {
-
-    // create local copy of config in case exception occurs
-    // with certificate OmRequest
-    OzoneConfiguration config = new OzoneConfiguration(conf);
-    OzoneClient certClient;
-
-    if (secConfig.isGrpcTlsEnabled()) {
-      // set OmTransport to hadoop rpc to securely,
-      // get certificates with service list request
-      config.set(OZONE_OM_TRANSPORT_CLASS,
-          OZONE_OM_TRANSPORT_CLASS_DEFAULT);
-
-      if (omServiceID == null) {
-        certClient = OzoneClientFactory.getRpcClient(config);
-      } else {
-        // As in HA case, we need to pass om service ID.
-        certClient = OzoneClientFactory.getRpcClient(omServiceID,
-            config);
-      }
-      try {
-        ServiceInfoEx serviceInfoEx = certClient
-            .getObjectStore()
-            .getClientProxy()
-            .getOzoneManagerClient()
-            .getServiceInfo();
-
-        if (OzoneSecurityUtil.isSecurityEnabled(conf)) {
-          String caCertPem = null;
-          List<String> caCertPems = null;
-          caCertPem = serviceInfoEx.getCaCertificate();
-          caCertPems = serviceInfoEx.getCaCertPemList();
-          if (caCertPems == null || caCertPems.isEmpty()) {
-            if (caCertPem == null) {
-              LOG.error("S3g received empty caCertPems from serviceInfo");
-              throw new CertificateException("No caCerts found; caCertPem can" +
-                  " not be null when caCertPems is empty or null");
-            }
-            caCertPems = Collections.singletonList(caCertPem);
-          }
-          GrpcOmTransport.setCaCerts(OzoneSecurityUtil
-              .convertToX509(caCertPems));
-        }
-      } catch (CertificateException ce) {
-        throw new IOException(ce);
-      } catch (IOException e) {
-        throw e;
-      } finally {
-        if (certClient != null) {
-          certClient.close();
-        }
-      }
-    }
-  }
-
-
   @PreDestroy
   public void destroy() throws IOException {
-    OzoneClientCache.closeClient();
+    client.close();
   }
 }
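
The setCertificate path removed above fetched CA certificate PEMs from
the OM over the default transport and handed them to the gRPC client
trust store. The conversion step is standard JCA parsing; a
self-contained sketch:

    import java.io.ByteArrayInputStream;
    import java.nio.charset.StandardCharsets;
    import java.security.cert.CertificateFactory;
    import java.security.cert.X509Certificate;
    import java.util.ArrayList;
    import java.util.List;

    final class CaCertBootstrapSketch {
      // Parse PEM-encoded CA certificates into X509Certificate objects
      // suitable for a TLS trust manager.
      static List<X509Certificate> toX509(List<String> pems)
          throws Exception {
        CertificateFactory factory =
            CertificateFactory.getInstance("X.509");
        List<X509Certificate> certs = new ArrayList<>();
        for (String pem : pems) {
          certs.add((X509Certificate) factory.generateCertificate(
              new ByteArrayInputStream(
                  pem.getBytes(StandardCharsets.UTF_8))));
        }
        return certs;
      }

      private CaCertBootstrapSketch() { }
    }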
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
index 162d0b55aa..248ee92cfd 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
@@ -58,7 +58,7 @@ public abstract class EndpointBase {
     } catch (OMException ex) {
       if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
         throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName);
-      } else if (ex.getResult() == ResultCodes.INVALID_TOKEN) {
+      } else if (ex.getResult() == ResultCodes.S3_SECRET_NOT_FOUND) {
         throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED,
             s3Auth.getAccessID());
       } else if (ex.getResult() == ResultCodes.TIMEOUT ||
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/protocolPB/TestGrpcOmTransport.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/protocolPB/TestGrpcOmTransport.java
index a28f47a809..a158e0212e 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/protocolPB/TestGrpcOmTransport.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/protocolPB/TestGrpcOmTransport.java
@@ -58,19 +58,6 @@ public class TestGrpcOmTransport {
 
   }
 
-  @Test
-  public void testHrpcOmTransportFactory() throws Exception {
-    String omServiceId = "";
-    OzoneConfiguration conf = new OzoneConfiguration();
-
-    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-    OmTransport omTransport = OmTransportFactory.create(conf, ugi, omServiceId);
-    // OmTransport should be Hadoop Rpc and
-    // fail equality GrpcOmTransport equality test
-    Assert.assertNotEquals(GrpcOmTransport.class.getSimpleName(),
-        omTransport.getClass().getSimpleName());
-  }
-
   @Test
   public void testStartStop() throws Exception {
     String omServiceId = "";
diff --git a/pom.xml b/pom.xml
index 9b282a68f3..11a0ad1ee9 100644
--- a/pom.xml
+++ b/pom.xml
@@ -194,9 +194,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
 
     <netty.version>4.1.63.Final</netty.version>
     <io.grpc.version>1.38.0</io.grpc.version>
-    <tcnative.version>2.0.38.Final</tcnative.version> <!-- See table for correct version -->
-    <!-- Table for netty, grpc & tcnative version combinations  -->
-    <!-- https://github.com/grpc/grpc-java/blob/master/SECURITY.md#netty -->
 
     <!-- define the Java language version used by the compiler -->
     <javac.version>1.8</javac.version>


---------------------------------------------------------------------


[ozone] 07/08: HDDS-5545. Enable TLS for GRPC OmTransport implementation (#2945)

Posted by ad...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch HDDS-4440-s3-performance
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 47e457697e57551e01545b2a92f71913e6146c3a
Author: Neil Joshi <ne...@gmail.com>
AuthorDate: Tue Mar 29 12:11:14 2022 -0600

    HDDS-5545. Enable TLS for GRPC OmTransport implementation (#2945)
---
 hadoop-ozone/common/pom.xml                        | 11 +++
 .../org/apache/hadoop/ozone/om/OMConfigKeys.java   |  1 -
 .../ozone/om/protocolPB/GrpcOmTransport.java       | 39 ++++++++-
 .../ozone/om/protocolPB/OmTransportFactory.java    |  8 +-
 ...OzoneManagerProtocolClientSideTranslatorPB.java |  2 +-
 hadoop-ozone/dist/src/main/license/bin/LICENSE.txt |  2 +
 hadoop-ozone/dist/src/main/license/jar-report.txt  |  2 +
 .../src/main/proto/OmClientProtocol.proto          |  2 +-
 hadoop-ozone/ozone-manager/pom.xml                 | 11 +++
 .../hadoop/ozone/om/GrpcOzoneManagerServer.java    | 46 +++++++++--
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |  3 +-
 .../ozone/om/TestGrpcOzoneManagerServer.java       |  3 +-
 .../apache/hadoop/ozone/s3/OzoneClientCache.java   | 96 +++++++++++++++++++++-
 .../hadoop/ozone/s3/endpoint/EndpointBase.java     |  2 +-
 .../ozone/protocolPB/TestGrpcOmTransport.java      | 13 +++
 pom.xml                                            |  3 +
 16 files changed, 222 insertions(+), 22 deletions(-)

diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml
index 7c9518ab53..701e6d5e6a 100644
--- a/hadoop-ozone/common/pom.xml
+++ b/hadoop-ozone/common/pom.xml
@@ -48,6 +48,17 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
     <dependency>
       <groupId>io.netty</groupId>
       <artifactId>netty-handler-proxy</artifactId>
+    </dependency>
+      <dependency>
+        <groupId>io.netty</groupId>
+        <artifactId>netty-tcnative-boringssl-static</artifactId>
+        <version>${tcnative.version}</version>
+        <scope>runtime</scope>
+      </dependency>
+    <dependency>
+      <groupId>io.netty</groupId>
+      <artifactId>netty-tcnative</artifactId>
+      <version>${tcnative.version}</version>
     </dependency>
     <dependency>
       <groupId>org.mockito</groupId>
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index 6ebd7e11ad..e679404eba 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -307,5 +307,4 @@ public final class OMConfigKeys {
   public static final String OZONE_OM_TRANSPORT_CLASS_DEFAULT =
       "org.apache.hadoop.ozone.om.protocolPB"
           + ".Hadoop3OmTransportFactory";
-
 }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java
index 72c29f0cc6..764f51b2f6 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java
@@ -19,11 +19,12 @@ package org.apache.hadoop.ozone.om.protocolPB;
 
 import java.io.IOException;
 import java.lang.reflect.Constructor;
+import java.security.cert.X509Certificate;
+import java.util.List;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
 
 import com.google.common.net.HostAndPort;
@@ -35,6 +36,7 @@ import org.apache.hadoop.hdds.conf.Config;
 import org.apache.hadoop.hdds.conf.ConfigGroup;
 import org.apache.hadoop.hdds.conf.ConfigTag;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.security.x509.SecurityConfig;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -48,7 +50,9 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.ozone.om.ha.GrpcOMFailoverProxyProvider;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerServiceGrpc;
 import io.grpc.ManagedChannel;
+import io.grpc.netty.GrpcSslContexts;
 import io.grpc.netty.NettyChannelBuilder;
+import io.netty.handler.ssl.SslContextBuilder;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.slf4j.Logger;
@@ -70,6 +74,8 @@ public class GrpcOmTransport implements OmTransport {
   private final AtomicBoolean isRunning = new AtomicBoolean(false);
 
   // gRPC specific
+  private static List<X509Certificate> caCerts = null;
+
   private OzoneManagerServiceGrpc.OzoneManagerServiceBlockingStub client;
   private Map<String,
       OzoneManagerServiceGrpc.OzoneManagerServiceBlockingStub> clients;
@@ -77,9 +83,13 @@ public class GrpcOmTransport implements OmTransport {
   private int lastVisited = -1;
   private ConfigurationSource conf;
 
-  //private String host = "om";
   private AtomicReference<String> host;
   private int maxSize;
+  private SecurityConfig secConfig;
+
+  public static void setCaCerts(List<X509Certificate> x509Certificates) {
+    caCerts = x509Certificates;
+  }
 
   private List<String> oms;
   private RetryPolicy retryPolicy;
@@ -96,6 +106,7 @@ public class GrpcOmTransport implements OmTransport {
     this.conf = conf;
     this.host = new AtomicReference();
 
+    secConfig =  new SecurityConfig(conf);
     maxSize = conf.getInt(OZONE_OM_GRPC_MAXIMUM_RESPONSE_LENGTH,
         OZONE_OM_GRPC_MAXIMUM_RESPONSE_LENGTH_DEFAULT);
 
@@ -127,6 +138,29 @@ public class GrpcOmTransport implements OmTransport {
           NettyChannelBuilder.forAddress(hp.getHost(), hp.getPort())
               .usePlaintext()
               .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE);
+
+      if (secConfig.isGrpcTlsEnabled()) {
+        try {
+          SslContextBuilder sslContextBuilder = GrpcSslContexts.forClient();
+          if (secConfig.isSecurityEnabled()) {
+            if (caCerts != null) {
+              sslContextBuilder.trustManager(caCerts);
+            } else {
+              LOG.error("x509Certificates empty");
+            }
+            channelBuilder.useTransportSecurity().
+                sslContext(sslContextBuilder.build());
+          } else {
+            LOG.error("ozone.security not enabled when TLS specified," +
+                " using plaintext");
+          }
+        } catch (Exception ex) {
+          LOG.error("Cannot establish TLS for gRPC OM transport client",
+              ex);
+        }
+      } else {
+        channelBuilder.usePlaintext();
+      }
+
       channels.put(hostaddr, channelBuilder.build());
       clients.put(hostaddr,
           OzoneManagerServiceGrpc
@@ -136,7 +170,6 @@ public class GrpcOmTransport implements OmTransport {
         OzoneConfigKeys.OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
         OzoneConfigKeys.OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
 
-
     retryPolicy = omFailoverProxyProvider.getRetryPolicy(maxFailovers);
     LOG.info("{}: started", CLIENT_NAME);
   }
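
For context, the client-side wiring in the hunk above reduces to a small
pattern: GrpcSslContexts.forClient() yields a Netty SslContextBuilder, the CA
certificates delivered out of band (via setCaCerts) become its trust manager,
and the resulting SslContext is attached with useTransportSecurity(). A
minimal, self-contained sketch under those assumptions follows; buildChannel
and its parameters are illustrative names, not part of the patch.

    import java.security.cert.X509Certificate;
    import java.util.List;

    import io.grpc.ManagedChannel;
    import io.grpc.netty.GrpcSslContexts;
    import io.grpc.netty.NettyChannelBuilder;
    import io.netty.handler.ssl.SslContext;

    public final class TlsChannelSketch {
      // Build a TLS-enabled channel when CA certificates are available,
      // otherwise fall back to plaintext, mirroring the hunk above.
      public static ManagedChannel buildChannel(String host, int port,
          List<X509Certificate> caCerts) throws Exception {
        NettyChannelBuilder builder =
            NettyChannelBuilder.forAddress(host, port);
        if (caCerts != null && !caCerts.isEmpty()) {
          // Trust only the CA certificates handed to the client.
          SslContext sslContext = GrpcSslContexts.forClient()
              .trustManager(caCerts)
              .build();
          builder.useTransportSecurity().sslContext(sslContext);
        } else {
          builder.usePlaintext();
        }
        return builder.build();
      }

      private TlsChannelSketch() {
      }
    }
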
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OmTransportFactory.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OmTransportFactory.java
index 2eb11d0320..2ba8536e18 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OmTransportFactory.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OmTransportFactory.java
@@ -46,8 +46,8 @@ public interface OmTransportFactory {
       throws IOException {
     try {
       // if configured transport class is different than the default
-      // Hadoop3OmTransportFactory, then check service loader for
-      // transport class and instantiate it
+      // OmTransportFactory (Hadoop3OmTransportFactory), then
+      // check service loader for transport class and instantiate it
       if (conf
           .get(OZONE_OM_TRANSPORT_CLASS,
               OZONE_OM_TRANSPORT_CLASS_DEFAULT) !=
@@ -61,9 +61,7 @@ public interface OmTransportFactory {
         }
       }
       return OmTransportFactory.class.getClassLoader()
-          .loadClass(
-              "org.apache.hadoop.ozone.om.protocolPB"
-                  + ".Hadoop3OmTransportFactory")
+          .loadClass(OZONE_OM_TRANSPORT_CLASS_DEFAULT)
           .asSubclass(OmTransportFactory.class)
           .newInstance();
     } catch (Exception ex) {
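
As the comment in this hunk notes, a non-default transport class is discovered
through the service loader. A minimal sketch of that lookup pattern, assuming
a hypothetical stand-in Factory interface (all names below are illustrative);
note the equals() comparison, since config values are plain strings and
reference comparison (==/!=) on them is unreliable:

    import java.util.ServiceLoader;

    public final class TransportLookupSketch {
      // Stand-in for the real OmTransportFactory interface.
      public interface Factory {
      }

      // Scan registered Factory implementations and return the one whose
      // class name matches the configured transport class; otherwise fall
      // back to the supplied default.
      public static Factory find(String configuredClass, Factory fallback) {
        for (Factory factory : ServiceLoader.load(Factory.class)) {
          if (factory.getClass().getName().equals(configuredClass)) {
            return factory;
          }
        }
        return fallback;
      }

      private TransportLookupSketch() {
      }
    }
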
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
index be758a06d2..e368f10732 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
@@ -178,7 +178,7 @@ public final class OzoneManagerProtocolClientSideTranslatorPB
   private OmTransport transport;
   private ThreadLocal<S3Auth> threadLocalS3Auth
       = new ThreadLocal<>();
-
+    
   private boolean s3AuthCheck;
   public OzoneManagerProtocolClientSideTranslatorPB(OmTransport omTransport,
       String clientId) {
diff --git a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt
index 3439317598..20281ea0c5 100644
--- a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt
+++ b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt
@@ -309,6 +309,8 @@ Apache License
    io.netty:netty-handler
    io.netty:netty-handler-proxy
    io.netty:netty-resolver
+   io.netty:netty-tcnative-boringssl-static
+   io.netty:netty-tcnative
    io.netty:netty-transport
    io.netty:netty-transport-native-epoll
    io.netty:netty-transport-native-unix-common
diff --git a/hadoop-ozone/dist/src/main/license/jar-report.txt b/hadoop-ozone/dist/src/main/license/jar-report.txt
index ec949fdf33..a337955656 100644
--- a/hadoop-ozone/dist/src/main/license/jar-report.txt
+++ b/hadoop-ozone/dist/src/main/license/jar-report.txt
@@ -172,6 +172,8 @@ share/ozone/lib/netty-common.Final.jar
 share/ozone/lib/netty-handler.Final.jar
 share/ozone/lib/netty-handler-proxy.Final.jar
 share/ozone/lib/netty-resolver.Final.jar
+share/ozone/lib/netty-tcnative-boringssl-static.Final.jar
+share/ozone/lib/netty-tcnative.Final.jar
 share/ozone/lib/netty-transport.Final.jar
 share/ozone/lib/netty-transport-native-epoll.Final.jar
 share/ozone/lib/netty-transport-native-unix-common.Final.jar
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index 694d7df6b3..bbb4267a12 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -1350,7 +1350,7 @@ message UpdateGetS3SecretRequest {
 }
 
 /**
-  This will be used by OM to authenicate S3 gateway requests on a per request basis.
+  This will be used by OM to authenticate S3 gateway requests on a per request basis.
 */
 message S3Authentication {
     required string stringToSign = 1;
diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml
index cea140b122..bc2909e686 100644
--- a/hadoop-ozone/ozone-manager/pom.xml
+++ b/hadoop-ozone/ozone-manager/pom.xml
@@ -87,6 +87,17 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <groupId>org.bouncycastle</groupId>
       <artifactId>bcprov-jdk15on</artifactId>
     </dependency>
+    <dependency>
+      <groupId>io.netty</groupId>
+      <artifactId>netty-tcnative</artifactId>
+      <version>${tcnative.version}</version>
+    </dependency>
+      <dependency>
+        <groupId>io.netty</groupId>
+        <artifactId>netty-tcnative-boringssl-static</artifactId>
+        <version>${tcnative.version}</version>
+        <scope>runtime</scope>
+      </dependency>
 
     <dependency>
       <groupId>org.mockito</groupId>
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java
index 7fe338c83e..b083378fab 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java
@@ -30,11 +30,23 @@ import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.ha.ConfUtils;
 import org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB;
 import org.apache.hadoop.ozone.security.OzoneDelegationTokenSecretManager;
-import io.grpc.Server;
+import org.apache.hadoop.hdds.security.x509.SecurityConfig;
+import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
+import io.grpc.netty.GrpcSslContexts;
 import io.grpc.netty.NettyServerBuilder;
+import io.netty.handler.ssl.SslContextBuilder;
+import io.netty.handler.ssl.SslProvider;
+import io.grpc.Server;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+
+
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_GRPC_TLS_PROVIDER;
+import static org.apache.hadoop.hdds.HddsConfigKeys
+    .HDDS_GRPC_TLS_PROVIDER_DEFAULT;
+
 /**
  * Separated network server for gRPC transport OzoneManagerService s3g->OM.
  */
@@ -49,7 +61,8 @@ public class GrpcOzoneManagerServer {
                                 OzoneManagerProtocolServerSideTranslatorPB
                                     omTranslator,
                                 OzoneDelegationTokenSecretManager
-                                    delegationTokenMgr) {
+                                    delegationTokenMgr,
+                                CertificateClient caClient) {
     OptionalInt haPort = HddsUtils.getNumberFromConfigKeys(config,
         ConfUtils.addKeySuffixes(
             OMConfigKeys.OZONE_OM_GRPC_PORT_KEY,
@@ -63,21 +76,44 @@ public class GrpcOzoneManagerServer {
               GrpcOzoneManagerServerConfig.class).
           getPort();
     }
-
+
     init(omTranslator,
         delegationTokenMgr,
-        config);
+        config,
+        caClient);
   }
 
   public void init(OzoneManagerProtocolServerSideTranslatorPB omTranslator,
                    OzoneDelegationTokenSecretManager delegationTokenMgr,
-                   OzoneConfiguration omServerConfig) {
+                   OzoneConfiguration omServerConfig,
+                   CertificateClient caClient) {
     NettyServerBuilder nettyServerBuilder = NettyServerBuilder.forPort(port)
         .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE)
         .addService(new OzoneManagerServiceGrpc(omTranslator,
             delegationTokenMgr,
             omServerConfig));
 
+    SecurityConfig secConf = new SecurityConfig(omServerConfig);
+    if (secConf.isGrpcTlsEnabled()) {
+      try {
+        if (secConf.isSecurityEnabled()) {
+          SslContextBuilder sslClientContextBuilder =
+              SslContextBuilder.forServer(caClient.getPrivateKey(),
+                  caClient.getCertificate());
+          SslContextBuilder sslContextBuilder = GrpcSslContexts.configure(
+              sslClientContextBuilder,
+              SslProvider.valueOf(omServerConfig.get(HDDS_GRPC_TLS_PROVIDER,
+                  HDDS_GRPC_TLS_PROVIDER_DEFAULT)));
+          nettyServerBuilder.sslContext(sslContextBuilder.build());
+        } else {
+          LOG.error("ozone.security not enabled when TLS specified," +
+                            " creating Om S3g GRPC channel using plaintext");
+        }
+      } catch (Exception ex) {
+        LOG.error("Unable to setup TLS for secure Om S3g GRPC channel.", ex);
+      }
+    }
+
     server = nettyServerBuilder.build();
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 6297199c27..a02b3197a0 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -1107,7 +1107,8 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
           throws IOException {
     return new GrpcOzoneManagerServer(conf,
             this.omServerProtocol,
-            this.delegationTokenMgr);
+            this.delegationTokenMgr,
+            this.certClient);
   }
 
   private static boolean isOzoneSecurityEnabled() {
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestGrpcOzoneManagerServer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestGrpcOzoneManagerServer.java
index b75a651eea..e58b7a47ea 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestGrpcOzoneManagerServer.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestGrpcOzoneManagerServer.java
@@ -49,7 +49,8 @@ public class TestGrpcOzoneManagerServer {
 
     server = new GrpcOzoneManagerServer(conf,
         omServerProtocol,
-        ozoneManager.getDelegationTokenMgr());
+        ozoneManager.getDelegationTokenMgr(),
+        ozoneManager.getCertificateClient());
 
     try {
       server.start();
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientCache.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientCache.java
index ab88b18e7e..80f0b643fc 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientCache.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientCache.java
@@ -19,15 +19,25 @@ package org.apache.hadoop.ozone.s3;
 
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.security.x509.SecurityConfig;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.om.protocol.S3Auth;
+import org.apache.hadoop.ozone.om.helpers.ServiceInfoEx;
+import org.apache.hadoop.ozone.om.protocolPB.GrpcOmTransport;
+import org.apache.hadoop.ozone.OzoneSecurityUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import javax.annotation.PreDestroy;
 import javax.enterprise.context.ApplicationScoped;
 import java.io.IOException;
+import java.security.cert.CertificateException;
+import java.util.Collections;
+import java.util.List;
+
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_TRANSPORT_CLASS;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_TRANSPORT_CLASS_DEFAULT;
 
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_CLIENT_PROTOCOL_VERSION;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_CLIENT_PROTOCOL_VERSION_KEY;
@@ -43,16 +53,29 @@ public final class OzoneClientCache {
   // for s3g gRPC OmTransport, OmRequest - OmResponse channel
   private static OzoneClientCache instance;
   private OzoneClient client;
+  private SecurityConfig secConfig;
 
   private OzoneClientCache(OzoneConfiguration ozoneConfiguration)
       throws IOException {
-    // S3 Gateway should always set the S3 Auth.
-    ozoneConfiguration.setBoolean(S3Auth.S3_AUTH_CHECK, true);
     // Set the expected OM version if not set via config.
     ozoneConfiguration.setIfUnset(OZONE_OM_CLIENT_PROTOCOL_VERSION_KEY,
         OZONE_OM_CLIENT_PROTOCOL_VERSION);
     String omServiceID = OmUtils.getOzoneManagerServiceId(ozoneConfiguration);
+    secConfig = new SecurityConfig(ozoneConfiguration);
+    client = null;
     try {
+      if (secConfig.isGrpcTlsEnabled()) {
+        if (!ozoneConfiguration
+            .get(OZONE_OM_TRANSPORT_CLASS,
+                OZONE_OM_TRANSPORT_CLASS_DEFAULT)
+            .equals(OZONE_OM_TRANSPORT_CLASS_DEFAULT)) {
+          // gRPC transport selected; the CA certificate needed for TLS
+          // must first be fetched over Hadoop RPC via ServiceInfo
+          setCertificate(omServiceID,
+              ozoneConfiguration);
+        }
+      }
       if (omServiceID == null) {
         client = OzoneClientFactory.getRpcClient(ozoneConfiguration);
       } else {
@@ -64,6 +87,8 @@ public final class OzoneClientCache {
       LOG.warn("cannot create OzoneClient", e);
       throw e;
     }
+    // S3 Gateway should always set the S3 Auth.
+    ozoneConfiguration.setBoolean(S3Auth.S3_AUTH_CHECK, true);
   }
 
   public static OzoneClient getOzoneClientInstance(OzoneConfiguration
@@ -75,8 +100,73 @@ public final class OzoneClientCache {
     return instance.client;
   }
 
+  public static void closeClient() throws IOException {
+    if (instance != null) {
+      instance.client.close();
+      instance = null;
+    }
+  }
+
+  private void setCertificate(String omServiceID,
+                              OzoneConfiguration conf)
+      throws IOException {
+
+    // create a local copy of the config in case an exception occurs
+    // with the certificate OmRequest
+    OzoneConfiguration config = new OzoneConfiguration(conf);
+    OzoneClient certClient;
+
+    if (secConfig.isGrpcTlsEnabled()) {
+      // set OmTransport to Hadoop RPC in order to securely
+      // fetch certificates with the service list request
+      config.set(OZONE_OM_TRANSPORT_CLASS,
+          OZONE_OM_TRANSPORT_CLASS_DEFAULT);
+
+      if (omServiceID == null) {
+        certClient = OzoneClientFactory.getRpcClient(config);
+      } else {
+        // As in HA case, we need to pass om service ID.
+        certClient = OzoneClientFactory.getRpcClient(omServiceID,
+            config);
+      }
+      try {
+        ServiceInfoEx serviceInfoEx = certClient
+            .getObjectStore()
+            .getClientProxy()
+            .getOzoneManagerClient()
+            .getServiceInfo();
+
+        if (OzoneSecurityUtil.isSecurityEnabled(conf)) {
+          String caCertPem = null;
+          List<String> caCertPems = null;
+          caCertPem = serviceInfoEx.getCaCertificate();
+          caCertPems = serviceInfoEx.getCaCertPemList();
+          if (caCertPems == null || caCertPems.isEmpty()) {
+            if (caCertPem == null) {
+              LOG.error("S3g received empty caCertPems from serviceInfo");
+              throw new CertificateException("No caCerts found; caCertPem can" +
+                  " not be null when caCertPems is empty or null");
+            }
+            caCertPems = Collections.singletonList(caCertPem);
+          }
+          GrpcOmTransport.setCaCerts(OzoneSecurityUtil
+              .convertToX509(caCertPems));
+        }
+      } catch (CertificateException ce) {
+        throw new IOException(ce);
+      } finally {
+        if (certClient != null) {
+          certClient.close();
+        }
+      }
+    }
+  }
+
+
   @PreDestroy
   public void destroy() throws IOException {
-    client.close();
+    OzoneClientCache.closeClient();
   }
 }
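
The setCertificate() path above hands PEM-encoded CA certificates to
GrpcOmTransport via OzoneSecurityUtil.convertToX509. Assuming that helper
behaves like standard JDK PEM parsing, the conversion can be sketched with
CertificateFactory alone (the class and method names below are illustrative,
not the actual Ozone implementation):

    import java.io.ByteArrayInputStream;
    import java.nio.charset.StandardCharsets;
    import java.security.cert.CertificateFactory;
    import java.security.cert.X509Certificate;
    import java.util.ArrayList;
    import java.util.List;

    public final class PemConversionSketch {
      // Parse each PEM-encoded certificate string into an X509Certificate
      // using the JDK's CertificateFactory; the factory accepts the
      // "-----BEGIN CERTIFICATE-----" Base64 form directly.
      public static List<X509Certificate> toX509(List<String> pems)
          throws Exception {
        CertificateFactory factory = CertificateFactory.getInstance("X.509");
        List<X509Certificate> certs = new ArrayList<>();
        for (String pem : pems) {
          certs.add((X509Certificate) factory.generateCertificate(
              new ByteArrayInputStream(
                  pem.getBytes(StandardCharsets.UTF_8))));
        }
        return certs;
      }

      private PemConversionSketch() {
      }
    }
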
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
index 248ee92cfd..162d0b55aa 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
@@ -58,7 +58,7 @@ public abstract class EndpointBase {
     } catch (OMException ex) {
       if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
         throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName);
-      } else if (ex.getResult() == ResultCodes.S3_SECRET_NOT_FOUND) {
+      } else if (ex.getResult() == ResultCodes.INVALID_TOKEN) {
         throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED,
             s3Auth.getAccessID());
       } else if (ex.getResult() == ResultCodes.TIMEOUT ||
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/protocolPB/TestGrpcOmTransport.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/protocolPB/TestGrpcOmTransport.java
index a158e0212e..a28f47a809 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/protocolPB/TestGrpcOmTransport.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/protocolPB/TestGrpcOmTransport.java
@@ -58,6 +58,19 @@ public class TestGrpcOmTransport {
 
   }
 
+  @Test
+  public void testHrpcOmTransportFactory() throws Exception {
+    String omServiceId = "";
+    OzoneConfiguration conf = new OzoneConfiguration();
+
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+    OmTransport omTransport = OmTransportFactory.create(conf, ugi, omServiceId);
+    // OmTransport should be Hadoop RPC and therefore
+    // fail the GrpcOmTransport class-name equality check
+    Assert.assertNotEquals(GrpcOmTransport.class.getSimpleName(),
+        omTransport.getClass().getSimpleName());
+  }
+
   @Test
   public void testStartStop() throws Exception {
     String omServiceId = "";
diff --git a/pom.xml b/pom.xml
index 11a0ad1ee9..9b282a68f3 100644
--- a/pom.xml
+++ b/pom.xml
@@ -194,6 +194,9 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
 
     <netty.version>4.1.63.Final</netty.version>
     <io.grpc.version>1.38.0</io.grpc.version>
+    <tcnative.version>2.0.38.Final</tcnative.version> <!-- See table for correct version -->
+    <!-- Table for netty, grpc & tcnative version combinations  -->
+    <!-- https://github.com/grpc/grpc-java/blob/master/SECURITY.md#netty -->
 
     <!-- define the Java language version used by the compiler -->
     <javac.version>1.8</javac.version>




[ozone] 04/08: Revert "HDDS-6303. Merge from master to resolve CI workflow issues & hrpcOmTransport support (#3074)"


adoroszlai pushed a commit to branch HDDS-4440-s3-performance
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 2522127b1c0bc12038265faeb667c9c0592e53bc
Author: Doroszlai, Attila <ad...@apache.org>
AuthorDate: Mon Apr 11 19:12:00 2022 +0200

    Revert "HDDS-6303.  Merge from master to resolve CI workflow issues & hrpcOmTransport support (#3074)"
    
    This reverts commit b88a45e9946a3d9dafe136324fb17db017adffb5.
---
 .github/workflows/post-commit.yml                  |   6 -
 .gitignore                                         |   3 +-
 CONTRIBUTING.md                                    |   3 +-
 dev-support/ci/selective_ci_checks.bats            |  24 +-
 dev-support/ci/selective_ci_checks.sh              |  21 +-
 .../apache/hadoop/hdds/scm/XceiverClientGrpc.java  |   8 +-
 .../hadoop/hdds/scm/client/HddsClientUtils.java    |   4 +-
 .../hadoop/hdds/scm/storage/BlockInputStream.java  |   8 +-
 .../hadoop/hdds/scm/storage/BlockOutputStream.java |   6 +-
 .../apache/hadoop/hdds/scm/storage/BufferPool.java |   2 +-
 .../hdds/scm/storage/RatisBlockOutputStream.java   |   2 +-
 hadoop-hdds/common/pom.xml                         |  15 -
 .../org/apache/hadoop/hdds/HddsConfigKeys.java     |   2 +-
 .../java/org/apache/hadoop/hdds/HddsUtils.java     |   2 +-
 .../java/org/apache/hadoop/hdds/StringUtils.java   |   2 +-
 .../hadoop/hdds/annotation/InterfaceAudience.java  |   6 +-
 .../org/apache/hadoop/hdds/client/OzoneQuota.java  |   2 +-
 .../org/apache/hadoop/hdds/client/QuotaList.java   |  11 +-
 .../hadoop/hdds/client/ReplicationFactor.java      |   2 +-
 .../hadoop/hdds/conf/OzoneConfiguration.java       |   9 +-
 .../hadoop/hdds/fs/CachingSpaceUsageSource.java    |   2 +-
 .../hadoop/hdds/protocol/DatanodeDetails.java      |   4 +-
 .../org/apache/hadoop/hdds/ratis/RatisHelper.java  |   2 +-
 .../org/apache/hadoop/hdds/recon/ReconConfig.java  |   2 +-
 .../hadoop/hdds/scm/ByteStringConversion.java      |   2 +-
 .../java/org/apache/hadoop/hdds/scm/ScmConfig.java |   2 +-
 .../org/apache/hadoop/hdds/scm/ScmConfigKeys.java  |   8 +-
 .../apache/hadoop/hdds/scm/client/ScmClient.java   |  24 +-
 .../hdds/scm/container/ContainerReplicaInfo.java   | 129 ---
 .../scm/container/ReplicationManagerReport.java    | 283 -------
 .../apache/hadoop/hdds/scm/net/InnerNodeImpl.java  |  14 +-
 .../hadoop/hdds/scm/net/NetworkTopologyImpl.java   |  28 +-
 .../org/apache/hadoop/hdds/scm/net/NodeSchema.java |   4 +-
 .../hadoop/hdds/scm/net/NodeSchemaLoader.java      |   8 +-
 .../apache/hadoop/hdds/scm/pipeline/Pipeline.java  |   8 +-
 .../protocol/StorageContainerLocationProtocol.java |  24 +-
 .../hdds/scm/storage/ContainerProtocolCalls.java   |   8 +-
 .../apache/hadoop/hdds/utils/HddsVersionInfo.java  |   2 +-
 .../hadoop/hdds/utils/ResourceSemaphore.java       |  10 +-
 .../org/apache/hadoop/hdds/utils/UniqueId.java     |   2 +-
 .../java/org/apache/hadoop/ozone/OzoneConsts.java  |  13 +-
 .../hadoop/ozone/audit/AuditEventStatus.java       |   2 +-
 .../org/apache/hadoop/ozone/audit/AuditLogger.java |   4 +-
 .../apache/hadoop/ozone/audit/AuditLoggerType.java |   2 +-
 .../org/apache/hadoop/ozone/audit/AuditMarker.java |   4 +-
 .../apache/hadoop/ozone/audit/AuditMessage.java    |  10 +-
 .../org/apache/hadoop/ozone/audit/SCMAction.java   |   3 +-
 .../hadoop/ozone/common/ChecksumByteBuffer.java    |   2 +-
 .../ozone/common/IncrementalChunkBuffer.java       |  13 +-
 .../apache/hadoop/ozone/common/StorageInfo.java    |   8 +-
 .../ozone/common/ha/ratis/RatisSnapshotInfo.java   |   2 +-
 .../ozone/container/common/helpers/BlockData.java  |   4 +-
 .../helpers/ContainerCommandRequestPBHelper.java   |  10 +-
 .../java/org/apache/hadoop/ozone/lease/Lease.java  |   8 +-
 .../hadoop/ozone/lease/LeaseCallbackExecutor.java  |   2 +-
 .../hadoop/ozone/util/ShutdownHookManager.java     |   2 +-
 .../common/src/main/resources/ozone-default.xml    |  59 +-
 .../java/org/apache/hadoop/hdds/TestHddsUtils.java |   4 +-
 .../hadoop/hdds/client/TestReplicationConfig.java  |   2 +-
 .../hadoop/hdds/conf/TestOzoneConfiguration.java   |  28 -
 .../java/org/apache/hadoop/hdds/fs/TestDU.java     |   2 +-
 .../ratis/TestContainerCommandRequestMessage.java  |   6 +-
 .../scm/container/TestContainerReplicaInfo.java    |  59 --
 .../container/TestReplicationManagerReport.java    | 162 ----
 .../hadoop/hdds/scm/container/package-info.java    |  21 -
 .../apache/hadoop/hdds/scm/ha/TestSCMNodeInfo.java |   4 +-
 .../hdds/scm/net/TestNetworkTopologyImpl.java      |  20 +-
 .../hadoop/hdds/scm/pipeline/MockPipeline.java     |   2 +-
 .../hadoop/hdds/tracing/TestStringCodec.java       |  18 +-
 .../hadoop/hdds/utils/MockGatheringChannel.java    |   2 +-
 .../hadoop/hdds/utils/TestResourceSemaphore.java   |   6 +-
 .../org/apache/hadoop/ozone/audit/DummyEntity.java |   2 +-
 .../hadoop/ozone/audit/TestOzoneAuditLogger.java   |   4 +-
 .../apache/hadoop/ozone/common/TestChecksum.java   |   2 +-
 .../hadoop/ozone/common/TestChunkBuffer.java       |  20 +-
 .../hadoop/ozone/common/TestStateMachine.java      |   4 +-
 .../ozone/container/ContainerTestHelper.java       |   2 +-
 .../hadoop/ozone/lease/TestLeaseManager.java       |   2 +-
 ...UpgradeTestUtils.java => TestUpgradeUtils.java} |   4 +-
 .../apache/hadoop/ozone/HddsDatanodeService.java   |   6 +-
 .../container/common/helpers/ContainerMetrics.java |   6 +-
 .../container/common/helpers/ContainerUtils.java   |   2 +-
 .../common/helpers/DatanodeVersionFile.java        |   8 +-
 ...rLayoutVersion.java => ChunkLayOutVersion.java} |  34 +-
 .../ozone/container/common/impl/ContainerData.java |  19 +-
 .../container/common/impl/ContainerDataYaml.java   |  11 +-
 .../container/common/impl/HddsDispatcher.java      |  15 +-
 .../common/impl/OpenContainerBlockMap.java         |   2 +-
 .../common/statemachine/DatanodeConfiguration.java |  34 +-
 .../common/statemachine/DatanodeStateMachine.java  |  13 +-
 .../common/statemachine/StateContext.java          |  12 +-
 .../commandhandler/CommandDispatcher.java          |   2 +-
 .../common/states/datanode/InitDatanodeState.java  |   2 +-
 .../states/endpoint/VersionEndpointTask.java       |   2 +-
 .../common/transport/server/ratis/CSMMetrics.java  |   2 +-
 .../server/ratis/ContainerStateMachine.java        |   4 +-
 .../transport/server/ratis/XceiverServerRatis.java |   4 +-
 .../container/common/utils/HddsVolumeUtil.java     |   2 +-
 .../container/common/volume/MutableVolumeSet.java  |   2 +-
 .../container/common/volume/StorageVolume.java     |   2 +-
 .../ozone/container/common/volume/VolumeUsage.java |   2 +-
 .../container/keyvalue/KeyValueContainer.java      |   6 +-
 .../container/keyvalue/KeyValueContainerCheck.java |  16 +-
 .../container/keyvalue/KeyValueContainerData.java  |  10 +-
 .../ozone/container/keyvalue/KeyValueHandler.java  |  12 +-
 .../helpers/KeyValueContainerLocationUtil.java     |   2 +-
 .../container/keyvalue/impl/BlockManagerImpl.java  |   8 +-
 .../keyvalue/impl/ChunkManagerDispatcher.java      |  18 +-
 .../keyvalue/impl/FilePerBlockStrategy.java        |   4 +-
 .../keyvalue/impl/FilePerChunkStrategy.java        |   4 +-
 .../background/BlockDeletingService.java           |   4 +-
 .../container/metadata/AbstractDatanodeStore.java  |   2 +-
 .../container/ozoneimpl/ContainerController.java   |  14 +-
 .../ozoneimpl/ContainerDataScrubberMetrics.java    |   6 +-
 .../ozoneimpl/ContainerMetadataScanner.java        |   2 +-
 .../ContainerMetadataScrubberMetrics.java          |   2 +-
 .../replication/GrpcReplicationClient.java         |  17 +-
 .../container/replication/ReplicationServer.java   |  56 +-
 .../replication/ReplicationSupervisor.java         |   8 -
 .../container/stream/DirstreamClientHandler.java   |   2 +-
 .../upgrade/DataNodeUpgradeFinalizer.java          |   2 +-
 .../upgrade/VersionedDatanodeFeatures.java         |   2 +-
 .../ozone/protocol/commands/ReregisterCommand.java |   2 +-
 .../hadoop/ozone/TestHddsSecureDatanodeInit.java   |   2 +-
 .../ozone/container/common/ContainerTestUtils.java |   4 +-
 .../hadoop/ozone/container/common/ScmTestMock.java |  35 +-
 .../container/common/TestBlockDeletingService.java |  20 +-
 ...outVersion.java => TestChunkLayOutVersion.java} |  12 +-
 .../ozone/container/common/TestContainerCache.java |   2 +-
 .../common/TestKeyValueContainerData.java          |  10 +-
 .../TestSchemaOneBackwardsCompatibility.java       |  36 +-
 .../container/common/helpers/TestBlockData.java    |   6 +-
 .../common/helpers/TestDatanodeVersionFile.java    |  10 +-
 .../common/impl/TestContainerDataYaml.java         |  18 +-
 .../impl/TestContainerDeletionChoosingPolicy.java  |   8 +-
 .../common/impl/TestContainerPersistence.java      |   8 +-
 .../container/common/impl/TestContainerSet.java    |  20 +-
 .../container/common/impl/TestHddsDispatcher.java  |  10 +-
 .../container/common/interfaces/TestHandler.java   |   2 +-
 .../common/report/TestReportPublisher.java         |   2 +-
 .../statemachine/TestDatanodeConfiguration.java    |  11 +
 .../TestCloseContainerCommandHandler.java          |  14 +-
 .../volume/TestRoundRobinVolumeChoosingPolicy.java |   2 +-
 .../common/volume/TestStorageVolumeChecker.java    |   8 +-
 .../container/common/volume/TestVolumeSet.java     |   2 +-
 ...ayoutTestInfo.java => ChunkLayoutTestInfo.java} |  24 +-
 .../keyvalue/TestKeyValueBlockIterator.java        |  24 +-
 .../container/keyvalue/TestKeyValueContainer.java  |  18 +-
 .../keyvalue/TestKeyValueContainerCheck.java       |  11 +-
 .../TestKeyValueContainerMarkUnhealthy.java        |   8 +-
 .../container/keyvalue/TestKeyValueHandler.java    |  12 +-
 .../container/keyvalue/TestTarContainerPacker.java |   8 +-
 .../keyvalue/impl/AbstractTestChunkManager.java    |   8 +-
 .../keyvalue/impl/CommonChunkManagerTestCases.java |   4 +-
 .../keyvalue/impl/TestBlockManagerImpl.java        |  10 +-
 .../keyvalue/impl/TestChunkManagerDummyImpl.java   |   6 +-
 .../keyvalue/impl/TestFilePerBlockStrategy.java    |   6 +-
 .../keyvalue/impl/TestFilePerChunkStrategy.java    |  12 +-
 .../container/ozoneimpl/TestContainerReader.java   |  16 +-
 .../container/ozoneimpl/TestOzoneContainer.java    |  10 +-
 .../replication/TestGrpcOutputStream.java          |   8 +-
 .../replication/TestReplicationConfig.java         |  75 --
 .../replication/TestReplicationSupervisor.java     |  12 +-
 .../upgrade/TestDataNodeStartupSlvLessThanMlv.java |   4 +-
 .../upgrade/TestDatanodeUpgradeToScmHA.java        |   6 +-
 hadoop-hdds/dev-support/checkstyle/checkstyle.xml  |   1 -
 hadoop-hdds/docs/content/feature/Observability.md  |   2 +-
 .../docs/content/feature/Observability.zh.md       | 217 -----
 hadoop-hdds/docs/content/security/SecuringTDE.md   |  48 +-
 .../docs/content/security/SecuringTDE.zh.md        |   4 +-
 hadoop-hdds/docs/content/tools/TestTools.md        | 129 ++-
 hadoop-hdds/docs/content/tools/TestTools.zh.md     | 129 ++-
 hadoop-hdds/docs/content/tools/_index.md           |   1 +
 hadoop-hdds/docs/content/tools/_index.zh.md        |   1 +
 .../themes/ozonedoc/layouts/shortcodes/image.html  |   2 +-
 .../SCMSecurityProtocolClientSideTranslatorPB.java |   2 +-
 ...inerLocationProtocolClientSideTranslatorPB.java |  90 +-
 .../scm/update/client/CRLClientUpdateHandler.java  |   2 +-
 .../hdds/scm/update/client/ClientCRLStore.java     |   4 +-
 .../update/client/SCMUpdateServiceGrpcClient.java  |   2 +-
 .../x509/certificate/authority/BaseApprover.java   |   2 +-
 .../certificate/authority/DefaultCAServer.java     |   6 +-
 .../authority/PKIProfiles/DefaultCAProfile.java    |   2 +-
 .../client/DefaultCertificateClient.java           |  32 +-
 .../certificate/client/OMCertificateClient.java    |   4 +-
 .../certificates/utils/CertificateSignRequest.java |  10 +-
 .../hadoop/hdds/security/x509/crl/CRLInfo.java     |   2 +-
 .../hdds/security/x509/crl/CRLInfoCodec.java       |   2 +-
 .../hadoop/hdds/server/http/ProfileServlet.java    |   2 +-
 .../hadoop/hdds/utils/DBCheckpointMetrics.java     |   2 +-
 .../java/org/apache/hadoop/hdds/utils/HAUtils.java |   2 +-
 .../hadoop/hdds/utils/MetadataKeyFilters.java      |   2 +-
 .../apache/hadoop/hdds/utils/TransactionInfo.java  |   2 +-
 .../hadoop/hdds/utils/db/DBConfigFromFile.java     |   4 +-
 .../org/apache/hadoop/hdds/utils/db/DBStore.java   |  10 -
 .../hadoop/hdds/utils/db/DBStoreBuilder.java       |   4 +-
 .../org/apache/hadoop/hdds/utils/db/RDBStore.java  |  11 -
 .../apache/hadoop/hdds/utils/db/TypedTable.java    |   2 +-
 .../hadoop/hdds/utils/db/cache/CacheKey.java       |   2 +-
 .../hadoop/hdds/utils/db/cache/EpochEntry.java     |   2 +-
 .../x509/certificate/authority/MockCAStore.java    |   2 +-
 .../x509/certificate/utils/TestCRLCodec.java       |   2 +-
 .../certificates/TestCertificateSignRequest.java   |   4 +-
 .../x509/certificates/TestRootCertificate.java     |   2 +-
 .../security/x509/keys/TestHDDSKeyGenerator.java   |   2 +-
 .../hadoop/hdds/utils/db/TestDBStoreBuilder.java   |  10 +-
 .../apache/hadoop/hdds/utils/db/TestRDBStore.java  |  48 +-
 .../hadoop/hdds/utils/db/TestRDBStoreIterator.java |   6 +-
 .../hadoop/hdds/utils/db/TestRDBTableStore.java    |   4 +-
 .../hdds/utils/db/TestTypedRDBTableStore.java      |   2 +-
 .../hadoop/hdds/utils/db/cache/TestTableCache.java |  20 +-
 .../src/main/proto/ScmAdminProtocol.proto          |  40 +-
 .../interface-client/src/main/proto/hdds.proto     |  25 -
 hadoop-hdds/server-scm/pom.xml                     |  10 +
 .../hadoop/hdds/scm/block/DeletedBlockLogImpl.java |   2 +-
 .../hdds/scm/container/ContainerReplicaCount.java  |  28 +-
 .../hdds/scm/container/ContainerReportHandler.java |   2 +-
 .../hdds/scm/container/ReplicationManager.java     |  91 +-
 .../balancer/AbstractFindTargetGreedy.java         |   6 +-
 .../scm/container/balancer/ContainerBalancer.java  |  51 +-
 .../balancer/ContainerBalancerConfiguration.java   | 131 ++-
 .../scm/container/balancer/FindSourceGreedy.java   |   6 +-
 .../ContainerPlacementPolicyFactory.java           |   2 +-
 .../algorithms/SCMContainerPlacementMetrics.java   |   2 +-
 .../algorithms/SCMContainerPlacementRackAware.java |  12 +-
 .../container/placement/metrics/SCMMetrics.java    |   2 +-
 .../replication/ReplicationManagerMetrics.java     |  42 -
 .../scm/container/states/ContainerStateMap.java    |   2 +-
 .../apache/hadoop/hdds/scm/ha/HASecurityUtils.java |   2 +-
 .../org/apache/hadoop/hdds/scm/ha/RatisUtil.java   |   2 +-
 .../hadoop/hdds/scm/ha/SCMHAInvocationHandler.java |   9 +-
 .../hadoop/hdds/scm/ha/SCMHAManagerImpl.java       |  35 +-
 .../hadoop/hdds/scm/ha/SCMRatisServerImpl.java     |   3 +-
 .../apache/hadoop/hdds/scm/ha/SCMStateMachine.java |   6 +-
 .../apache/hadoop/hdds/scm/ha/io/CodecFactory.java |   2 +-
 .../hdds/scm/metadata/SCMMetadataStoreImpl.java    |   4 +-
 .../hdds/scm/metadata/X509CertificateCodec.java    |   2 +-
 .../apache/hadoop/hdds/scm/node/CommandQueue.java  |   2 +-
 .../hdds/scm/node/NodeDecommissionManager.java     |  32 +-
 .../apache/hadoop/hdds/scm/node/NodeManager.java   |   2 +-
 .../hadoop/hdds/scm/node/NodeStateManager.java     |   2 +-
 .../apache/hadoop/hdds/scm/node/NodeStatus.java    |   4 +-
 .../hadoop/hdds/scm/node/SCMNodeManager.java       |   8 +-
 .../hadoop/hdds/scm/node/SCMNodeMetrics.java       |   8 +-
 .../hdds/scm/node/SCMNodeStorageStatMap.java       |   2 +-
 .../hdds/scm/node/states/Node2ObjectsMap.java      |   2 +-
 .../hadoop/hdds/scm/node/states/NodeStateMap.java  |   2 +-
 .../hdds/scm/pipeline/PipelineManagerImpl.java     |   2 +-
 .../hdds/scm/pipeline/PipelineReportHandler.java   |   4 +-
 .../scm/pipeline/PipelineStateManagerImpl.java     |   7 +-
 .../hdds/scm/pipeline/RatisPipelineUtils.java      |   2 +-
 .../scm/pipeline/WritableContainerFactory.java     |   2 +-
 ...inerLocationProtocolServerSideTranslatorPB.java |  86 +-
 .../hdds/scm/safemode/ContainerSafeModeRule.java   |   4 +-
 .../hdds/scm/safemode/DataNodeSafeModeRule.java    |   2 +-
 .../hdds/scm/server/SCMBlockProtocolServer.java    |  16 +-
 .../hdds/scm/server/SCMClientProtocolServer.java   |  90 +-
 .../hdds/scm/server/SCMDatanodeProtocolServer.java |   6 +-
 .../hdds/scm/server/StorageContainerManager.java   |  12 +-
 .../server/StorageContainerManagerHttpServer.java  |   4 +-
 .../scm/server/StorageContainerManagerStarter.java |   2 +-
 .../org/apache/hadoop/hdds/scm/HddsTestUtils.java  | 692 +---------------
 .../apache/hadoop/hdds/scm/TestHddsServerUtil.java |   2 +-
 .../hadoop/hdds/scm/TestHddsServerUtils.java       |   2 +-
 .../scm/{HddsTestUtils.java => TestUtils.java}     |  84 +-
 .../hadoop/hdds/scm/block/TestBlockManager.java    |  22 +-
 .../hadoop/hdds/scm/block/TestDeletedBlockLog.java |   4 +-
 .../command/TestCommandStatusReportHandler.java    |   4 +-
 .../hadoop/hdds/scm/container/MockNodeManager.java |  12 +-
 .../hdds/scm/container/SimpleMockNodeManager.java  |   2 +-
 .../container/TestCloseContainerEventHandler.java  |   4 +-
 .../scm/container/TestContainerManagerImpl.java    |   6 +-
 .../scm/container/TestContainerReportHandler.java  |   4 +-
 .../TestIncrementalContainerReportHandler.java     |   8 +-
 .../hdds/scm/container/TestReplicationManager.java | 165 +---
 .../scm/container/TestUnknownContainerReport.java  |   2 +-
 .../container/balancer/TestContainerBalancer.java  | 123 +--
 .../algorithms/TestContainerPlacementFactory.java  |  12 +-
 .../TestSCMContainerPlacementCapacity.java         |  12 +-
 .../TestSCMContainerPlacementRackAware.java        |  24 +-
 .../TestSCMContainerPlacementRandom.java           |  16 +-
 .../replication/TestReplicationManagerMetrics.java |  97 ---
 .../states/TestContainerReplicaCount.java          |  10 -
 .../hdds/scm/crl/TestCRLStatusReportHandler.java   |   4 +-
 .../hadoop/hdds/scm/ha/TestSCMHAConfiguration.java |  26 +-
 .../hadoop/hdds/scm/ha/TestSCMRatisRequest.java    |   2 +-
 .../hdds/scm/metadata/TestPipelineIDCodec.java     |   2 +-
 .../hdds/scm/node/TestContainerPlacement.java      |   8 +-
 .../hdds/scm/node/TestDatanodeAdminMonitor.java    |   4 +-
 .../hadoop/hdds/scm/node/TestDeadNodeHandler.java  |  43 +-
 .../hdds/scm/node/TestNodeDecommissionManager.java |  16 +-
 .../hdds/scm/node/TestNodeReportHandler.java       |  10 +-
 .../hadoop/hdds/scm/node/TestNodeStateManager.java |   2 +-
 .../hadoop/hdds/scm/node/TestSCMNodeManager.java   |  82 +-
 .../hdds/scm/node/TestSCMNodeStorageStatMap.java   |  15 +-
 .../hadoop/hdds/scm/node/TestStatisticsUpdate.java |  16 +-
 .../hdds/scm/node/states/TestNodeStateMap.java     |   4 +-
 .../TestPipelineDatanodesIntersection.java         |   4 +-
 .../hdds/scm/pipeline/TestPipelineManagerImpl.java |  11 +-
 .../scm/pipeline/TestPipelinePlacementPolicy.java  |  16 +-
 .../scm/pipeline/TestRatisPipelineProvider.java    |   4 +-
 .../TestOneReplicaPipelineSafeModeRule.java        |   8 +-
 .../hdds/scm/safemode/TestSCMSafeModeManager.java  |   8 +-
 .../scm/server/TestSCMBlockProtocolServer.java     |   4 +-
 .../hadoop/hdds/scm/server/TestSCMCertStore.java   |   6 +-
 .../server/TestSCMUpdateServiceGrpcServer.java     |  24 +-
 .../TestSCMHAUnfinalizedStateValidationAction.java |   5 +-
 .../scm/upgrade/TestScmStartupSlvLessThanMlv.java  |   6 +-
 .../ozone/container/common/TestEndPoint.java       |  23 +-
 .../testutils/ReplicationNodeManagerMock.java      |   2 +-
 .../hadoop/ozone/scm/node/TestSCMNodeMetrics.java  |  11 +-
 .../org/apache/ozone/test/LambdaTestUtils.java     |   4 +-
 hadoop-hdds/tools/pom.xml                          |   9 -
 .../hdds/scm/cli/ContainerBalancerCommands.java    |  19 +-
 .../scm/cli/ContainerBalancerStartSubcommand.java  |  50 +-
 .../scm/cli/ContainerBalancerStatusSubcommand.java |   2 +-
 .../hdds/scm/cli/ContainerOperationClient.java     |  34 +-
 .../cli/ReplicationManagerStatusSubcommand.java    |   2 +-
 .../hdds/scm/cli/SafeModeCheckSubcommand.java      |   2 +-
 .../hdds/scm/cli/SafeModeExitSubcommand.java       |   2 +-
 .../hdds/scm/cli/container/ContainerCommands.java  |   3 +-
 .../hdds/scm/cli/container/InfoSubcommand.java     |  61 +-
 .../hdds/scm/cli/container/ReportSubcommand.java   | 116 ---
 .../scm/cli/datanode/DecommissionSubCommand.java   |   2 +-
 .../scm/cli/datanode/MaintenanceSubCommand.java    |   4 +-
 .../scm/cli/datanode/RecommissionSubCommand.java   |   2 +-
 .../hdds/scm/cli/container/TestInfoSubCommand.java | 249 ------
 .../scm/cli/container/TestReportSubCommand.java    | 159 ----
 .../datanode/TestContainerBalancerSubCommand.java  |   8 +-
 .../scm/cli/datanode/TestListInfoSubcommand.java   |   6 +-
 hadoop-ozone/client/pom.xml                        |   4 -
 .../apache/hadoop/ozone/client/ObjectStore.java    |   6 +-
 .../apache/hadoop/ozone/client/OzoneBucket.java    |  21 +-
 .../hadoop/ozone/client/OzoneClientFactory.java    |   2 +-
 .../org/apache/hadoop/ozone/client/OzoneKey.java   |   2 +-
 .../apache/hadoop/ozone/client/OzoneVolume.java    |   2 +-
 .../checksum/AbstractBlockChecksumComputer.java    |  43 -
 .../client/checksum/BaseFileChecksumHelper.java    | 200 -----
 .../checksum/ReplicatedBlockChecksumComputer.java  |  72 --
 .../checksum/ReplicatedFileChecksumHelper.java     | 187 -----
 .../hadoop/ozone/client/checksum/package-info.java |  23 -
 .../ozone/client/io/BlockOutputStreamEntry.java    |   6 +-
 .../hadoop/ozone/client/io/KeyInputStream.java     |   8 +-
 .../ozone/client/protocol/ClientProtocol.java      |  27 -
 .../hadoop/ozone/client/rpc/OzoneKMSUtil.java      |   4 +-
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  | 116 +--
 .../hadoop/ozone/client/TestHddsClientUtils.java   |   4 +-
 .../TestReplicatedBlockChecksumComputer.java       |  68 --
 .../checksum/TestReplicatedFileChecksumHelper.java | 323 --------
 .../hadoop/ozone/client/checksum/package-info.java |  23 -
 .../client/src/test/resources/log4j.properties     |  23 +
 .../main/java/org/apache/hadoop/ozone/OmUtils.java |   4 +-
 .../java/org/apache/hadoop/ozone/OzoneAcl.java     |   8 +-
 .../org/apache/hadoop/ozone/om/OMConfigKeys.java   |  11 +-
 .../hadoop/ozone/om/helpers/OMNodeDetails.java     |   6 +-
 .../hadoop/ozone/om/helpers/OmBucketArgs.java      |  46 +-
 .../hadoop/ozone/om/helpers/OmBucketInfo.java      |  11 +-
 .../apache/hadoop/ozone/om/helpers/OmKeyInfo.java  |   2 +-
 .../hadoop/ozone/om/helpers/OmKeyLocationInfo.java |   2 +-
 .../ozone/om/helpers/OmKeyLocationInfoGroup.java   |   4 +-
 .../hadoop/ozone/om/helpers/OmVolumeArgs.java      |   2 +-
 .../hadoop/ozone/om/helpers/OzoneAclUtil.java      |   4 +-
 .../hadoop/ozone/om/helpers/OzoneFSUtils.java      |   2 +-
 .../hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java |   6 +-
 .../hadoop/ozone/om/helpers/ServiceInfo.java       |   2 +-
 .../hadoop/ozone/om/helpers/WithObjectID.java      |   2 +-
 .../ozone/om/protocol/OzoneManagerProtocol.java    |  12 -
 .../ozone/om/protocolPB/OmTransportFactory.java    |  30 +-
 ...OzoneManagerProtocolClientSideTranslatorPB.java |  30 +-
 .../apache/hadoop/ozone/protocolPB/OMPBHelper.java |  14 +-
 .../hadoop/ozone/security/acl/OzoneObjInfo.java    |   2 +-
 .../apache/hadoop/ozone/util/OzoneVersionInfo.java |   2 +-
 .../org/apache/hadoop/ozone/util/RadixTree.java    |   2 +-
 .../ozone/om/ha/TestOMFailoverProxyProvider.java   |   2 +-
 .../hadoop/ozone/om/lock/TestOzoneManagerLock.java |   2 +-
 .../ozone/security/TestGDPRSymmetricKey.java       |   2 +-
 .../ozone/security/acl/TestOzoneObjInfo.java       |   4 +-
 .../apache/hadoop/ozone/util/TestRadixTree.java    |   4 +-
 hadoop-ozone/dev-support/checks/bats.sh            |   8 +-
 hadoop-ozone/dev-support/checks/checkstyle.sh      |   3 -
 hadoop-ozone/dev-support/checks/coverage.sh        |   1 +
 hadoop-ozone/dist/pom.xml                          |   2 +-
 .../dist/src/main/compose/ozone-csi/docker-config  |   1 -
 .../dist/src/main/compose/ozone-ha/docker-config   |   4 -
 .../dist/src/main/compose/ozone/docker-config      |   1 -
 hadoop-ozone/dist/src/main/compose/ozone/test.sh   |   6 +
 .../compose/ozonesecure-ha/keytabs/HTTP.keytab     | Bin 0 -> 144 bytes
 .../main/compose/ozonesecure-ha/keytabs/dn.keytab  | Bin 0 -> 278 bytes
 .../main/compose/ozonesecure-ha/keytabs/om.keytab  | Bin 0 -> 278 bytes
 .../compose/ozonesecure-ha/keytabs/recon.keytab    | Bin 0 -> 296 bytes
 .../main/compose/ozonesecure-ha/keytabs/s3g.keytab | Bin 0 -> 434 bytes
 .../main/compose/ozonesecure-ha/keytabs/scm.keytab | Bin 0 -> 586 bytes
 .../compose/ozonesecure-ha/keytabs/testuser.keytab | Bin 0 -> 152 bytes
 .../ozonesecure-ha/keytabs/testuser2.keytab        | Bin 0 -> 154 bytes
 .../src/main/compose/ozonesecure-mr/docker-config  |   1 -
 .../compose/ozonesecure-mr/keytabs/HTTP.keytab     | Bin 0 -> 144 bytes
 .../main/compose/ozonesecure-mr/keytabs/dn.keytab  | Bin 0 -> 278 bytes
 .../main/compose/ozonesecure-mr/keytabs/om.keytab  | Bin 0 -> 278 bytes
 .../compose/ozonesecure-mr/keytabs/recon.keytab    | Bin 0 -> 296 bytes
 .../main/compose/ozonesecure-mr/keytabs/s3g.keytab | Bin 0 -> 434 bytes
 .../main/compose/ozonesecure-mr/keytabs/scm.keytab | Bin 0 -> 586 bytes
 .../compose/ozonesecure-mr/keytabs/testuser.keytab | Bin 0 -> 152 bytes
 .../ozonesecure-mr/keytabs/testuser2.keytab        | Bin 0 -> 154 bytes
 .../dist/src/main/compose/ozonesecure-mr/test.sh   |   4 +-
 .../src/main/compose/ozonesecure/docker-config     |   2 -
 .../main/compose/ozonesecure/keytabs/HTTP.keytab   | Bin 0 -> 144 bytes
 .../src/main/compose/ozonesecure/keytabs/dn.keytab | Bin 0 -> 278 bytes
 .../src/main/compose/ozonesecure/keytabs/om.keytab | Bin 0 -> 278 bytes
 .../main/compose/ozonesecure/keytabs/recon.keytab  | Bin 0 -> 296 bytes
 .../main/compose/ozonesecure/keytabs/s3g.keytab    | Bin 0 -> 434 bytes
 .../main/compose/ozonesecure/keytabs/scm.keytab    | Bin 0 -> 586 bytes
 .../compose/ozonesecure/keytabs/testuser.keytab    | Bin 0 -> 152 bytes
 .../compose/ozonesecure/keytabs/testuser2.keytab   | Bin 0 -> 154 bytes
 .../main/compose/upgrade/compose/ha/docker-config  |   4 +-
 .../compose/upgrade/compose/non-ha/docker-config   |   2 +-
 hadoop-ozone/dist/src/main/docker/Dockerfile       |   1 +
 .../dist/src/main/dockerlibexec/entrypoint.sh      |   2 +-
 hadoop-ozone/dist/src/main/k8s/examples/testlib.sh |   5 +-
 .../src/main/smoketest/compatibility/read.robot    |   9 +-
 .../src/main/smoketest/compatibility/setup.robot   |  28 -
 .../src/main/smoketest/compatibility/write.robot   |   7 +-
 hadoop-ozone/dist/src/shell/ozone/ozone            |  17 +
 .../apache/hadoop/ozone/MiniOzoneChaosCluster.java |  49 +-
 .../hadoop/ozone/MiniOzoneLoadGenerator.java       |   2 +-
 .../hadoop/ozone/loadgenerators/LoadBucket.java    |   2 +-
 hadoop-ozone/insight/pom.xml                       |   1 +
 hadoop-ozone/integration-test/pom.xml              |  10 +
 .../fs/ozone/TestOzoneFSWithObjectStoreCreate.java |  14 +-
 .../hadoop/fs/ozone/TestOzoneFileInterfaces.java   |   2 +-
 .../hadoop/fs/ozone/TestOzoneFileSystem.java       |  44 +-
 .../apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java  |   8 +-
 .../hadoop/fs/ozone/TestRootedOzoneFileSystem.java |  34 +-
 .../contract/ITestOzoneContractDistCpWithFSO.java  |  65 --
 .../fs/ozone/contract/ITestOzoneContractUtils.java |   4 +-
 .../hadoop/fs/ozone/contract/OzoneContract.java    |  22 +-
 .../metrics/TestSCMContainerManagerMetrics.java    |   2 -
 .../hdds/scm/pipeline/TestLeaderChoosePolicy.java  |   2 +-
 .../hdds/scm/pipeline/TestMultiRaftSetup.java      |   3 +-
 .../hdds/scm/pipeline/TestPipelineClose.java       |   4 +-
 .../TestRatisPipelineCreateAndDestroy.java         |   2 +-
 .../safemode/TestSCMSafeModeWithPipelineRules.java |   6 +-
 .../hadoop/hdds/upgrade/TestHDDSUpgrade.java       |   8 +-
 .../org/apache/hadoop/ozone/MiniOzoneCluster.java  |   2 +-
 .../apache/hadoop/ozone/MiniOzoneClusterImpl.java  |  24 +-
 .../hadoop/ozone/MiniOzoneClusterProvider.java     |   4 +-
 .../hadoop/ozone/MiniOzoneHAClusterImpl.java       | 378 +++------
 .../hadoop/ozone/MiniOzoneOMHAClusterImpl.java     | 116 +++
 .../org/apache/hadoop/ozone/OzoneTestUtils.java    |  25 +-
 .../ozone/TestContainerBalancerOperations.java     |  23 +-
 .../hadoop/ozone/TestContainerOperations.java      |   2 +-
 .../java/org/apache/hadoop/ozone/TestDataUtil.java |   2 +-
 .../apache/hadoop/ozone/TestMiniOzoneCluster.java  |   6 +-
 .../hadoop/ozone/TestMiniOzoneOMHACluster.java     |   6 +-
 .../hadoop/ozone/TestOzoneConfigurationFields.java |   3 +-
 .../hadoop/ozone/TestSecureOzoneCluster.java       |  37 +-
 .../hadoop/ozone/TestStorageContainerManager.java  |  11 +-
 .../ozone/client/CertificateClientTestImpl.java    |   4 +-
 .../apache/hadoop/ozone/client/rpc/TestBCSID.java  |   7 +-
 .../hadoop/ozone/client/rpc/TestCommitWatcher.java |   2 +-
 .../client/rpc/TestContainerStateMachine.java      |   5 +-
 .../rpc/TestContainerStateMachineFailures.java     | 161 +---
 .../rpc/TestContainerStateMachineFlushDelay.java   |   4 +-
 .../client/rpc/TestDeleteWithSlowFollower.java     |  11 +-
 .../client/rpc/TestDiscardPreallocatedBlocks.java  |   2 +-
 .../client/rpc/TestFailureHandlingByClient.java    |   2 +-
 .../client/rpc/TestOzoneAtRestEncryption.java      |  22 +-
 .../rpc/TestOzoneClientMultipartUploadWithFSO.java |  56 +-
 .../rpc/TestOzoneClientRetriesOnExceptions.java    |   4 +-
 .../client/rpc/TestOzoneRpcClientAbstract.java     | 175 ++--
 .../rpc/TestOzoneRpcClientForAclAuditLog.java      |  18 +-
 .../TestOzoneRpcClientWithKeyLatestVersion.java    |   2 +-
 .../hadoop/ozone/client/rpc/TestReadRetries.java   |   6 +-
 .../ozone/client/rpc/TestSecureOzoneRpcClient.java |  12 +-
 .../ozone/client/rpc/TestWatchForCommit.java       |   2 +-
 .../client/rpc/read/TestChunkInputStream.java      |   4 +-
 .../ozone/client/rpc/read/TestInputStreamBase.java |  17 +-
 .../ozone/client/rpc/read/TestKeyInputStream.java  |  12 +-
 .../apache/hadoop/ozone/container/TestHelper.java  |   7 +-
 .../commandhandler/TestBlockDeletion.java          |   1 -
 .../TestCloseContainerByPipeline.java              |   2 +-
 .../commandhandler/TestCloseContainerHandler.java  |   4 +-
 .../commandhandler/TestDeleteContainerHandler.java |   5 +-
 .../container/metrics/TestContainerMetrics.java    |   5 +-
 .../container/ozoneimpl/TestOzoneContainer.java    |   2 +-
 .../container/server/TestContainerServer.java      |   3 +-
 .../server/TestSecureContainerServer.java          |   7 +-
 .../ozone/dn/ratis/TestDnRatisLogParser.java       |   2 -
 .../hadoop/ozone/dn/scrubber/TestDataScrubber.java |   7 +-
 .../TestDatanodeHddsVolumeFailureDetection.java    |   9 +-
 .../freon/TestHadoopDirTreeGeneratorWithFSO.java   |   4 +-
 .../ozone/freon/TestHadoopNestedDirGenerator.java  |  26 +-
 .../ozone/om/TestContainerReportWithKeys.java      |   2 +-
 .../apache/hadoop/ozone/om/TestKeyManagerImpl.java | 387 +++++----
 .../org/apache/hadoop/ozone/om/TestKeyPurging.java |   2 +-
 .../hadoop/ozone/om/TestOMRatisSnapshots.java      |   6 +-
 .../hadoop/ozone/om/TestOMUpgradeFinalization.java |   6 +-
 .../hadoop/ozone/om/TestObjectStoreWithFSO.java    |   8 +-
 .../org/apache/hadoop/ozone/om/TestOmLDBCli.java   |  18 +-
 .../org/apache/hadoop/ozone/om/TestOmMetrics.java  |   2 +-
 .../ozone/om/TestOmStartupSlvLessThanMlv.java      |   6 +-
 .../ozone/om/TestOzoneManagerConfiguration.java    |   4 +-
 .../apache/hadoop/ozone/om/TestOzoneManagerHA.java |  12 +-
 .../ozone/om/TestOzoneManagerHAMetadataOnly.java   |   2 +-
 .../hadoop/ozone/om/TestOzoneManagerHAWithACL.java |   8 +-
 .../ozone/om/TestOzoneManagerHAWithData.java       |   4 +-
 .../ozone/om/TestOzoneManagerHAWithFailover.java   |   2 +-
 .../hadoop/ozone/om/TestOzoneManagerPrepare.java   |   2 +-
 .../ozone/om/TestOzoneManagerRestInterface.java    |   2 +-
 .../hadoop/ozone/om/TestRecursiveAclWithFSO.java   |   4 +-
 .../om/ratis/TestOzoneManagerRatisRequest.java     |   4 +-
 .../snapshot/TestOzoneManagerSnapshotProvider.java |   8 +-
 .../hadoop/ozone/recon/TestReconScmHASnapshot.java |  70 --
 .../hadoop/ozone/recon/TestReconScmSnapshot.java   | 133 ---
 .../ozone/recon/TestReconWithOzoneManager.java     |  10 +-
 .../ozone/recon/TestReconWithOzoneManagerFSO.java  |  14 +-
 .../ozone/recon/TestReconWithOzoneManagerHA.java   |  12 +-
 .../hadoop/ozone/scm/TestAllocateContainer.java    |   2 +-
 .../hadoop/ozone/scm/TestCloseContainer.java       |   4 +-
 .../hadoop/ozone/scm/TestFailoverWithSCMHA.java    |   2 +-
 .../ozone/scm/TestSCMInstallSnapshotWithHA.java    |  18 +-
 .../org/apache/hadoop/ozone/scm/TestSCMMXBean.java |   6 +-
 .../ozone/scm/TestStorageContainerManagerHA.java   |   4 +-
 .../hadoop/ozone/scm/TestXceiverClientGrpc.java    |  10 +-
 .../scm/node/TestDecommissionAndMaintenance.java   |  24 +-
 .../hadoop/ozone/scm/node/TestQueryNode.java       |   2 +-
 .../hadoop/ozone/shell/TestNSSummaryAdmin.java     |   4 +-
 .../hadoop/ozone/shell/TestOzoneShellHA.java       |  16 +-
 .../src/main/proto/OmClientProtocol.proto          |   4 +-
 .../hadoop/ozone/om/codec/TestOmKeyInfoCodec.java  |   4 +-
 .../ozone/om/codec/TestRepeatedOmKeyInfoCodec.java |   4 +-
 .../ozone/om/helpers/TestInstanceHelper.java       |  68 ++
 .../hadoop/ozone/om/helpers/TestOmPrefixInfo.java  |  39 +-
 .../apache/hadoop/ozone/om/BucketManagerImpl.java  |   2 +-
 .../org/apache/hadoop/ozone/om/KeyManager.java     | 111 +++
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 922 ++++++++++++++++++++-
 .../java/org/apache/hadoop/ozone/om/OMMetrics.java |   8 +-
 .../hadoop/ozone/om/OmMetadataManagerImpl.java     |  22 +-
 .../org/apache/hadoop/ozone/om/OzoneAclUtils.java  |   2 +-
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |  18 +-
 .../hadoop/ozone/om/OzoneManagerHttpServer.java    |   4 +-
 .../hadoop/ozone/om/OzoneManagerPrepareState.java  |   4 +-
 .../hadoop/ozone/om/OzoneManagerStarter.java       |   2 +-
 .../hadoop/ozone/om/S3SecretManagerImpl.java       |   2 +-
 .../hadoop/ozone/om/TrashOzoneFileSystem.java      |  12 +-
 .../apache/hadoop/ozone/om/TrashPolicyOzone.java   |  12 +-
 .../apache/hadoop/ozone/om/fs/OzoneManagerFS.java  |   6 +
 .../ozone/om/ratis/OzoneManagerDoubleBuffer.java   |   4 +-
 .../ozone/om/ratis/OzoneManagerStateMachine.java   |   6 +-
 .../om/ratis/utils/OzoneManagerRatisUtils.java     |   9 +-
 .../hadoop/ozone/om/request/OMClientRequest.java   |   6 +-
 .../om/request/bucket/OMBucketCreateRequest.java   |   6 +-
 .../om/request/bucket/OMBucketSetOwnerRequest.java | 199 -----
 .../request/bucket/OMBucketSetPropertyRequest.java |   6 +-
 .../request/bucket/acl/OMBucketSetAclRequest.java  |   2 +-
 .../om/request/file/OMDirectoryCreateRequest.java  |   2 +-
 .../ozone/om/request/file/OMFileCreateRequest.java |   2 +-
 .../ozone/om/request/file/OMFileRequest.java       |  10 +-
 .../ozone/om/request/key/OMKeyCommitRequest.java   |   4 +-
 .../ozone/om/request/key/OMKeyCreateRequest.java   |   2 +-
 .../ozone/om/request/key/OMKeyRenameRequest.java   |   2 +-
 .../om/request/key/OMKeyRenameRequestWithFSO.java  |   2 +-
 .../hadoop/ozone/om/request/key/OMKeyRequest.java  |   8 +-
 .../ozone/om/request/key/acl/OMKeyAclRequest.java  |   3 +-
 .../multipart/S3MultipartUploadAbortRequest.java   |   2 +-
 .../om/request/volume/OMVolumeSetQuotaRequest.java |   6 +-
 .../om/request/volume/acl/OMVolumeAclRequest.java  |   2 +-
 .../request/volume/acl/OMVolumeAddAclRequest.java  |   2 +-
 .../volume/acl/OMVolumeRemoveAclRequest.java       |   2 +-
 .../request/volume/acl/OMVolumeSetAclRequest.java  |   2 +-
 .../response/bucket/OMBucketSetOwnerResponse.java  |  80 --
 .../key/OMOpenKeysDeleteRequest.java               |   4 +-
 .../OzoneDelegationTokenSecretManager.java         |   2 +-
 .../hadoop/ozone/security/OzoneSecretStore.java    |   4 +-
 .../org/apache/hadoop/ozone/om/OmTestManagers.java | 122 ---
 .../hadoop/ozone/om/TestBucketManagerImpl.java     |   6 +-
 .../hadoop/ozone/om/TestKeyDeletingService.java    | 106 +--
 .../apache/hadoop/ozone/om/TestKeyManagerUnit.java |  72 +-
 .../apache/hadoop/ozone/om/TestOMDBDefinition.java |   2 +-
 .../hadoop/ozone/om/TestOmMetadataManager.java     |  50 +-
 .../apache/hadoop/ozone/om/TestTrashService.java   |  40 +-
 .../hadoop/ozone/om/failover/TestOMFailovers.java  |   2 +-
 ...tOzoneManagerDoubleBufferWithDummyResponse.java |   2 +-
 ...TestOzoneManagerDoubleBufferWithOMResponse.java |  12 +-
 .../om/ratis/TestOzoneManagerRatisServer.java      |   2 +-
 .../om/ratis/TestOzoneManagerStateMachine.java     |   2 +-
 .../request/TestOMClientRequestWithUserInfo.java   |   2 +-
 ...questTestUtils.java => TestOMRequestUtils.java} |  10 +-
 .../request/bucket/TestOMBucketCreateRequest.java  |   8 +-
 .../bucket/TestOMBucketCreateRequestWithFSO.java   |   4 +-
 .../request/bucket/TestOMBucketDeleteRequest.java  |   6 +-
 .../bucket/TestOMBucketSetPropertyRequest.java     |   8 +-
 .../bucket/acl/TestOMBucketAddAclRequest.java      |  12 +-
 .../bucket/acl/TestOMBucketRemoveAclRequest.java   |  14 +-
 .../bucket/acl/TestOMBucketSetAclRequest.java      |  12 +-
 .../request/file/TestOMDirectoryCreateRequest.java |  24 +-
 .../file/TestOMDirectoryCreateRequestWithFSO.java  |  48 +-
 .../om/request/file/TestOMFileCreateRequest.java   |  36 +-
 .../file/TestOMFileCreateRequestWithFSO.java       |  22 +-
 .../om/request/key/TestOMAllocateBlockRequest.java |  12 +-
 .../key/TestOMAllocateBlockRequestWithFSO.java     |  10 +-
 .../ozone/om/request/key/TestOMKeyAclRequest.java  |  10 +-
 .../om/request/key/TestOMKeyAclRequestWithFSO.java |   8 +-
 .../om/request/key/TestOMKeyCommitRequest.java     |  16 +-
 .../request/key/TestOMKeyCommitRequestWithFSO.java |  10 +-
 .../om/request/key/TestOMKeyCreateRequest.java     |  10 +-
 .../request/key/TestOMKeyCreateRequestWithFSO.java |   6 +-
 .../om/request/key/TestOMKeyDeleteRequest.java     |  10 +-
 .../request/key/TestOMKeyDeleteRequestWithFSO.java |  14 +-
 .../key/TestOMKeyPurgeRequestAndResponse.java      |  10 +-
 .../om/request/key/TestOMKeyRenameRequest.java     |  15 +-
 .../om/request/key/TestOMKeysDeleteRequest.java    |   6 +-
 .../om/request/key/TestOMKeysRenameRequest.java    |   6 +-
 .../request/key/TestOMOpenKeysDeleteRequest.java   |  13 +-
 .../om/request/key/TestOMPrefixAclRequest.java     |   6 +-
 .../TestS3InitiateMultipartUploadRequest.java      |   6 +-
 ...estS3InitiateMultipartUploadRequestWithFSO.java |   4 +-
 .../s3/multipart/TestS3MultipartRequest.java       |  12 +-
 .../TestS3MultipartUploadAbortRequest.java         |   8 +-
 .../TestS3MultipartUploadAbortRequestWithFSO.java  |   4 +-
 .../TestS3MultipartUploadCommitPartRequest.java    |  12 +-
 ...tS3MultipartUploadCommitPartRequestWithFSO.java |  10 +-
 .../TestS3MultipartUploadCompleteRequest.java      |  14 +-
 ...estS3MultipartUploadCompleteRequestWithFSO.java |   8 +-
 .../upgrade/TestOMCancelPrepareRequest.java        |   2 +-
 .../request/volume/TestOMVolumeCreateRequest.java  |   6 +-
 .../request/volume/TestOMVolumeDeleteRequest.java  |  12 +-
 .../volume/TestOMVolumeSetOwnerRequest.java        |  27 +-
 .../volume/TestOMVolumeSetQuotaRequest.java        |  29 +-
 .../volume/acl/TestOMVolumeAddAclRequest.java      |  12 +-
 .../volume/acl/TestOMVolumeRemoveAclRequest.java   |  14 +-
 .../volume/acl/TestOMVolumeSetAclRequest.java      |  12 +-
 .../ozone/om/response/TestCleanupTableInfo.java    |   4 +-
 .../file/TestOMDirectoryCreateResponse.java        |   4 +-
 .../file/TestOMDirectoryCreateResponseWithFSO.java |   4 +-
 .../file/TestOMFileCreateResponseWithFSO.java      |   4 +-
 .../response/key/TestOMAllocateBlockResponse.java  |   4 +-
 .../key/TestOMAllocateBlockResponseWithFSO.java    |   4 +-
 .../om/response/key/TestOMKeyCommitResponse.java   |   6 +-
 .../key/TestOMKeyCommitResponseWithFSO.java        |   8 +-
 .../key/TestOMKeyCreateResponseWithFSO.java        |   4 +-
 .../om/response/key/TestOMKeyDeleteResponse.java   |   4 +-
 .../key/TestOMKeyDeleteResponseWithFSO.java        |  12 +-
 .../om/response/key/TestOMKeyRenameResponse.java   |  14 +-
 .../ozone/om/response/key/TestOMKeyResponse.java   |   4 +-
 .../om/response/key/TestOMKeysDeleteResponse.java  |   4 +-
 .../om/response/key/TestOMKeysRenameResponse.java  |   8 +-
 .../response/key/TestOMOpenKeysDeleteResponse.java |   8 +-
 ...S3MultipartUploadCommitPartResponseWithFSO.java |  10 +-
 ...stS3MultipartUploadCompleteResponseWithFSO.java |  18 +-
 .../ozone/om/upgrade/TestOMUpgradeFinalizer.java   |   2 +-
 .../om/upgrade/TestOzoneManagerPrepareState.java   |   2 +-
 .../TestOzoneDelegationTokenSecretManager.java     |   6 +-
 .../ozone/security/TestOzoneTokenIdentifier.java   |   6 +-
 .../security/acl/TestOzoneNativeAuthorizer.java    |  89 +-
 .../hadoop/ozone/security/acl/TestParentAcl.java   |  50 +-
 .../hadoop/ozone/security/acl/TestVolumeOwner.java |  51 +-
 .../fs/ozone/BasicOzoneClientAdapterImpl.java      |   2 +-
 .../hadoop/fs/ozone/BasicOzoneFileSystem.java      |   4 +-
 .../apache/hadoop/fs/ozone/OzoneClientUtils.java   |   2 +-
 .../apache/hadoop/fs/ozone/OzoneFSInputStream.java |   2 +-
 .../org/hadoop/ozone/recon/codegen/SqlDbUtils.java |   4 +-
 .../apache/hadoop/ozone/recon/ReconConstants.java  |   2 -
 .../hadoop/ozone/recon/ReconServerConfigKeys.java  |  23 -
 .../ozone/recon/api/MetricsProxyEndpoint.java      |   4 +-
 .../hadoop/ozone/recon/api/NSSummaryEndpoint.java  |   2 +-
 .../hadoop/ozone/recon/api/PipelineEndpoint.java   |   2 +-
 .../hadoop/ozone/recon/codec/NSSummaryCodec.java   |   2 +-
 .../ozone/recon/fsck/ContainerHealthTask.java      |   6 +-
 .../ozone/recon/scm/ReconContainerManager.java     |   4 +-
 .../hadoop/ozone/recon/scm/ReconNodeManager.java   |  11 -
 .../ozone/recon/scm/ReconPipelineFactory.java      |   2 +-
 .../scm/ReconStorageContainerManagerFacade.java    | 129 +--
 .../recon/spi/StorageContainerServiceProvider.java |  12 -
 .../recon/spi/impl/ContainerKeyPrefixCodec.java    |   2 +-
 .../spi/impl/OzoneManagerServiceProviderImpl.java  |  61 +-
 .../impl/StorageContainerServiceProviderImpl.java  | 139 +---
 .../ozone/recon/tasks/FileSizeCountTask.java       |   4 +-
 .../hadoop/ozone/recon/tasks/TableCountTask.java   |   2 +-
 .../ozone/recon/api/TestTaskStatusService.java     |   2 +-
 .../ozone/recon/fsck/TestContainerHealthTask.java  |   2 +-
 .../TestUtilizationSchemaDefinition.java           |   2 +-
 .../impl/TestOzoneManagerServiceProviderImpl.java  |  88 +-
 .../TestStorageContainerServiceProviderImpl.java   |  12 -
 .../recon/tasks/TestContainerKeyMapperTask.java    |   2 +-
 .../ozone/recon/tasks/TestOMDBUpdatesHandler.java  |   4 +-
 .../ozone/recon/tasks/TestTableCountTask.java      |   2 +-
 .../apache/hadoop/ozone/s3/OzoneClientCache.java   |  10 +-
 .../hadoop/ozone/s3/OzoneClientProducer.java       |  23 +-
 .../hadoop/ozone/s3/OzoneServiceProvider.java      |  78 ++
 .../hadoop/ozone/s3/VirtualHostStyleFilter.java    |   8 +-
 .../hadoop/ozone/s3/endpoint/BucketEndpoint.java   |  19 +-
 .../hadoop/ozone/s3/endpoint/EndpointBase.java     |   2 +-
 .../hadoop/ozone/s3/endpoint/ObjectEndpoint.java   |  43 +-
 .../org/apache/hadoop/ozone/s3/endpoint/S3Acl.java |   6 +-
 .../hadoop/ozone/s3/endpoint/S3BucketAcl.java      |   4 +-
 .../hadoop/ozone/s3/exception/S3ErrorTable.java    |  12 +-
 .../hadoop/ozone/s3/signature/Credential.java      |   2 +-
 .../hadoop/ozone/client/OzoneBucketStub.java       |   2 +-
 .../ozone/protocolPB/TestGrpcOmTransport.java      |   5 -
 .../hadoop/ozone/s3/TestOzoneClientProducer.java   | 131 +--
 .../ozone/s3/TestVirtualHostStyleFilter.java       |   2 +-
 .../s3/commontypes/TestObjectKeyNameAdapter.java   |   2 +-
 .../hadoop/ozone/s3/endpoint/TestObjectHead.java   |   2 +-
 .../hadoop/ozone/s3/endpoint/TestRootList.java     |   2 +-
 .../tools/dev-support/findbugsExcludeFile.xml      |   4 +
 hadoop-ozone/tools/pom.xml                         |  15 +
 .../ozone/admin/om/FinalizeUpgradeSubCommand.java  |   6 +-
 .../admin/scm/FinalizeScmUpgradeSubcommand.java    |   6 +-
 .../admin/scm/FinalizeUpgradeCommandUtil.java      |   4 +-
 .../hadoop/ozone/audit/parser/AuditParser.java     |   2 +-
 .../ozone/audit/parser/common/DatabaseHelper.java  |  16 +-
 .../parser/handler/TemplateCommandHandler.java     |   2 +-
 .../ozone/audit/parser/model/AuditEntry.java       |  22 +-
 .../apache/hadoop/ozone/debug/ChunkKeyHandler.java |  12 +-
 .../org/apache/hadoop/ozone/debug/DBScanner.java   |   8 +-
 .../apache/hadoop/ozone/debug/PrefixParser.java    |   2 +-
 .../apache/hadoop/ozone/debug/ReadReplicas.java    | 247 ------
 .../hadoop/ozone/freon/BaseFreonGenerator.java     |   2 +-
 .../hadoop/ozone/freon/ChunkManagerDiskWrite.java  |   8 +-
 .../hadoop/ozone/freon/DatanodeChunkGenerator.java |  12 +-
 .../hadoop/ozone/freon/HadoopDirTreeGenerator.java |   2 +-
 .../containergenerator/GeneratorDatanode.java      |   6 +-
 .../GenerateOzoneRequiredConfigurations.java       |   2 +-
 .../hadoop/ozone/genesis/BenchMarkCRCBatch.java    | 141 ++++
 .../ozone/genesis/BenchMarkCRCStreaming.java       | 173 ++++
 .../ozone/genesis/BenchMarkContainerStateMap.java  | 199 +++++
 .../ozone/genesis/BenchMarkDatanodeDispatcher.java | 339 ++++++++
 .../ozone/genesis/BenchMarkOzoneManager.java       | 193 +++++
 .../apache/hadoop/ozone/genesis/BenchMarkSCM.java  | 126 +++
 .../ozone/genesis/BenchmarkBlockDataToString.java  | 166 ++++
 .../ozone/genesis/BenchmarkChunkManager.java       | 180 ++++
 .../org/apache/hadoop/ozone/genesis/Genesis.java   | 108 +++
 .../ozone/genesis/GenesisMemoryProfiler.java       |  61 ++
 .../apache/hadoop/ozone/genesis/GenesisUtil.java   | 162 ++++
 .../apache/hadoop/ozone/genesis/package-info.java  |  34 +-
 .../apache/hadoop/ozone/shell/OzoneAddress.java    |   4 +-
 .../hadoop/ozone/shell/bucket/BucketCommands.java  |   3 +-
 .../ozone/shell/bucket/CreateBucketHandler.java    |   2 +-
 .../ozone/shell/bucket/UpdateBucketHandler.java    |  62 --
 .../hadoop/ozone/audit/parser/TestAuditParser.java |   2 +-
 .../hadoop/ozone/conf/TestGetConfOptions.java      |   4 +-
 .../TestGenerateOzoneRequiredConfigurations.java   |   4 +-
 .../org/apache/hadoop/test/OzoneTestDriver.java    |   6 +-
 pom.xml                                            |  13 +
 744 files changed, 6698 insertions(+), 9535 deletions(-)

diff --git a/.github/workflows/post-commit.yml b/.github/workflows/post-commit.yml
index 318a7869d4..6d4a22921b 100644
--- a/.github/workflows/post-commit.yml
+++ b/.github/workflows/post-commit.yml
@@ -125,12 +125,6 @@ jobs:
     steps:
       - name: Checkout project
         uses: actions/checkout@v2
-        if: matrix.check != 'bats'
-      - name: Checkout project with history
-        uses: actions/checkout@v2
-        with:
-          fetch-depth: 0
-        if: matrix.check == 'bats'
       - name: Cache for maven dependencies
         uses: actions/cache@v2
         with:
diff --git a/.gitignore b/.gitignore
index 1ec550bb99..a302cc04ed 100644
--- a/.gitignore
+++ b/.gitignore
@@ -68,7 +68,6 @@ hadoop-ozone/recon/node_modules
 .mvn
 
 .dev-tools
-dev-support/ci/bats-assert
-dev-support/ci/bats-support
+
 
 hadoop-ozone/dist/src/main/license/current.txt
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index af9958941f..f467c80a70 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -20,8 +20,7 @@ We welcome contributions of:
       * Unit Tests (JUnit / Java)
       * Acceptance Tests (docker + robot framework)
       * Blockade tests (python + blockade) 
-      * Performance: We have multiple type of load generator / benchmark tools (`ozone freon`),
-        which can be used to test cluster and report problems.
+      * Performance: We have multiple type of load generator / benchmark tools (`ozone freon`, `ozone genesis`), which can be used to test cluster and report problems.
  * **Bug reports** pointing out broken functionality, docs, or suggestions for improvements are always welcome!
  
 ## Who To Contact
diff --git a/dev-support/ci/selective_ci_checks.bats b/dev-support/ci/selective_ci_checks.bats
index 809312308e..7e2dff321e 100644
--- a/dev-support/ci/selective_ci_checks.bats
+++ b/dev-support/ci/selective_ci_checks.bats
@@ -66,17 +66,6 @@ load bats-assert/load.bash
   assert_output -p needs-kubernetes-tests=true
 }
 
-@test "runner image update" {
-  run dev-support/ci/selective_ci_checks.sh b95eeba82a
-
-  assert_output -p 'basic-checks=["rat"]'
-  assert_output -p needs-build=true
-  assert_output -p needs-compose-tests=true
-  assert_output -p needs-dependency-check=true
-  assert_output -p needs-integration-tests=false
-  assert_output -p needs-kubernetes-tests=true
-}
-
 @test "check script" {
   run dev-support/ci/selective_ci_checks.sh 316899152
 
@@ -88,21 +77,10 @@ load bats-assert/load.bash
   assert_output -p needs-kubernetes-tests=true
 }
 
-@test "integration and unit" {
-  run dev-support/ci/selective_ci_checks.sh 9aebf6e25
-
-  assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","unit"]'
-  assert_output -p needs-build=false
-  assert_output -p needs-compose-tests=false
-  assert_output -p needs-dependency-check=false
-  assert_output -p needs-integration-tests=true
-  assert_output -p needs-kubernetes-tests=false
-}
-
 @test "integration only" {
   run dev-support/ci/selective_ci_checks.sh 61396ba9f
 
-  assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs"]'
+  assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","unit"]'
   assert_output -p needs-build=false
   assert_output -p needs-compose-tests=false
   assert_output -p needs-dependency-check=false
diff --git a/dev-support/ci/selective_ci_checks.sh b/dev-support/ci/selective_ci_checks.sh
index 3989afe36b..4a490cd56a 100755
--- a/dev-support/ci/selective_ci_checks.sh
+++ b/dev-support/ci/selective_ci_checks.sh
@@ -219,10 +219,7 @@ function get_count_compose_files() {
     start_end::group_start "Count compose files"
     local pattern_array=(
         "^hadoop-ozone/dev-support/checks/acceptance.sh"
-        "^hadoop-ozone/dist"
-    )
-    local ignore_array=(
-        "^hadoop-ozone/dist/src/main/k8s"
+        "^hadoop-ozone/dist/src/main/compose"
     )
     filter_changed_files true
     COUNT_COMPOSE_CHANGED_FILES=${match_count}
@@ -261,10 +258,7 @@ function get_count_kubernetes_files() {
     start_end::group_start "Count kubernetes files"
     local pattern_array=(
         "^hadoop-ozone/dev-support/checks/kubernetes.sh"
-        "^hadoop-ozone/dist"
-    )
-    local ignore_array=(
-        "^hadoop-ozone/dist/src/main/compose"
+        "^hadoop-ozone/dist/src/main/k8s"
     )
     filter_changed_files true
     COUNT_KUBERNETES_CHANGED_FILES=${match_count}
@@ -338,9 +332,6 @@ function check_needs_checkstyle() {
         "pom.xml"
         "src/..../java"
     )
-    local ignore_array=(
-        "^hadoop-ozone/dist"
-    )
     filter_changed_files
 
     if [[ ${match_count} != "0" ]]; then
@@ -382,9 +373,6 @@ function check_needs_findbugs() {
         "pom.xml"
         "src/..../java"
     )
-    local ignore_array=(
-        "^hadoop-ozone/dist"
-    )
     filter_changed_files
 
     if [[ ${match_count} != "0" ]]; then
@@ -403,11 +391,6 @@ function check_needs_unit_test() {
         "src/..../java"
         "src/..../resources"
     )
-    local ignore_array=(
-        "^hadoop-ozone/dist"
-        "^hadoop-ozone/fault-injection-test/mini-chaos-tests"
-        "^hadoop-ozone/integration-test"
-    )
     filter_changed_files
 
     if [[ ${match_count} != "0" ]]; then
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index a268495546..f19853c1aa 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -159,7 +159,7 @@ public class XceiverClientGrpc extends XceiverClientSpi {
 
   private synchronized void connectToDatanode(DatanodeDetails dn)
       throws IOException {
-    if (isConnected(dn)) {
+    if (isConnected(dn)){
       return;
     }
     // read port from the data node, on failure use default configured
@@ -269,10 +269,10 @@ public class XceiverClientGrpc extends XceiverClientSpi {
         Thread.currentThread().interrupt();
       }
     }
-    try {
+    try{
       for (Map.Entry<DatanodeDetails,
               CompletableFuture<ContainerCommandResponseProto> >
-              entry : futureHashMap.entrySet()) {
+              entry : futureHashMap.entrySet()){
         responseProtoHashMap.put(entry.getKey(), entry.getValue().get());
       }
     } catch (InterruptedException e) {
@@ -538,7 +538,7 @@ public class XceiverClientGrpc extends XceiverClientSpi {
   }
 
   private synchronized void checkOpen(DatanodeDetails dn)
-      throws IOException {
+      throws IOException{
     if (closed) {
       throw new IOException("This channel is not connected.");
     }
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
index 07fd0a8c2d..6b74adb07f 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
@@ -206,8 +206,8 @@ public final class HddsClientUtils {
     if (keyName == null) {
       throw new IllegalArgumentException("Key name is null");
     }
-    if (!OzoneConsts.KEYNAME_ILLEGAL_CHARACTER_CHECK_REGEX
-            .matcher(keyName).matches()) {
+    if(!OzoneConsts.KEYNAME_ILLEGAL_CHARACTER_CHECK_REGEX
+            .matcher(keyName).matches()){
       throw new IllegalArgumentException("Invalid key name: " + keyName);
     }
   }
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java
index bd97cf248d..7475db27d0 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java
@@ -152,7 +152,7 @@ public class BlockInputStream extends InputStream
         // retry according to retry policy.
         chunks = getChunkInfos();
         break;
-      } catch (SCMSecurityException ex) {
+      } catch(SCMSecurityException ex) {
         throw ex;
       } catch (StorageContainerException ex) {
         refreshPipeline(ex);
@@ -340,9 +340,9 @@ public class BlockInputStream extends InputStream
         } else {
           throw e;
         }
-      } catch (SCMSecurityException ex) {
+      } catch(SCMSecurityException ex) {
         throw ex;
-      } catch (IOException ex) {
+      } catch(IOException ex) {
         // We got a IOException which might be due
         // to DN down or connectivity issue.
         if (shouldRetryRead(ex)) {
@@ -512,7 +512,7 @@ public class BlockInputStream extends InputStream
   }
 
   @Override
-  public synchronized void unbuffer() {
+  public void unbuffer() {
     storePosition();
     releaseClient();
 
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
index 8b3f817a2e..859d8080e6 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
@@ -487,7 +487,7 @@ public class BlockOutputStream extends OutputStream {
       } catch (Throwable e) {
         String msg = "Failed to flush. error: " + e.getMessage();
         LOG.error(msg, e);
-        throw e;
+        throw new RuntimeException(msg, e);
       }
     }
   }
@@ -553,7 +553,7 @@ public class BlockOutputStream extends OutputStream {
       } catch (Throwable e) {
         String msg = "Failed to flush. error: " + e.getMessage();
         LOG.error(msg, e);
-        throw e;
+        throw new RuntimeException(msg, e);
       } finally {
         cleanup(false);
       }
@@ -708,7 +708,7 @@ public class BlockOutputStream extends OutputStream {
       boolean processExecutionException)
       throws IOException {
     LOG.error("Command execution was interrupted.");
-    if (processExecutionException) {
+    if(processExecutionException) {
       handleExecutionException(ex);
     } else {
       throw new IOException(EXCEPTION_MSG + ex.toString(), ex);
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java
index a520f8a6a5..94fa87a71e 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java
@@ -46,7 +46,7 @@ public class BufferPool {
   }
 
   public BufferPool(int bufferSize, int capacity,
-      Function<ByteBuffer, ByteString> byteStringConversion) {
+      Function<ByteBuffer, ByteString> byteStringConversion){
     this.capacity = capacity;
     this.bufferSize = bufferSize;
     bufferList = new ArrayList<>(capacity);
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java
index 802adc11f5..7238f2a2a0 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java
@@ -97,7 +97,7 @@ public class RatisBlockOutputStream extends BlockOutputStream {
 
   @Override
   XceiverClientReply sendWatchForCommit(boolean bufferFull) throws IOException {
-    return bufferFull ? commitWatcher.watchOnFirstIndex()
+    return bufferFull? commitWatcher.watchOnFirstIndex()
         : commitWatcher.watchOnLastIndex();
   }
 
diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml
index 5900327492..8f93610111 100644
--- a/hadoop-hdds/common/pom.xml
+++ b/hadoop-hdds/common/pom.xml
@@ -166,21 +166,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>junit-jupiter-api</artifactId>
       <scope>test</scope>
     </dependency>
-    <dependency>
-      <groupId>org.junit.jupiter</groupId>
-      <artifactId>junit-jupiter-engine</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.junit.vintage</groupId>
-      <artifactId>junit-vintage-engine</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.junit.platform</groupId>
-      <artifactId>junit-platform-launcher</artifactId>
-      <scope>test</scope>
-    </dependency>
     <dependency>
       <groupId>io.jaegertracing</groupId>
       <artifactId>jaeger-client</artifactId>
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index d1e3c19282..f538595db5 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -151,7 +151,7 @@ public final class HddsConfigKeys {
    */
   public static final String HDDS_X509_MAX_DURATION = "hdds.x509.max.duration";
   // Limit Certificate duration to a max value of 5 years.
-  public static final String HDDS_X509_MAX_DURATION_DEFAULT = "P1865D";
+  public static final String HDDS_X509_MAX_DURATION_DEFAULT= "P1865D";
   public static final String HDDS_X509_SIGNATURE_ALGO =
       "hdds.x509.signature.algorithm";
   public static final String HDDS_X509_SIGNATURE_ALGO_DEFAULT = "SHA256withRSA";
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index ffbb3e3340..5abe8fbb31 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -632,7 +632,7 @@ public final class HddsUtils {
    * Utility method to round up bytes into the nearest MB.
    */
   public static int roundupMb(long bytes) {
-    return (int)Math.ceil((double) bytes / (double) ONE_MB);
+    return (int)Math.ceil((double) bytes/(double) ONE_MB);
   }
 
   /**
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/StringUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/StringUtils.java
index 792a9d0d84..77d193035f 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/StringUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/StringUtils.java
@@ -151,6 +151,6 @@ public final class StringUtils {
 
   public static String appendIfNotPresent(String str, char c) {
     Preconditions.checkNotNull(str, "Input string is null");
-    return str.isEmpty() || str.charAt(str.length() - 1) != c ? str + c : str;
+    return str.isEmpty() || str.charAt(str.length() - 1) != c ? str + c: str;
   }
 }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceAudience.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceAudience.java
index 03dc00518d..37da0a3b27 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceAudience.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceAudience.java
@@ -50,7 +50,7 @@ public final class InterfaceAudience {
    */
   @Documented
   @Retention(RetentionPolicy.RUNTIME)
-  public @interface Public { };
+  public @interface Public {};
   
   /**
    * Intended only for the project(s) specified in the annotation.
@@ -67,7 +67,7 @@ public final class InterfaceAudience {
    */
   @Documented
   @Retention(RetentionPolicy.RUNTIME)
-  public @interface Private { };
+  public @interface Private {};
 
-  private InterfaceAudience() { } // Audience can't exist on its own
+  private InterfaceAudience() {} // Audience can't exist on its own
 }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java
index 794ebd2d74..9945690a9b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java
@@ -43,7 +43,7 @@ public final class OzoneQuota {
   public static final String OZONE_QUOTA_TB = "TB";
 
   /** Quota Units.*/
-  public enum Units { B, KB, MB, GB, TB }
+  public enum Units {B, KB, MB, GB, TB}
 
   // Quota to decide how many buckets can be created.
   private long quotaInNamespace;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/QuotaList.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/QuotaList.java
index 5403469fa7..205cca1100 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/QuotaList.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/QuotaList.java
@@ -28,14 +28,13 @@ public class QuotaList {
   private ArrayList<OzoneQuota.Units> unitQuota;
   private ArrayList<Long> sizeQuota;
 
-  public QuotaList() {
+  public QuotaList(){
     ozoneQuota = new ArrayList<String>();
     unitQuota = new ArrayList<OzoneQuota.Units>();
     sizeQuota = new ArrayList<Long>();
   }
 
-  public void addQuotaList(
-      String oQuota, OzoneQuota.Units uQuota, Long sQuota) {
+  public void addQuotaList(String oQuota, OzoneQuota.Units uQuota, Long sQuota){
     ozoneQuota.add(oQuota);
     unitQuota.add(uQuota);
     sizeQuota.add(sQuota);
@@ -53,15 +52,15 @@ public class QuotaList {
     return this.unitQuota;
   }
 
-  public OzoneQuota.Units getUnits(String oQuota) {
+  public OzoneQuota.Units getUnits(String oQuota){
     return unitQuota.get(ozoneQuota.indexOf(oQuota));
   }
 
-  public Long getQuotaSize(OzoneQuota.Units uQuota) {
+  public Long getQuotaSize(OzoneQuota.Units uQuota){
     return sizeQuota.get(unitQuota.indexOf(uQuota));
   }
 
-  public OzoneQuota.Units getQuotaUnit(Long sQuota) {
+  public OzoneQuota.Units getQuotaUnit(Long sQuota){
     return unitQuota.get(sizeQuota.indexOf(sQuota));
   }
 
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java
index 8623a0e7f3..044bd6f833 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java
@@ -46,7 +46,7 @@ public enum ReplicationFactor {
    * @return ReplicationFactor
    */
   public static ReplicationFactor valueOf(int value) {
-    if (value == 1) {
+    if(value == 1) {
       return ONE;
     }
     if (value == 3) {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
index 6b7e7c6384..d24cb68d50 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
@@ -109,7 +109,6 @@ public class OzoneConfiguration extends Configuration
     setClassLoader(conf.getClassLoader());
     if (!(conf instanceof OzoneConfiguration)) {
       loadDefaults();
-      addResource(conf);
     }
   }
 
@@ -127,7 +126,6 @@ public class OzoneConfiguration extends Configuration
     } catch (IOException e) {
       e.printStackTrace();
     }
-    addResource("ozone-default.xml");
     // Adding core-site here because properties from core-site are
     // distributed to executors by spark driver. Ozone properties which are
     // added to core-site, will be overridden by properties from adding Resource
@@ -244,6 +242,7 @@ public class OzoneConfiguration extends Configuration
     // adds the default resources
     Configuration.addDefaultResource("hdfs-default.xml");
     Configuration.addDefaultResource("hdfs-site.xml");
+    Configuration.addDefaultResource("ozone-default.xml");
   }
 
   /**
@@ -296,7 +295,7 @@ public class OzoneConfiguration extends Configuration
     return configMap;
   }
 
-  private static void addDeprecatedKeys() {
+  private static void addDeprecatedKeys(){
     Configuration.addDeprecations(new DeprecationDelta[]{
         new DeprecationDelta("ozone.datanode.pipeline.limit",
             ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT),
@@ -305,9 +304,7 @@ public class OzoneConfiguration extends Configuration
            HDDS_DATANODE_RATIS_PREFIX_KEY + "."
            + RaftServerConfigKeys.PREFIX + "." + "rpc.slowness.timeout"),
         new DeprecationDelta("dfs.datanode.keytab.file",
-            DFSConfigKeysLegacy.DFS_DATANODE_KERBEROS_KEYTAB_FILE_KEY),
-        new DeprecationDelta("ozone.scm.chunk.layout",
-            ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY)
+            DFSConfigKeysLegacy.DFS_DATANODE_KERBEROS_KEYTAB_FILE_KEY)
     });
   }
 }
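
The two OzoneConfiguration hunks above revert the HDDS-6075 fix: the per-instance addResource(conf) and addResource("ozone-default.xml") calls are dropped, and ozone-default.xml is registered as a process-wide default resource again. The fix being reverted relied on Hadoop Configuration's ordering rule that later-added resources override earlier ones. A minimal sketch of that rule, assuming only hadoop-common on the classpath; the key name is illustrative:

    import org.apache.hadoop.conf.Configuration;

    public final class ResourceOrderSketch {
      public static void main(String[] args) {
        // Caller-supplied configuration carrying an explicit setting.
        Configuration input = new Configuration(false);
        input.set("ozone.example.key", "caller-value");

        Configuration merged = new Configuration(false);
        // Defaults first (a missing classpath resource is skipped quietly)...
        merged.addResource("ozone-default.xml");
        // ...then the caller's configuration, so its keys win on conflict.
        merged.addResource(input);

        System.out.println(merged.get("ozone.example.key")); // caller-value
      }
    }
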
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java
index 319fefdf4e..782a3e18a4 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java
@@ -127,7 +127,7 @@ public class CachingSpaceUsageSource implements SpaceUsageSource {
 
   private void refresh() {
     //only one `refresh` can be running at a certain moment
-    if (isRefreshRunning.compareAndSet(false, true)) {
+    if(isRefreshRunning.compareAndSet(false, true)) {
       try {
         cachedValue.set(source.getUsedSpace());
       } catch (RuntimeException e) {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
index 01bd0f482a..aef3c298af 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
@@ -713,7 +713,7 @@ public class DatanodeDetails extends NodeImpl implements
      *
      * @return DatanodeDetails.Builder
      */
-    public Builder setPersistedOpState(HddsProtos.NodeOperationalState state) {
+    public Builder setPersistedOpState(HddsProtos.NodeOperationalState state){
       this.persistedOpState = state;
       return this;
     }
@@ -726,7 +726,7 @@ public class DatanodeDetails extends NodeImpl implements
      *
      * @return DatanodeDetails.Builder
      */
-    public Builder setPersistedOpStateExpiry(long expiry) {
+    public Builder setPersistedOpStateExpiry(long expiry){
       this.persistedOpStateExpiryEpochSec = expiry;
       return this;
     }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
index c1cd865036..50480c1dca 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
@@ -136,7 +136,7 @@ public final class RatisHelper {
   }
 
   private static RaftGroup newRaftGroup(Collection<RaftPeer> peers) {
-    return peers.isEmpty() ? emptyRaftGroup()
+    return peers.isEmpty()? emptyRaftGroup()
         : RaftGroup.valueOf(DUMMY_GROUP_ID, peers);
   }
 
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfig.java
index d72e27a18a..c91a186b35 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfig.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfig.java
@@ -40,7 +40,7 @@ public class ReconConfig {
       type = ConfigType.STRING,
       defaultValue = "",
       tags = { ConfigTag.SECURITY, ConfigTag.RECON, ConfigTag.OZONE },
-      description = "The keytab file used by Recon daemon to login as " +
+      description = "The keytab file used by Recon daemon to login as "+
           "its service principal."
   )
   private String keytab;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java
index 14a229b5e7..b5f6e48121 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java
@@ -32,7 +32,7 @@ import org.apache.ratis.thirdparty.com.google.protobuf.UnsafeByteOperations;
  * Ozone configuration.
  */
 public final class ByteStringConversion {
-  private ByteStringConversion() { } // no instantiation.
+  private ByteStringConversion(){} // no instantiation.
 
   /**
    * Creates the conversion function to be used to convert ByteBuffers to
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java
index ce79ec2abb..baee0384fb 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java
@@ -43,7 +43,7 @@ public class ScmConfig {
       type = ConfigType.STRING,
       defaultValue = "",
       tags = { ConfigTag.SECURITY, ConfigTag.OZONE },
-      description = "The keytab file used by SCM daemon to login as " +
+      description = "The keytab file used by SCM daemon to login as "+
           "its service principal."
   )
   private String keytab;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index c1f43c6eb5..a4c314fc33 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -138,8 +138,8 @@ public final class ScmConfigKeys {
   public static final String OZONE_CHUNK_READ_BUFFER_DEFAULT_SIZE_DEFAULT =
       "64KB";
 
-  public static final String OZONE_SCM_CONTAINER_LAYOUT_KEY =
-      "ozone.scm.container.layout";
+  public static final String OZONE_SCM_CHUNK_LAYOUT_KEY =
+      "ozone.scm.chunk.layout";
 
   public static final String OZONE_SCM_CLIENT_PORT_KEY =
       "ozone.scm.client.port";
@@ -435,7 +435,7 @@ public final class ScmConfigKeys {
   public static final String OZONE_SCM_HA_ENABLE_KEY
       = "ozone.scm.ratis.enable";
   public static final boolean OZONE_SCM_HA_ENABLE_DEFAULT
-      = true;
+      = false;
   public static final String OZONE_SCM_RATIS_PORT_KEY
       = "ozone.scm.ratis.port";
   public static final int OZONE_SCM_RATIS_PORT_DEFAULT
@@ -525,7 +525,7 @@ public final class ScmConfigKeys {
 
   public static final String OZONE_SCM_HA_RAFT_LOG_PURGE_GAP =
           "ozone.scm.ha.ratis.log.purge.gap";
-  public static final int OZONE_SCM_HA_RAFT_LOG_PURGE_GAP_DEFAULT = 1000000;
+  public static final int OZONE_SCM_HA_RAFT_LOG_PURGE_GAP_DEFAULT =1000000;
 
   public static final String OZONE_SCM_HA_RATIS_SNAPSHOT_THRESHOLD =
           "ozone.scm.ha.ratis.snapshot.threshold";
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
index f1885f890e..351870a3cd 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.hdds.scm.client;
 import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.hdds.annotation.InterfaceStability;
 import org.apache.hadoop.hdds.scm.DatanodeAdminError;
-import org.apache.hadoop.hdds.scm.container.ContainerReplicaInfo;
-import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
@@ -72,16 +70,6 @@ public interface ScmClient extends Closeable {
   ContainerWithPipeline getContainerWithPipeline(long containerId)
       throws IOException;
 
-  /**
-   * Gets the list of ReplicaInfo known by SCM for a given container.
-   * @param containerId - The Container ID
-   * @return List of ContainerReplicaInfo for the container or an empty list
-   *         if none.
-   * @throws IOException
-   */
-  List<ContainerReplicaInfo> getContainerReplicas(
-      long containerId) throws IOException;
-
   /**
    * Close a container.
    *
@@ -320,20 +308,12 @@ public interface ScmClient extends Closeable {
    */
   boolean getReplicationManagerStatus() throws IOException;
 
-  /**
-   * Returns the latest container summary report generated by Replication
-   * Manager.
-   * @return The latest ReplicationManagerReport.
-   * @throws IOException
-   */
-  ReplicationManagerReport getReplicationManagerReport() throws IOException;
-
   /**
    * Start ContainerBalancer.
    */
   boolean startContainerBalancer(Optional<Double> threshold,
-      Optional<Integer> iterations,
-      Optional<Integer> maxDatanodesPercentageToInvolvePerIteration,
+      Optional<Integer> idleiterations,
+      Optional<Double> maxDatanodesRatioToInvolvePerIteration,
       Optional<Long> maxSizeToMovePerIterationInGB,
       Optional<Long> maxSizeEnteringTargetInGB,
       Optional<Long> maxSizeLeavingSourceInGB) throws IOException;
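
The ScmClient hunk above restores the older startContainerBalancer signature (idleiterations plus a datanode ratio, instead of an iteration count and a percentage). A short sketch of calling the restored method with all defaults, assuming an already-constructed ScmClient; obtaining one is out of scope here:

    import java.io.IOException;
    import java.util.Optional;
    import org.apache.hadoop.hdds.scm.client.ScmClient;

    final class BalancerStartSketch {
      // Empty Optionals ask the balancer to fall back to its configured
      // defaults for every tunable.
      static boolean startWithDefaults(ScmClient client) throws IOException {
        return client.startContainerBalancer(
            Optional.empty(),  // threshold
            Optional.empty(),  // idleiterations
            Optional.empty(),  // maxDatanodesRatioToInvolvePerIteration
            Optional.empty(),  // maxSizeToMovePerIterationInGB
            Optional.empty(),  // maxSizeEnteringTargetInGB
            Optional.empty()); // maxSizeLeavingSourceInGB
      }
    }
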
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaInfo.java
deleted file mode 100644
index b30dff716d..0000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaInfo.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-
-import java.util.UUID;
-
-/**
- * Class which stores ContainerReplica details on the client.
- */
-public final class ContainerReplicaInfo {
-
-  private long containerID;
-  private String state;
-  private DatanodeDetails datanodeDetails;
-  private UUID placeOfBirth;
-  private long sequenceId;
-  private long keyCount;
-  private long bytesUsed;
-
-  public static ContainerReplicaInfo fromProto(
-      HddsProtos.SCMContainerReplicaProto proto) {
-    ContainerReplicaInfo.Builder builder = new ContainerReplicaInfo.Builder();
-    builder.setContainerID(proto.getContainerID())
-        .setState(proto.getState())
-        .setDatanodeDetails(DatanodeDetails
-            .getFromProtoBuf(proto.getDatanodeDetails()))
-        .setPlaceOfBirth(UUID.fromString(proto.getPlaceOfBirth()))
-        .setSequenceId(proto.getSequenceID())
-        .setKeyCount(proto.getKeyCount())
-        .setBytesUsed(proto.getBytesUsed());
-    return builder.build();
-  }
-
-  private ContainerReplicaInfo() {
-  }
-
-  public long getContainerID() {
-    return containerID;
-  }
-
-  public String getState() {
-    return state;
-  }
-
-  public DatanodeDetails getDatanodeDetails() {
-    return datanodeDetails;
-  }
-
-  public UUID getPlaceOfBirth() {
-    return placeOfBirth;
-  }
-
-  public long getSequenceId() {
-    return sequenceId;
-  }
-
-  public long getKeyCount() {
-    return keyCount;
-  }
-
-  public long getBytesUsed() {
-    return bytesUsed;
-  }
-
-  /**
-   * Builder for ContainerReplicaInfo class.
-   */
-  public static class Builder {
-
-    private final ContainerReplicaInfo subject = new ContainerReplicaInfo();
-
-    public Builder setContainerID(long containerID) {
-      subject.containerID = containerID;
-      return this;
-    }
-
-    public Builder setState(String state) {
-      subject.state = state;
-      return this;
-    }
-
-    public Builder setDatanodeDetails(DatanodeDetails datanodeDetails) {
-      subject.datanodeDetails = datanodeDetails;
-      return this;
-    }
-
-    public Builder setPlaceOfBirth(UUID placeOfBirth) {
-      subject.placeOfBirth = placeOfBirth;
-      return this;
-    }
-
-    public Builder setSequenceId(long sequenceId) {
-      subject.sequenceId = sequenceId;
-      return this;
-    }
-
-    public Builder setKeyCount(long keyCount) {
-      subject.keyCount = keyCount;
-      return this;
-    }
-
-    public Builder setBytesUsed(long bytesUsed) {
-      subject.bytesUsed = bytesUsed;
-      return this;
-    }
-
-    public ContainerReplicaInfo build() {
-      return subject;
-    }
-  }
-}
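
For reference, the ContainerReplicaInfo class deleted above exposed only a private constructor plus a fluent Builder. Constructing an instance looked like the sketch below, which uses nothing beyond the removed code's own API; the field values are illustrative:

    import java.util.UUID;
    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.hdds.scm.container.ContainerReplicaInfo;

    final class ReplicaInfoBuilderSketch {
      static ContainerReplicaInfo sample(DatanodeDetails dn) {
        return new ContainerReplicaInfo.Builder()
            .setContainerID(1L)
            .setState("CLOSED")
            .setDatanodeDetails(dn)              // reporting datanode
            .setPlaceOfBirth(UUID.randomUUID())  // origin datanode UUID
            .setSequenceId(0L)
            .setKeyCount(42L)
            .setBytesUsed(1024L)
            .build();
      }
    }
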
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java
deleted file mode 100644
index 2f2a7bf3e5..0000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java
+++ /dev/null
@@ -1,283 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.LongAdder;
-import java.util.stream.Collectors;
-
-/**
- * This class is used by ReplicationManager. Each time ReplicationManager runs,
- * it creates a new instance of this class and increments the various counters
- * to allow for creating a report on the various container states within the
- * system. There is a counter for each LifeCycleState (open, closing, closed
- * etc) and the sum of each of the lifecycle state counters should equal the
- * total number of containers in SCM. Ie, each container can only be in one of
- * the Lifecycle states at any time.
- *
- * Additionally, there are a set of counters for the "health state" of the
- * containers, defined here in the HealthState enum. It is normal for containers
- * to be in these health states from time to time, but the presence of a
- * container in one of these health states generally means cluster is in a
- * degraded state. Normally, the cluster will recover by itself, but manual
- * intervention may be needed in some cases.
- *
- * To aid debugging, when containers are in one of the health states, a list of
- * up to SAMPLE_LIMIT container IDs are recorded in the report for each of the
- * states.
- */
-public class ReplicationManagerReport {
-
-  public static final int SAMPLE_LIMIT = 100;
-  private long reportTimeStamp;
-
-  /**
-   * Enum representing various health states a container can be in.
-   */
-  public enum HealthState {
-    UNDER_REPLICATED("Containers with insufficient replicas",
-        "NumUnderReplicatedContainers"),
-    MIS_REPLICATED("Containers with insufficient racks",
-        "NumMisReplicatedContainers"),
-    OVER_REPLICATED("Containers with more replicas than required",
-        "NumOverReplicatedContainers"),
-    MISSING("Containers with no online replicas",
-        "NumMissingContainers"),
-    UNHEALTHY(
-        "Containers Closed or Quasi_Closed having some replicas in " +
-            "a different state", "NumUnhealthyContainers"),
-    EMPTY("Containers having no blocks", "NumEmptyContainers"),
-    OPEN_UNHEALTHY(
-        "Containers open and having replicas with different states",
-        "NumOpenUnhealthyContainers"),
-    QUASI_CLOSED_STUCK(
-        "Containers QuasiClosed with insufficient datanode origins",
-        "NumStuckQuasiClosedContainers");
-
-    private String description;
-    private String metricName;
-
-    HealthState(String desc, String name) {
-      this.description = desc;
-      this.metricName = name;
-    }
-
-    public String getMetricName() {
-      return this.metricName;
-    }
-
-    public String getDescription() {
-      return this.description;
-    }
-  }
-
-  private final Map<String, LongAdder> stats;
-  private final Map<String, List<ContainerID>> containerSample
-      = new ConcurrentHashMap<>();
-
-  public static ReplicationManagerReport fromProtobuf(
-      HddsProtos.ReplicationManagerReportProto proto) {
-    ReplicationManagerReport report = new ReplicationManagerReport();
-    report.setTimestamp(proto.getTimestamp());
-    for (HddsProtos.KeyIntValue stat : proto.getStatList()) {
-      report.setStat(stat.getKey(), stat.getValue());
-    }
-    for (HddsProtos.KeyContainerIDList sample : proto.getStatSampleList()) {
-      report.setSample(sample.getKey(), sample.getContainerList()
-          .stream()
-          .map(c -> ContainerID.getFromProtobuf(c))
-          .collect(Collectors.toList()));
-    }
-    return report;
-  }
-
-  public ReplicationManagerReport() {
-    stats = createStatsMap();
-  }
-
-  public void increment(HealthState stat) {
-    increment(stat.toString());
-  }
-
-  public void increment(HddsProtos.LifeCycleState stat) {
-    increment(stat.toString());
-  }
-
-  public void incrementAndSample(HealthState stat, ContainerID container) {
-    incrementAndSample(stat.toString(), container);
-  }
-
-  public void incrementAndSample(HddsProtos.LifeCycleState stat,
-      ContainerID container) {
-    incrementAndSample(stat.toString(), container);
-  }
-
-  public void setComplete() {
-    reportTimeStamp = System.currentTimeMillis();
-  }
-
-  /**
-   * The epoch time in milli-seconds when this report was completed.
-   * @return epoch time in milli-seconds.
-   */
-  public long getReportTimeStamp() {
-    return reportTimeStamp;
-  }
-
-  /**
-   * Get the stat for the given LifeCycleState. If there is no stat available
-   * for that stat -1 is returned.
-   * @param stat The requested stat.
-   * @return The stat value or -1 if it is not present
-   */
-  public long getStat(HddsProtos.LifeCycleState stat) {
-    return getStat(stat.toString());
-  }
-
-  /**
-   * Get the stat for the given HealthState. If there is no stat available
-   * for that stat -1 is returned.
-   * @param stat The requested stat.
-   * @return The stat value or -1 if it is not present
-   */
-  public long getStat(HealthState stat) {
-    return getStat(stat.toString());
-  }
-
-  /**
-   * Returns the stat requested, or -1 if it does not exist.
-   * @param stat The request stat
-   * @return The value of the stat or -1 if it does not exist.
-   */
-  private long getStat(String stat) {
-    LongAdder val = stats.get(stat);
-    if (val == null) {
-      return -1;
-    }
-    return val.longValue();
-  }
-
-  protected void setTimestamp(long timestamp) {
-    this.reportTimeStamp = timestamp;
-  }
-
-  protected void setStat(String stat, long value) {
-    LongAdder adder = getStatAndEnsurePresent(stat);
-    if (adder.longValue() != 0) {
-      throw new IllegalStateException(stat + " is expected to be zero");
-    }
-    adder.add(value);
-  }
-
-  protected void setSample(String stat, List<ContainerID> sample) {
-    // First get the stat, as we should not receive a sample for a stat which
-    // does not exist.
-    getStatAndEnsurePresent(stat);
-    // Now check there is not already a sample for this stat
-    List<ContainerID> existingSample = containerSample.get(stat);
-    if (existingSample != null) {
-      throw new IllegalStateException(stat
-          + " is not expected to have existing samples");
-    }
-    containerSample.put(stat, sample);
-  }
-
-  public List<ContainerID> getSample(HddsProtos.LifeCycleState stat) {
-    return getSample(stat.toString());
-  }
-
-  public List<ContainerID> getSample(HealthState stat) {
-    return getSample(stat.toString());
-  }
-
-  private List<ContainerID> getSample(String stat) {
-    List<ContainerID> list = containerSample.get(stat);
-    if (list == null) {
-      return Collections.emptyList();
-    }
-    synchronized (list) {
-      return new ArrayList<>(list);
-    }
-  }
-
-  private void increment(String stat) {
-    getStatAndEnsurePresent(stat).increment();
-  }
-
-  private LongAdder getStatAndEnsurePresent(String stat) {
-    LongAdder adder = stats.get(stat);
-    if (adder == null) {
-      throw new IllegalArgumentException("Unexpected stat " + stat);
-    }
-    return adder;
-  }
-
-  private void incrementAndSample(String stat, ContainerID container) {
-    increment(stat);
-    List<ContainerID> list = containerSample
-        .computeIfAbsent(stat, k -> new ArrayList<>());
-    synchronized (list) {
-      if (list.size() < SAMPLE_LIMIT) {
-        list.add(container);
-      }
-    }
-  }
-
-  private Map<String, LongAdder> createStatsMap() {
-    Map<String, LongAdder> map = new HashMap<>();
-    for (HddsProtos.LifeCycleState s : HddsProtos.LifeCycleState.values()) {
-      map.put(s.toString(), new LongAdder());
-    }
-    for (HealthState s : HealthState.values()) {
-      map.put(s.toString(), new LongAdder());
-    }
-    return map;
-  }
-
-  public HddsProtos.ReplicationManagerReportProto toProtobuf() {
-    HddsProtos.ReplicationManagerReportProto.Builder proto =
-        HddsProtos.ReplicationManagerReportProto.newBuilder();
-    proto.setTimestamp(getReportTimeStamp());
-
-    for (Map.Entry<String, LongAdder> e : stats.entrySet()) {
-      proto.addStat(HddsProtos.KeyIntValue.newBuilder()
-          .setKey(e.getKey())
-          .setValue(e.getValue().longValue())
-          .build());
-    }
-
-    for (Map.Entry<String, List<ContainerID>> e : containerSample.entrySet()) {
-      HddsProtos.KeyContainerIDList.Builder sample
-          = HddsProtos.KeyContainerIDList.newBuilder();
-      sample.setKey(e.getKey());
-      for (ContainerID container : e.getValue()) {
-        sample.addContainer(container.getProtobuf());
-      }
-      proto.addStatSample(sample.build());
-    }
-    return proto.build();
-  }
-
-}
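
For readers skimming the removal above: the reverted report keeps one
LongAdder counter per stat plus a bounded list of sampled container IDs.
A minimal, self-contained sketch of that pattern follows; the class name
SimpleReport and the plain long IDs are illustrative stand-ins, not the
Ozone API.

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.LongAdder;

    // Hypothetical sketch of the counter-plus-bounded-sample pattern.
    public class SimpleReport {
      private static final int SAMPLE_LIMIT = 100;
      private final Map<String, LongAdder> stats = new ConcurrentHashMap<>();
      private final Map<String, List<Long>> samples = new ConcurrentHashMap<>();

      // Count the event and keep at most SAMPLE_LIMIT sample ids per stat.
      public void incrementAndSample(String stat, long id) {
        stats.computeIfAbsent(stat, k -> new LongAdder()).increment();
        List<Long> list = samples.computeIfAbsent(stat, k -> new ArrayList<>());
        synchronized (list) {
          if (list.size() < SAMPLE_LIMIT) {
            list.add(id);
          }
        }
      }

      // -1 signals an unknown stat, matching the convention above.
      public long getStat(String stat) {
        LongAdder val = stats.get(stat);
        return val == null ? -1 : val.longValue();
      }

      // Defensive copy so callers never observe concurrent mutation.
      public List<Long> getSample(String stat) {
        List<Long> list = samples.get(stat);
        if (list == null) {
          return Collections.emptyList();
        }
        synchronized (list) {
          return new ArrayList<>(list);
        }
      }
    }
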
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java
index b2b566a7a4..644659557a 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java
@@ -38,7 +38,7 @@ import static org.apache.hadoop.hdds.scm.net.NetConstants.PATH_SEPARATOR;
  */
 public class InnerNodeImpl extends NodeImpl implements InnerNode {
   protected static class Factory implements InnerNode.Factory<InnerNodeImpl> {
-    protected Factory() { }
+    protected Factory() {}
 
     @Override
     public InnerNodeImpl newInnerNode(String name, String location,
@@ -93,7 +93,7 @@ public class InnerNodeImpl extends NodeImpl implements InnerNode {
     } else {
       for (Node node: childrenMap.values()) {
         if (node instanceof InnerNode) {
-          count += ((InnerNode)node).getNumOfNodes(level - 1);
+          count += ((InnerNode)node).getNumOfNodes(level -1);
         } else {
           throw new RuntimeException("Cannot support Level:" + level +
               " on this node " + this.toString());
@@ -119,7 +119,7 @@ public class InnerNodeImpl extends NodeImpl implements InnerNode {
     } else {
       for (Node node: childrenMap.values()) {
         if (node instanceof InnerNode) {
-          result.addAll(((InnerNode)node).getNodes(level - 1));
+          result.addAll(((InnerNode)node).getNodes(level -1));
         } else {
           throw new RuntimeException("Cannot support Level:" + level +
               " on this node " + this.toString());
@@ -265,7 +265,7 @@ public class InnerNodeImpl extends NodeImpl implements InnerNode {
     if (child == null) {
       return null;
     }
-    if (path.length == 1) {
+    if (path.length == 1){
       return child;
     }
     if (child instanceof InnerNode) {
@@ -292,7 +292,7 @@ public class InnerNodeImpl extends NodeImpl implements InnerNode {
       }
       return getChildNode(leafIndex);
     } else {
-      for (Node node : childrenMap.values()) {
+      for(Node node : childrenMap.values()) {
         InnerNodeImpl child = (InnerNodeImpl)node;
         int leafCount = child.getNumOfLeaves();
         if (leafIndex < leafCount) {
@@ -468,7 +468,7 @@ public class InnerNodeImpl extends NodeImpl implements InnerNode {
     if (leafIndex >= getNumOfChildren()) {
       return null;
     }
-    for (Node node : childrenMap.values()) {
+    for(Node node : childrenMap.values()) {
       if (excludedNodes != null && excludedNodes.contains(node)) {
         continue;
       }
@@ -519,7 +519,7 @@ public class InnerNodeImpl extends NodeImpl implements InnerNode {
   private Node getChildNode(int index) {
     Iterator iterator = childrenMap.values().iterator();
     Node node = null;
-    while (index >= 0 && iterator.hasNext()) {
+    while(index >= 0 && iterator.hasNext()) {
       node = (Node)iterator.next();
       index--;
     }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java
index 206a0fd73b..43765a6e5a 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java
@@ -45,7 +45,7 @@ import static org.apache.hadoop.hdds.scm.net.NetConstants.ANCESTOR_GENERATION_DE
  * (computers) and inner nodes represent datacenter/core-switches/routers that
  * manages traffic in/out of data centers or racks.
  */
-public class NetworkTopologyImpl implements NetworkTopology {
+public class NetworkTopologyImpl implements NetworkTopology{
   public static final Logger LOG =
       LoggerFactory.getLogger(NetworkTopologyImpl.class);
 
@@ -91,7 +91,7 @@ public class NetworkTopologyImpl implements NetworkTopology {
     Preconditions.checkArgument(node != null, "node cannot be null");
     if (node instanceof InnerNode) {
       throw new IllegalArgumentException(
-          "Not allowed to add an inner node: " + node.getNetworkFullPath());
+          "Not allowed to add an inner node: "+ node.getNetworkFullPath());
     }
     int newDepth = NetUtils.locationToDepth(node.getNetworkLocation()) + 1;
 
@@ -104,7 +104,7 @@ public class NetworkTopologyImpl implements NetworkTopology {
     boolean add;
     try {
       add = clusterTree.add(node);
-    } finally {
+    }finally {
       netlock.writeLock().unlock();
     }
 
@@ -126,12 +126,12 @@ public class NetworkTopologyImpl implements NetworkTopology {
     Preconditions.checkArgument(node != null, "node cannot be null");
     if (node instanceof InnerNode) {
       throw new IllegalArgumentException(
-          "Not allowed to remove an inner node: " + node.getNetworkFullPath());
+          "Not allowed to remove an inner node: "+ node.getNetworkFullPath());
     }
     netlock.writeLock().lock();
     try {
       clusterTree.remove(node);
-    } finally {
+    }finally {
       netlock.writeLock().unlock();
     }
     LOG.info("Removed a node: {}", node.getNetworkFullPath());
@@ -534,7 +534,7 @@ public class NetworkTopologyImpl implements NetworkTopology {
             " generation  " + ancestorGen);
       }
      // affinity ancestor should have overlap with scope
-      if (affinityAncestor.getNetworkFullPath().startsWith(scope)) {
+      if (affinityAncestor.getNetworkFullPath().startsWith(scope)){
         finalScope = affinityAncestor.getNetworkFullPath();
       } else if (!scope.startsWith(affinityAncestor.getNetworkFullPath())) {
         return null;
@@ -655,21 +655,21 @@ public class NetworkTopologyImpl implements NetworkTopology {
       if (level1 > maxLevel || level2 > maxLevel) {
         return Integer.MAX_VALUE;
       }
-      while (level1 > level2 && node1 != null) {
+      while(level1 > level2 && node1 != null) {
         node1 = node1.getParent();
         level1--;
-        cost += node1 == null ? 0 : node1.getCost();
+        cost += node1 == null? 0 : node1.getCost();
       }
-      while (level2 > level1 && node2 != null) {
+      while(level2 > level1 && node2 != null) {
         node2 = node2.getParent();
         level2--;
-        cost += node2 == null ? 0 : node2.getCost();
+        cost += node2 == null? 0 : node2.getCost();
       }
-      while (node1 != null && node2 != null && node1 != node2) {
+      while(node1 != null && node2 != null && node1 != node2) {
         node1 = node1.getParent();
         node2 = node2.getParent();
-        cost += node1 == null ? 0 : node1.getCost();
-        cost += node2 == null ? 0 : node2.getCost();
+        cost += node1 == null? 0 : node1.getCost();
+        cost += node2 == null? 0 : node2.getCost();
       }
       return cost;
     } finally {
@@ -752,7 +752,7 @@ public class NetworkTopologyImpl implements NetworkTopology {
     List<Node> excludedAncestorList =
         NetUtils.getAncestorList(this, mutableExcludedNodes, ancestorGen);
     for (Node ancestor : excludedAncestorList) {
-      if (scope.startsWith(ancestor.getNetworkFullPath())) {
+      if (scope.startsWith(ancestor.getNetworkFullPath())){
         return 0;
       }
     }
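
The getDistanceCost hunk above walks both nodes up to a common ancestor,
adding the cost of every node passed through. A standalone sketch of that
walk, assuming a toy node type (parent/cost/level fields) and two nodes
from the same tree, so parents never run out; the real method also handles
nulls, leaf costs and read locking:

    // Toy stand-in for the real InnerNodeImpl/Node types.
    class ToyNode {
      final ToyNode parent;
      final int cost;
      final int level; // depth in the tree, root = 0
      ToyNode(ToyNode parent, int cost, int level) {
        this.parent = parent;
        this.cost = cost;
        this.level = level;
      }
    }

    final class DistanceSketch {
      private DistanceSketch() { }

      // Lift the deeper node first, then lift both until they meet,
      // summing the cost of each node stepped onto.
      static int distance(ToyNode n1, ToyNode n2) {
        int cost = 0;
        while (n1.level > n2.level) { n1 = n1.parent; cost += n1.cost; }
        while (n2.level > n1.level) { n2 = n2.parent; cost += n2.cost; }
        while (n1 != n2) {
          n1 = n1.parent;
          n2 = n2.parent;
          cost += n1.cost + n2.cost;
        }
        return cost;
      }
    }
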
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchema.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchema.java
index fc8e23ba13..47e5de880d 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchema.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchema.java
@@ -28,7 +28,7 @@ public final class NodeSchema {
   /**
    * Network topology layer type enum definition.
    */
-  public enum LayerType {
+  public enum LayerType{
     ROOT("Root", NetConstants.INNER_NODE_COST_DEFAULT),
     INNER_NODE("InnerNode", NetConstants.INNER_NODE_COST_DEFAULT),
     LEAF_NODE("Leaf", NetConstants.NODE_COST_DEFAULT);
@@ -47,7 +47,7 @@ public final class NodeSchema {
       return description;
     }
 
-    public int getCost() {
+    public int getCost(){
       return cost;
     }
     public static LayerType getType(String typeStr) {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java
index 289f7e6b75..cb9690fe37 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java
@@ -68,7 +68,7 @@ public final class NodeSchemaLoader {
 
   private static final int LAYOUT_VERSION = 1;
   private static volatile NodeSchemaLoader instance = null;
-  private NodeSchemaLoader() { }
+  private NodeSchemaLoader() {}
 
   public static NodeSchemaLoader getInstance() {
     if (instance == null) {
@@ -324,7 +324,7 @@ public final class NodeSchemaLoader {
    // Integrity check, only one ROOT and one LEAF are allowed
     boolean foundRoot = false;
     boolean foundLeaf = false;
-    for (NodeSchema schema: schemas.values()) {
+    for(NodeSchema schema: schemas.values()) {
       if (schema.getType() == LayerType.ROOT) {
         if (foundRoot) {
           throw new IllegalArgumentException("Multiple ROOT layers are found" +
@@ -385,7 +385,7 @@ public final class NodeSchemaLoader {
               + "> is null");
         }
         if (TOPOLOGY_PATH.equals(tagName)) {
-          if (value.startsWith(NetConstants.PATH_SEPARATOR_STR)) {
+          if(value.startsWith(NetConstants.PATH_SEPARATOR_STR)) {
             value = value.substring(1);
           }
           String[] layerIDs = value.split(NetConstants.PATH_SEPARATOR_STR);
@@ -403,7 +403,7 @@ public final class NodeSchemaLoader {
             throw new IllegalArgumentException("Topology path doesn't start "
                 + "with ROOT layer");
           }
-          if (schemas.get(layerIDs[layerIDs.length - 1]).getType() !=
+          if (schemas.get(layerIDs[layerIDs.length -1]).getType() !=
               LayerType.LEAF_NODE) {
             throw new IllegalArgumentException("Topology path doesn't end "
                 + "with LEAF layer");
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
index f5c0b62100..044f151868 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
@@ -415,7 +415,7 @@ public final class Pipeline {
     private Instant creationTimestamp = null;
     private UUID suggestedLeaderId = null;
 
-    public Builder() { }
+    public Builder() {}
 
     public Builder(Pipeline pipeline) {
       this.id = pipeline.id;
@@ -486,10 +486,10 @@ public final class Pipeline {
       if (nodeOrder != null && !nodeOrder.isEmpty()) {
         // This branch is for build from ProtoBuf
         List<DatanodeDetails> nodesWithOrder = new ArrayList<>();
-        for (int i = 0; i < nodeOrder.size(); i++) {
+        for(int i = 0; i < nodeOrder.size(); i++) {
           int nodeIndex = nodeOrder.get(i);
           Iterator<DatanodeDetails> it = nodeStatus.keySet().iterator();
-          while (it.hasNext() && nodeIndex >= 0) {
+          while(it.hasNext() && nodeIndex >= 0) {
             DatanodeDetails node = it.next();
             if (nodeIndex == 0) {
               nodesWithOrder.add(node);
@@ -503,7 +503,7 @@ public final class Pipeline {
               nodesWithOrder, id);
         }
         pipeline.setNodesInOrder(nodesWithOrder);
-      } else if (nodesInOrder != null) {
+      } else if (nodesInOrder != null){
         // This branch is for pipeline clone
         pipeline.setNodesInOrder(nodesInOrder);
       }
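
The Builder hunk above restores a pipeline's node order from protobuf
indices by rescanning an insertion-ordered node set once per stored index.
A small standalone sketch with strings standing in for DatanodeDetails:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.LinkedHashSet;
    import java.util.List;
    import java.util.Set;

    final class NodeOrderSketch {
      private NodeOrderSketch() { }

      // Walk the ordered set until each index is consumed, then pick
      // that node, as in Pipeline.Builder#build.
      static List<String> restoreOrder(Set<String> nodes, List<Integer> order) {
        List<String> result = new ArrayList<>();
        for (int index : order) {
          Iterator<String> it = nodes.iterator();
          while (it.hasNext() && index >= 0) {
            String node = it.next();
            if (index == 0) {
              result.add(node);
              break;
            }
            index--;
          }
        }
        return result;
      }

      public static void main(String[] args) {
        Set<String> nodes =
            new LinkedHashSet<>(Arrays.asList("dn1", "dn2", "dn3"));
        // Prints [dn3, dn1, dn2].
        System.out.println(restoreOrder(nodes, Arrays.asList(2, 0, 1)));
      }
    }
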
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
index 9f78b3166f..7f8663ef05 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.hdds.scm.ScmConfig;
 import org.apache.hadoop.hdds.scm.ScmInfo;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.StatusAndMessages;
@@ -95,15 +94,6 @@ public interface StorageContainerLocationProtocol extends Closeable {
   ContainerWithPipeline getContainerWithPipeline(long containerID)
       throws IOException;
 
-  /**
-   * Gets the list of ReplicaInfo known by SCM for a given container.
-   * @param containerId ID of the container
-   * @return List of ReplicaInfo for the container or an empty list if none.
-   * @throws IOException
-   */
-  List<HddsProtos.SCMContainerReplicaProto>
-      getContainerReplicas(long containerId) throws IOException;
-
   /**
    * Ask SCM the location of a batch of containers. SCM responds with a group of
    * nodes where these containers and their replicas are located.
@@ -318,20 +308,12 @@ public interface StorageContainerLocationProtocol extends Closeable {
    */
   boolean getReplicationManagerStatus() throws IOException;
 
-  /**
-   * Returns the latest container summary report generated by Replication
-   * Manager.
-   * @return The latest ReplicationManagerReport.
-   * @throws IOException
-   */
-  ReplicationManagerReport getReplicationManagerReport() throws IOException;
-
   /**
    * Start ContainerBalancer.
    */
   boolean startContainerBalancer(Optional<Double> threshold,
-      Optional<Integer> iterations,
-      Optional<Integer> maxDatanodesPercentageToInvolvePerIteration,
+      Optional<Integer> idleiterations,
+      Optional<Double> maxDatanodesRatioToInvolvePerIteration,
       Optional<Long> maxSizeToMovePerIterationInGB,
       Optional<Long> maxSizeEnteringTargetInGB,
       Optional<Long> maxSizeLeavingSourceInGB) throws IOException;
@@ -383,6 +365,4 @@ public interface StorageContainerLocationProtocol extends Closeable {
    * commands operating on {@code containerID}.
    */
   Token<?> getContainerToken(ContainerID containerID) throws IOException;
-
-  long getContainerCount() throws IOException;
 }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
index 7f2d2a8bec..fcf3f130f8 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
@@ -434,7 +434,7 @@ public final class ContainerProtocolCalls  {
     request.setContainerID(containerID);
     request.setCloseContainer(CloseContainerRequestProto.getDefaultInstance());
     request.setDatanodeUuid(id);
-    if (encodedToken != null) {
+    if(encodedToken != null) {
       request.setEncodedToken(encodedToken);
     }
     client.sendCommand(request.build(), getValidatorList());
@@ -458,7 +458,7 @@ public final class ContainerProtocolCalls  {
     request.setContainerID(containerID);
     request.setReadContainer(ReadContainerRequestProto.getDefaultInstance());
     request.setDatanodeUuid(id);
-    if (encodedToken != null) {
+    if(encodedToken != null) {
       request.setEncodedToken(encodedToken);
     }
     ContainerCommandResponseProto response =
@@ -560,8 +560,8 @@ public final class ContainerProtocolCalls  {
     ContainerCommandRequestProto request = builder.build();
     Map<DatanodeDetails, ContainerCommandResponseProto> responses =
             xceiverClient.sendCommandOnAllNodes(request);
-    for (Map.Entry<DatanodeDetails, ContainerCommandResponseProto> entry:
-           responses.entrySet()) {
+    for(Map.Entry<DatanodeDetails, ContainerCommandResponseProto> entry:
+           responses.entrySet()){
       datanodeToResponseMap.put(entry.getKey(), entry.getValue().getGetBlock());
     }
     return datanodeToResponseMap;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java
index 8cd68a0125..3195e008cc 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java
@@ -37,7 +37,7 @@ public final class HddsVersionInfo {
   public static final VersionInfo HDDS_VERSION_INFO =
       new VersionInfo("hdds");
 
-  private HddsVersionInfo() { }
+  private HddsVersionInfo() {}
 
   public static void main(String[] args) {
     System.out.println("Using HDDS " + HDDS_VERSION_INFO.getVersion());
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/ResourceSemaphore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/ResourceSemaphore.java
index e1e959823e..96d59963ef 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/ResourceSemaphore.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/ResourceSemaphore.java
@@ -90,7 +90,7 @@ public class ResourceSemaphore extends Semaphore {
 
   @Override
   public String toString() {
-    return (isClosed() ? "closed/" : availablePermits() + "/") + limit;
+    return (isClosed()? "closed/": availablePermits() + "/") + limit;
   }
 
   /**
@@ -101,7 +101,7 @@ public class ResourceSemaphore extends Semaphore {
 
     public Group(int... limits) {
       final List<ResourceSemaphore> list = new ArrayList<>(limits.length);
-      for (int limit : limits) {
+      for(int limit : limits) {
         list.add(new ResourceSemaphore(limit));
       }
       this.resources = Collections.unmodifiableList(list);
@@ -131,7 +131,7 @@ public class ResourceSemaphore extends Semaphore {
       }
 
       // failed at i, releasing all previous resources
-      for (i--; i >= 0; i--) {
+      for(i--; i >= 0; i--) {
         resources.get(i).release(permits[i]);
       }
       return false;
@@ -147,13 +147,13 @@ public class ResourceSemaphore extends Semaphore {
     }
 
     protected void release(int... permits) {
-      for (int i = resources.size() - 1; i >= 0; i--) {
+      for(int i = resources.size() - 1; i >= 0; i--) {
         resources.get(i).release(permits[i]);
       }
     }
 
     public void close() {
-      for (int i = resources.size() - 1; i >= 0; i--) {
+      for(int i = resources.size() - 1; i >= 0; i--) {
         resources.get(i).close();
       }
     }
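
The Group logic above is an all-or-nothing acquire: permits are taken
resource by resource, and on the first failure everything already taken is
released in reverse order. A minimal sketch of the same idea on plain
java.util.concurrent.Semaphore (the class name is illustrative):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.Semaphore;

    final class SemaphoreGroupSketch {
      private final List<Semaphore> resources = new ArrayList<>();

      SemaphoreGroupSketch(int... limits) {
        for (int limit : limits) {
          resources.add(new Semaphore(limit));
        }
      }

      // Acquire in order; on failure at i, roll back 0..i-1 in reverse.
      boolean tryAcquire(int... permits) {
        int i = 0;
        for (; i < resources.size(); i++) {
          if (!resources.get(i).tryAcquire(permits[i])) {
            break;
          }
        }
        if (i == resources.size()) {
          return true; // everything acquired
        }
        for (i--; i >= 0; i--) {
          resources.get(i).release(permits[i]);
        }
        return false;
      }

      void release(int... permits) {
        for (int i = resources.size() - 1; i >= 0; i--) {
          resources.get(i).release(permits[i]);
        }
      }
    }
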
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/UniqueId.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/UniqueId.java
index 6fff80f675..ba062bcae1 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/UniqueId.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/UniqueId.java
@@ -51,7 +51,7 @@ public final class UniqueId {
   /**
    * Private constructor so that no one can instantiate this class.
    */
-  private UniqueId() { }
+  private UniqueId() {}
 
   /**
    * Calculate and returns next unique id based on System#currentTimeMillis.
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index bdc87899a4..a373fd19e6 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -89,7 +89,7 @@ public final class OzoneConsts {
   // OM Http server endpoints
   public static final String OZONE_OM_SERVICE_LIST_HTTP_ENDPOINT =
       "/serviceList";
-  public static final String OZONE_DB_CHECKPOINT_HTTP_ENDPOINT =
+  public static final String OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT =
       "/dbCheckpoint";
 
   // Ozone File System scheme
@@ -128,8 +128,8 @@ public final class OzoneConsts {
   public static final String CONTAINER_DB_SUFFIX = "container.db";
   public static final String PIPELINE_DB_SUFFIX = "pipeline.db";
   public static final String CRL_DB_SUFFIX = "crl.db";
-  public static final String DN_CONTAINER_DB = "-dn-" + CONTAINER_DB_SUFFIX;
-  public static final String DN_CRL_DB = "dn-" + CRL_DB_SUFFIX;
+  public static final String DN_CONTAINER_DB = "-dn-"+ CONTAINER_DB_SUFFIX;
+  public static final String DN_CRL_DB = "dn-"+ CRL_DB_SUFFIX;
   public static final String CRL_DB_DIRECTORY_NAME = "crl";
   public static final String OM_DB_NAME = "om.db";
   public static final String SCM_DB_NAME = "scm.db";
@@ -187,7 +187,7 @@ public final class OzoneConsts {
 
   public static final String OM_KEY_PREFIX = "/";
   public static final String OM_USER_PREFIX = "$";
-  public static final String OM_S3_PREFIX = "S3:";
+  public static final String OM_S3_PREFIX ="S3:";
   public static final String OM_S3_VOLUME_PREFIX = "s3";
   public static final String OM_S3_SECRET = "S3Secret:";
   public static final String OM_PREFIX = "Prefix:";
@@ -212,7 +212,7 @@ public final class OzoneConsts {
   /**
    * Quota Units.
    */
-  public enum Units { TB, GB, MB, KB, B }
+  public enum Units {TB, GB, MB, KB, B}
 
   /**
    * Max number of keys returned per list buckets operation.
@@ -333,7 +333,6 @@ public final class OzoneConsts {
   public static final String UNDELETED_KEYS_LIST = "unDeletedKeysList";
   public static final String SOURCE_VOLUME = "sourceVolume";
   public static final String SOURCE_BUCKET = "sourceBucket";
-  public static final String BUCKET_LAYOUT = "bucketLayout";
 
 
 
@@ -390,7 +389,7 @@ public final class OzoneConsts {
   public static final Pattern KEYNAME_ILLEGAL_CHARACTER_CHECK_REGEX  =
           Pattern.compile("^[^^{}<>^?%~#`\\[\\]\\|\\\\(\\x80-\\xff)]+$");
 
-  public static final String FS_FILE_COPYING_TEMP_SUFFIX = "._COPYING_";
+  public static final String FS_FILE_COPYING_TEMP_SUFFIX= "._COPYING_";
 
   // Transaction Info
   public static final String TRANSACTION_INFO_KEY = "#TRANSACTIONINFO";
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java
index 6c20968c8d..098ab6b2f7 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java
@@ -26,7 +26,7 @@ public enum AuditEventStatus {
 
   private String status;
 
-  AuditEventStatus(String status) {
+  AuditEventStatus(String status){
     this.status = status;
   }
 
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
index 9f1f5f0e22..ee6f45dadb 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
@@ -38,7 +38,7 @@ public class AuditLogger {
   * Parameterized constructor to initialize the logger.
    * @param type Audit Logger Type
    */
-  public AuditLogger(AuditLoggerType type) {
+  public AuditLogger(AuditLoggerType type){
     initializeLogger(type);
   }
 
@@ -46,7 +46,7 @@ public class AuditLogger {
   * Initializes the logger with a specific type.
   * @param loggerType one of the values from enum AuditLoggerType.
    */
-  private void initializeLogger(AuditLoggerType loggerType) {
+  private void initializeLogger(AuditLoggerType loggerType){
     this.logger = LogManager.getContext(false).getLogger(loggerType.getType());
   }
 
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java
index dbfde9f555..18241c7712 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java
@@ -31,7 +31,7 @@ public enum AuditLoggerType {
     return type;
   }
 
-  AuditLoggerType(String type) {
+  AuditLoggerType(String type){
     this.type = type;
   }
 }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java
index 3414aa403b..505b958071 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java
@@ -28,11 +28,11 @@ public enum AuditMarker {
 
   private Marker marker;
 
-  AuditMarker(Marker marker) {
+  AuditMarker(Marker marker){
     this.marker = marker;
   }
 
-  public Marker getMarker() {
+  public Marker getMarker(){
     return marker;
   }
 }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java
index 9d28c9f43e..6f3bbadaec 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java
@@ -64,12 +64,12 @@ public final class AuditMessage implements Message {
     private Map<String, String> params;
     private String ret;
 
-    public Builder setUser(String usr) {
+    public Builder setUser(String usr){
       this.user = usr;
       return this;
     }
 
-    public Builder atIp(String ipAddr) {
+    public Builder atIp(String ipAddr){
       this.ip = ipAddr;
       return this;
     }
@@ -79,7 +79,7 @@ public final class AuditMessage implements Message {
       return this;
     }
 
-    public Builder withParams(Map<String, String> args) {
+    public Builder withParams(Map<String, String> args){
       this.params = args;
       return this;
     }
@@ -89,12 +89,12 @@ public final class AuditMessage implements Message {
       return this;
     }
 
-    public Builder withException(Throwable ex) {
+    public Builder withException(Throwable ex){
       this.throwable = ex;
       return this;
     }
 
-    public AuditMessage build() {
+    public AuditMessage build(){
       String message = "user=" + this.user + " | ip=" + this.ip + " | " +
           "op=" + this.op + " " + this.params + " | " + "ret=" + this.ret;
       return new AuditMessage(message, throwable);
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java
index 3c1c209867..9b88c6a1d1 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java
@@ -48,8 +48,7 @@ public enum SCMAction implements AuditAction {
   STOP_CONTAINER_BALANCER,
   GET_CONTAINER_BALANCER_STATUS,
   GET_CONTAINER_WITH_PIPELINE_BATCH,
-  ADD_SCM,
-  GET_REPLICATION_MANAGER_REPORT;
+  ADD_SCM;
 
   @Override
   public String getAction() {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBuffer.java
index 6187d6bd9c..7ce643db47 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBuffer.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBuffer.java
@@ -80,7 +80,7 @@ public interface ChecksumByteBuffer extends Checksum {
     }
 
     private static int update(int crc, ByteBuffer b, int[] table) {
-      for (; b.remaining() > 7;) {
+      for(; b.remaining() > 7;) {
         final int c0 = (b.get() ^ crc) & 0xff;
         final int c1 = (b.get() ^ (crc >>>= 8)) & 0xff;
         final int c2 = (b.get() ^ (crc >>>= 8)) & 0xff;
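
The loop above is a table-driven CRC unrolled four bytes per iteration. A
single-byte version of the same technique, including table generation for
the reflected CRC-32 polynomial, offered as a hedged sketch rather than
the exact tables this class ships:

    final class CrcSketch {
      private CrcSketch() { }

      // 256-entry lookup table for a reflected polynomial
      // (0xEDB88320 is the standard CRC-32 polynomial).
      static int[] makeTable(int poly) {
        int[] table = new int[256];
        for (int i = 0; i < 256; i++) {
          int c = i;
          for (int k = 0; k < 8; k++) {
            c = (c & 1) != 0 ? (c >>> 1) ^ poly : c >>> 1;
          }
          table[i] = c;
        }
        return table;
      }

      // One byte per step; the unrolled loop consumes four at a time.
      static int update(int crc, byte b, int[] table) {
        return (crc >>> 8) ^ table[(crc ^ b) & 0xff];
      }
    }
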
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java
index 5a63c09f12..7622ffc001 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java
@@ -57,9 +57,8 @@ final class IncrementalChunkBuffer implements ChunkBuffer {
     Preconditions.checkArgument(increment > 0);
     this.limit = limit;
     this.increment = increment;
-    this.limitIndex = limit / increment;
-    this.buffers = new ArrayList<>(
-        limitIndex + (limit % increment == 0 ? 0 : 1));
+    this.limitIndex = limit/increment;
+    this.buffers = new ArrayList<>(limitIndex + (limit%increment == 0? 0: 1));
     this.isDuplicated = isDuplicated;
   }
 
@@ -67,7 +66,7 @@ final class IncrementalChunkBuffer implements ChunkBuffer {
   private int getBufferCapacityAtIndex(int i) {
     Preconditions.checkArgument(i >= 0);
     Preconditions.checkArgument(i <= limitIndex);
-    return i < limitIndex ? increment : limit % increment;
+    return i < limitIndex? increment: limit%increment;
   }
 
   private void assertInt(int expected, int computed, String name, int i) {
@@ -127,7 +126,7 @@ final class IncrementalChunkBuffer implements ChunkBuffer {
     Preconditions.checkArgument(position < limit);
     final int i = position / increment;
     final ByteBuffer ith = getAndAllocateAtIndex(i);
-    assertInt(position % increment, ith.position(), "position", i);
+    assertInt(position%increment, ith.position(), "position", i);
     return ith;
   }
 
@@ -208,7 +207,7 @@ final class IncrementalChunkBuffer implements ChunkBuffer {
     }
 
     final int thatLimit = that.limit();
-    for (int p = position(); that.position() < thatLimit;) {
+    for(int p = position(); that.position() < thatLimit;) {
       final ByteBuffer b = getAndAllocateAtPosition(p);
       final int min = Math.min(b.remaining(), thatLimit - that.position());
       that.limit(that.position() + min);
@@ -230,7 +229,7 @@ final class IncrementalChunkBuffer implements ChunkBuffer {
     final int pr = newPosition % increment;
     final int li = newLimit / increment;
     final int lr = newLimit % increment;
-    final int newSize = lr == 0 ? li : li + 1;
+    final int newSize = lr == 0? li: li + 1;
 
     for (int i = 0; i < newSize; i++) {
       final int pos = i < pi ? increment : i == pi ? pr : 0;
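
The constructor and capacity hunks above size the buffer list as a run of
full increments plus an optional remainder buffer. The arithmetic in
isolation, with a tiny usage example:

    final class IncrementSizing {
      private IncrementSizing() { }

      // Number of underlying buffers for a given limit and increment.
      static int bufferCount(int limit, int increment) {
        return limit / increment + (limit % increment == 0 ? 0 : 1);
      }

      // Capacity of buffer i: full increments first, then the remainder.
      static int capacityAt(int i, int limit, int increment) {
        return i < limit / increment ? increment : limit % increment;
      }

      public static void main(String[] args) {
        // limit = 10, increment = 4 -> buffers of 4, 4 and a 2-byte tail.
        System.out.println(bufferCount(10, 4));   // 3
        System.out.println(capacityAt(2, 10, 4)); // 2
      }
    }
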
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
index 6ba438456e..e6e1df5135 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
@@ -109,7 +109,7 @@ public class StorageInfo {
 
   public Long  getCreationTime() {
     String creationTime = properties.getProperty(CREATION_TIME);
-    if (creationTime != null) {
+    if(creationTime != null) {
       return Long.parseLong(creationTime);
     }
     return null;
@@ -117,7 +117,7 @@ public class StorageInfo {
 
   public int getLayoutVersion() {
     String layout = properties.getProperty(LAYOUT_VERSION);
-    if (layout != null) {
+    if(layout != null) {
       return Integer.parseInt(layout);
     }
     return 0;
@@ -166,7 +166,7 @@ public class StorageInfo {
       throws InconsistentStorageStateException {
     NodeType nodeType = getNodeType();
     Preconditions.checkNotNull(nodeType);
-    if (type != nodeType) {
+    if(type != nodeType) {
       throw new InconsistentStorageStateException("Expected NodeType: " + type +
           ", but found: " + nodeType);
     }
@@ -176,7 +176,7 @@ public class StorageInfo {
       throws InconsistentStorageStateException {
     String clusterId = getClusterID();
     Preconditions.checkNotNull(clusterId);
-    if (clusterId.isEmpty()) {
+    if(clusterId.isEmpty()) {
       throw new InconsistentStorageStateException("Cluster ID not found");
     }
   }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ha/ratis/RatisSnapshotInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ha/ratis/RatisSnapshotInfo.java
index ebc4bba209..a9de8922b5 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ha/ratis/RatisSnapshotInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ha/ratis/RatisSnapshotInfo.java
@@ -50,7 +50,7 @@ public class RatisSnapshotInfo implements SnapshotInfo {
     this.snapshotIndex = newIndex;
   }
 
-  public RatisSnapshotInfo() { }
+  public RatisSnapshotInfo() {}
 
   public RatisSnapshotInfo(long term, long index) {
     this.term = term;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java
index 434e497e23..8ea16897e1 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java
@@ -161,8 +161,8 @@ public class BlockData {
   * @return list of ChunkInfo.
    */
   public List<ContainerProtos.ChunkInfo> getChunks() {
-    return chunkList == null ? Collections.emptyList()
-        : chunkList instanceof ContainerProtos.ChunkInfo ?
+    return chunkList == null? Collections.emptyList()
+        : chunkList instanceof ContainerProtos.ChunkInfo?
             Collections.singletonList((ContainerProtos.ChunkInfo)chunkList)
         : Collections.unmodifiableList(castChunkList());
   }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java
index a13f164eec..7773828e2d 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java
@@ -45,7 +45,7 @@ public final class ContainerCommandRequestPBHelper {
     Map<String, String> auditParams = new TreeMap<>();
     Type cmdType = msg.getCmdType();
     String containerID = String.valueOf(msg.getContainerID());
-    switch (cmdType) {
+    switch(cmdType) {
     case CreateContainer:
       auditParams.put("containerID", containerID);
       auditParams.put("containerType",
@@ -75,11 +75,11 @@ public final class ContainerCommandRequestPBHelper {
       return auditParams;
 
     case PutBlock:
-      try {
+      try{
         auditParams.put("blockData",
             BlockData.getFromProtoBuf(msg.getPutBlock().getBlockData())
                 .toString());
-      } catch (IOException ex) {
+      } catch (IOException ex){
         if (LOG.isTraceEnabled()) {
           LOG.trace("Encountered error parsing BlockData from protobuf: "
               + ex.getMessage());
@@ -132,11 +132,11 @@ public final class ContainerCommandRequestPBHelper {
     case CompactChunk: return null; //CompactChunk operation
 
     case PutSmallFile:
-      try {
+      try{
         auditParams.put("blockData",
             BlockData.getFromProtoBuf(msg.getPutSmallFile()
                 .getBlock().getBlockData()).toString());
-      } catch (IOException ex) {
+      } catch (IOException ex){
         if (LOG.isTraceEnabled()) {
           LOG.trace("Encountered error parsing BlockData from protobuf: "
               + ex.getMessage());
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java
index 2fd7a9d494..e95105b074 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java
@@ -90,7 +90,7 @@ public class Lease<T> {
    */
   public void registerCallBack(Callable<Void> callback)
       throws LeaseExpiredException {
-    if (hasExpired()) {
+    if(hasExpired()) {
       throw new LeaseExpiredException(messageForResource(resource));
     }
     callbacks.add(callback);
@@ -104,7 +104,7 @@ public class Lease<T> {
    *         If the lease has already timed out
    */
   public long getElapsedTime() throws LeaseExpiredException {
-    if (hasExpired()) {
+    if(hasExpired()) {
       throw new LeaseExpiredException(messageForResource(resource));
     }
     return Time.monotonicNow() - creationTime;
@@ -129,7 +129,7 @@ public class Lease<T> {
    *         If the lease has already timed out
    */
   public long getLeaseLifeTime() throws LeaseExpiredException {
-    if (hasExpired()) {
+    if(hasExpired()) {
       throw new LeaseExpiredException(messageForResource(resource));
     }
     return leaseTimeout.get();
@@ -144,7 +144,7 @@ public class Lease<T> {
    *         If the lease has already timed out
    */
   public void renew(long timeout) throws LeaseExpiredException {
-    if (hasExpired()) {
+    if(hasExpired()) {
       throw new LeaseExpiredException(messageForResource(resource));
     }
     leaseTimeout.addAndGet(timeout);
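
Each accessor in the Lease hunks above guards on expiry before doing any
work, and renew extends an atomic timeout. A minimal sketch of that guard
pattern; the unchecked exception and wall-clock time are simplifications
(the real class throws LeaseExpiredException and uses Time.monotonicNow):

    import java.util.concurrent.atomic.AtomicLong;

    final class LeaseSketch {
      private final long creationTime = System.currentTimeMillis();
      private final AtomicLong timeoutMs;

      LeaseSketch(long timeoutMs) {
        this.timeoutMs = new AtomicLong(timeoutMs);
      }

      boolean hasExpired() {
        return System.currentTimeMillis() - creationTime >= timeoutMs.get();
      }

      // Guard first, then mutate, mirroring Lease#renew.
      void renew(long extensionMs) {
        if (hasExpired()) {
          throw new IllegalStateException("lease expired");
        }
        timeoutMs.addAndGet(extensionMs);
      }
    }
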
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java
index 3f2d5fbe97..a79d5178e7 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java
@@ -53,7 +53,7 @@ public class LeaseCallbackExecutor<T> implements Runnable {
     if (LOG.isDebugEnabled()) {
       LOG.debug("Executing callbacks for lease on {}", resource);
     }
-    for (Callable<Void> callback : callbacks) {
+    for(Callable<Void> callback : callbacks) {
       try {
         callback.call();
       } catch (Exception e) {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ShutdownHookManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ShutdownHookManager.java
index b3ffe59f1d..45f0638b99 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ShutdownHookManager.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ShutdownHookManager.java
@@ -104,7 +104,7 @@ public final class ShutdownHookManager {
               long ended = System.currentTimeMillis();
               LOG.debug(String.format(
                   "Completed shutdown in %.3f seconds; Timeouts: %d",
-                  (ended - started) / 1000.0, timeoutCount));
+                  (ended-started)/1000.0, timeoutCount));
               // each of the hooks have executed; now shut down the
               // executor itself.
               shutdownExecutor(new OzoneConfiguration());
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index b4517c687e..e74621506c 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -722,11 +722,11 @@
     </description>
   </property>
   <property>
-    <name>ozone.scm.container.layout</name>
+    <name>ozone.scm.chunk.layout</name>
     <value>FILE_PER_BLOCK</value>
     <tag>OZONE, SCM, CONTAINER, PERFORMANCE</tag>
     <description>
-      Container layout defines how chunks, blocks and containers are stored on disk.
+      Chunk layout defines how chunks, blocks and containers are stored on disk.
       Each chunk is stored separately with FILE_PER_CHUNK.  All chunks of a
       block are stored in the same file with FILE_PER_BLOCK.  The default is
       FILE_PER_BLOCK.
@@ -2014,7 +2014,7 @@
   </property>
   <property>
     <name>ozone.scm.ratis.enable</name>
-    <value>true</value>
+    <value>false</value>
     <tag>OZONE, SCM, HA, RATIS</tag>
     <description>Property to enable or disable Ratis server on SCM.
       Please note - this is a temporary property to disable SCM Ratis server.
@@ -2518,24 +2518,6 @@
       OM snapshot.
     </description>
   </property>
-  <property>
-    <name>ozone.recon.scm.connection.request.timeout</name>
-    <value>5s</value>
-    <tag>OZONE, RECON, SCM</tag>
-    <description>
-      Connection request timeout for the HTTP call made by Recon to
-      request the SCM DB snapshot.
-    </description>
-  </property>
-  <property>
-    <name>ozone.recon.scm.connection.timeout</name>
-    <value>5s</value>
-    <tag>OZONE, RECON, SCM</tag>
-    <description>
-      Connection timeout for the HTTP call made by Recon to request the
-      SCM snapshot.
-    </description>
-  </property>
   <property>
     <name>ozone.recon.om.socket.timeout</name>
     <value>5s</value>
@@ -2569,41 +2551,6 @@
       Request to flush the OM DB before taking checkpoint snapshot.
     </description>
   </property>
-  <property>
-    <name>recon.om.delta.update.limit</name>
-    <value>2000</value>
-    <tag>OZONE, RECON</tag>
-    <description>
-      Each time, Recon fetches a limited batch of delta updates from OM.
-      The actual fetched data might be larger than this limit.
-    </description>
-  </property>
-  <property>
-    <name>recon.om.delta.update.loop.limit</name>
-    <value>10</value>
-    <tag>OZONE, RECON</tag>
-    <description>
-      The sync between Recon and OM consists of several small fetch
-      loops; this property caps the number of loops per sync run.
-    </description>
-  </property>
-  <property>
-    <name>ozone.recon.scm.container.threshold</name>
-    <value>100</value>
-    <tag>OZONE, RECON, SCM</tag>
-    <description>
-      Threshold value for the difference in the number of containers
-      between SCM and Recon.
-    </description>
-  </property>
-  <property>
-    <name>ozone.recon.scm.snapshot.enabled</name>
-    <value>false</value>
-    <tag>OZONE, RECON, SCM</tag>
-    <description>
-      If enabled, SCM DB Snapshot is taken by Recon.
-    </description>
-  </property>
   <property>
     <name>hdds.tracing.enabled</name>
     <value>false</value>
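
To make the defaults above concrete, a hedged snippet reading two of the
keys through OzoneConfiguration; the wrapper class is illustrative only,
and the fallback values shown are the post-revert defaults from this file:

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    public final class ReadDefaultsExample {
      private ReadDefaultsExample() { }

      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        String layout = conf.get("ozone.scm.chunk.layout", "FILE_PER_BLOCK");
        boolean scmRatis = conf.getBoolean("ozone.scm.ratis.enable", false);
        System.out.println("layout=" + layout + ", scm ratis=" + scmRatis);
      }
    }
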
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java
index fd8aa28e63..ef93927ee4 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java
@@ -122,7 +122,7 @@ public class TestHddsUtils {
     assertThat(addresses.size(), is(3));
     it = addresses.iterator();
     HashMap<String, Integer> expected1 = new HashMap<>(hostsAndPorts);
-    while (it.hasNext()) {
+    while(it.hasNext()) {
       InetSocketAddress current = it.next();
       assertTrue(expected1.remove(current.getHostName(),
           current.getPort()));
@@ -136,7 +136,7 @@ public class TestHddsUtils {
     assertThat(addresses.size(), is(3));
     it = addresses.iterator();
     HashMap<String, Integer> expected2 = new HashMap<>(hostsAndPorts);
-    while (it.hasNext()) {
+    while(it.hasNext()) {
       InetSocketAddress current = it.next();
       assertTrue(expected2.remove(current.getHostName(),
           current.getPort()));
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java
index 1315ad5ec8..9adf8f7fbf 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java
@@ -167,7 +167,7 @@ public class TestReplicationConfig {
   @Test
   public void testValidationBasedOnConfig() {
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(OZONE_REPLICATION + ".allowed-configs",
+    conf.set(OZONE_REPLICATION+".allowed-configs",
         "^STANDALONE/ONE|RATIS/THREE$");
     conf.set(OZONE_REPLICATION, factor);
     conf.set(OZONE_REPLICATION_TYPE, type);
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java
index 0e817116da..8a177042a6 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java
@@ -22,11 +22,9 @@ import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.OutputStreamWriter;
-import java.net.URL;
 import java.nio.charset.StandardCharsets;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 
 import org.junit.Assert;
@@ -192,32 +190,6 @@ public class TestOzoneConfiguration {
         subject.getTimeDuration("test.scm.client.wait", 555, TimeUnit.SECONDS));
   }
 
-  @Test
-  public void testInstantiationWithInputConfiguration() throws IOException {
-    String key = "hdds.scm.init.default.layout.version";
-    String val = "Test1";
-    Configuration configuration = new Configuration(true);
-
-    File ozoneSite = tempConfigs.newFile("ozone-site.xml");
-    FileOutputStream ozoneSiteStream = new FileOutputStream(ozoneSite);
-    try (BufferedWriter out = new BufferedWriter(new OutputStreamWriter(
-        ozoneSiteStream, StandardCharsets.UTF_8))) {
-      startConfig(out);
-      appendProperty(out, key, val);
-      endConfig(out);
-    }
-    configuration
-        .addResource(new URL("file:///" + ozoneSite.getAbsolutePath()));
-
-    OzoneConfiguration ozoneConfiguration =
-        new OzoneConfiguration(configuration);
-    // ozoneConfig value matches input config value for the corresponding key
-    Assert.assertEquals(val, ozoneConfiguration.get(key));
-    Assert.assertEquals(val, configuration.get(key));
-
-    Assert.assertNotEquals(val, new OzoneConfiguration().get(key));
-  }
-
   @Test
   public void setConfigFromObjectWithObjectDefaults() {
     // GIVEN
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java
index c9ed258f24..b057349ed8 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java
@@ -96,7 +96,7 @@ public class TestDU {
 
     long usedSpace = du.getUsedSpace();
 
-    assertFileSize(4 * KB, usedSpace);
+    assertFileSize(4*KB, usedSpace);
   }
 
   private static void assertFileSize(long expected, long actual) {
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestContainerCommandRequestMessage.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestContainerCommandRequestMessage.java
index d3ddbe0ef3..469faac744 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestContainerCommandRequestMessage.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestContainerCommandRequestMessage.java
@@ -45,7 +45,7 @@ public class TestContainerCommandRequestMessage {
 
   static ByteString newData(int length) {
     final ByteString.Output out = ByteString.newOutput();
-    for (int i = 0; i < length; i++) {
+    for(int i = 0; i < length; i++) {
       out.write(RANDOM.nextInt());
     }
     return out.toByteString();
@@ -128,10 +128,10 @@ public class TestContainerCommandRequestMessage {
   static void runTest(
       BiFunction<BlockID, ByteString, ContainerCommandRequestProto> method)
       throws Exception {
-    for (int i = 0; i < 2; i++) {
+    for(int i = 0; i < 2; i++) {
       runTest(i, method);
     }
-    for (int i = 2; i < 1 << 10;) {
+    for(int i = 2; i < 1 << 10;) {
       runTest(i + 1 + RANDOM.nextInt(i - 1), method);
       i <<= 1;
       runTest(i, method);
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReplicaInfo.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReplicaInfo.java
deleted file mode 100644
index 195baca2db..0000000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReplicaInfo.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.UUID;
-
-/**
- * Test for the ContainerReplicaInfo class.
- */
-public class TestContainerReplicaInfo {
-
-  @Test
-  public void testObjectCreatedFromProto() {
-    HddsProtos.SCMContainerReplicaProto proto =
-        HddsProtos.SCMContainerReplicaProto.newBuilder()
-            .setKeyCount(10)
-            .setBytesUsed(12345)
-            .setContainerID(567)
-            .setPlaceOfBirth(UUID.randomUUID().toString())
-            .setSequenceID(5)
-            .setDatanodeDetails(MockDatanodeDetails.randomDatanodeDetails()
-                .getProtoBufMessage())
-            .setState("OPEN")
-            .build();
-
-    ContainerReplicaInfo info = ContainerReplicaInfo.fromProto(proto);
-
-    Assert.assertEquals(proto.getContainerID(), info.getContainerID());
-    Assert.assertEquals(proto.getBytesUsed(), info.getBytesUsed());
-    Assert.assertEquals(proto.getKeyCount(), info.getKeyCount());
-    Assert.assertEquals(proto.getPlaceOfBirth(),
-        info.getPlaceOfBirth().toString());
-    Assert.assertEquals(DatanodeDetails.getFromProtoBuf(
-        proto.getDatanodeDetails()), info.getDatanodeDetails());
-    Assert.assertEquals(proto.getSequenceID(), info.getSequenceId());
-    Assert.assertEquals(proto.getState(), info.getState());
-  }
-}
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManagerReport.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManagerReport.java
deleted file mode 100644
index a05f9abb2b..0000000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManagerReport.java
+++ /dev/null
@@ -1,162 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.container;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-import java.util.concurrent.ThreadLocalRandom;
-
-/**
- * Tests for the ReplicationManagerReport class.
- */
-public class TestReplicationManagerReport {
-
-  private ReplicationManagerReport report;
-
-  @Before
-  public void setup() {
-    report = new ReplicationManagerReport();
-  }
-
-  @Test
-  public void testMetricCanBeIncremented() {
-    report.increment(ReplicationManagerReport.HealthState.UNDER_REPLICATED);
-    report.increment(ReplicationManagerReport.HealthState.UNDER_REPLICATED);
-    report.increment(ReplicationManagerReport.HealthState.OVER_REPLICATED);
-
-    report.increment(HddsProtos.LifeCycleState.OPEN);
-    report.increment(HddsProtos.LifeCycleState.CLOSED);
-    report.increment(HddsProtos.LifeCycleState.CLOSED);
-
-    Assert.assertEquals(2,
-        report.getStat(ReplicationManagerReport.HealthState.UNDER_REPLICATED));
-    Assert.assertEquals(1,
-        report.getStat(ReplicationManagerReport.HealthState.OVER_REPLICATED));
-    Assert.assertEquals(0,
-        report.getStat(ReplicationManagerReport.HealthState.MIS_REPLICATED));
-
-    Assert.assertEquals(1,
-        report.getStat(HddsProtos.LifeCycleState.OPEN));
-    Assert.assertEquals(2,
-        report.getStat(HddsProtos.LifeCycleState.CLOSED));
-    Assert.assertEquals(0,
-        report.getStat(HddsProtos.LifeCycleState.QUASI_CLOSED));
-  }
-
-  @Test
-  public void testContainerIDsCanBeSampled() {
-    report.incrementAndSample(
-        ReplicationManagerReport.HealthState.UNDER_REPLICATED,
-        new ContainerID(1));
-    report.incrementAndSample(
-        ReplicationManagerReport.HealthState.UNDER_REPLICATED,
-        new ContainerID(2));
-    report.incrementAndSample(
-        ReplicationManagerReport.HealthState.OVER_REPLICATED,
-        new ContainerID(3));
-
-    Assert.assertEquals(2,
-        report.getStat(ReplicationManagerReport.HealthState.UNDER_REPLICATED));
-    Assert.assertEquals(1,
-        report.getStat(ReplicationManagerReport.HealthState.OVER_REPLICATED));
-    Assert.assertEquals(0,
-        report.getStat(ReplicationManagerReport.HealthState.MIS_REPLICATED));
-
-    List<ContainerID> sample =
-        report.getSample(ReplicationManagerReport.HealthState.UNDER_REPLICATED);
-    Assert.assertEquals(new ContainerID(1), sample.get(0));
-    Assert.assertEquals(new ContainerID(2), sample.get(1));
-    Assert.assertEquals(2, sample.size());
-
-    sample =
-        report.getSample(ReplicationManagerReport.HealthState.OVER_REPLICATED);
-    Assert.assertEquals(new ContainerID(3), sample.get(0));
-    Assert.assertEquals(1, sample.size());
-
-    sample =
-        report.getSample(ReplicationManagerReport.HealthState.MIS_REPLICATED);
-    Assert.assertEquals(0, sample.size());
-  }
-
-  @Test
-  public void testSamplesAreLimited() {
-    for (int i = 0; i < ReplicationManagerReport.SAMPLE_LIMIT * 2; i++) {
-      report.incrementAndSample(
-          ReplicationManagerReport.HealthState.UNDER_REPLICATED,
-          new ContainerID(i));
-    }
-    List<ContainerID> sample =
-        report.getSample(ReplicationManagerReport.HealthState.UNDER_REPLICATED);
-    Assert.assertEquals(ReplicationManagerReport.SAMPLE_LIMIT, sample.size());
-    for (int i = 0; i < ReplicationManagerReport.SAMPLE_LIMIT; i++) {
-      Assert.assertEquals(new ContainerID(i), sample.get(i));
-    }
-  }
-
-  @Test
-  public void testSerializeToProtoAndBack() {
-    report.setTimestamp(12345);
-    Random rand = ThreadLocalRandom.current();
-    for (HddsProtos.LifeCycleState s : HddsProtos.LifeCycleState.values()) {
-      report.setStat(s.toString(), rand.nextInt(Integer.MAX_VALUE));
-    }
-    for (ReplicationManagerReport.HealthState s :
-        ReplicationManagerReport.HealthState.values()) {
-      report.setStat(s.toString(), rand.nextInt(Integer.MAX_VALUE));
-      List<ContainerID> containers = new ArrayList<>();
-      for (int i = 0; i < 10; i++) {
-        containers.add(ContainerID.valueOf(rand.nextInt(Integer.MAX_VALUE)));
-      }
-      report.setSample(s.toString(), containers);
-    }
-    HddsProtos.ReplicationManagerReportProto proto = report.toProtobuf();
-    ReplicationManagerReport newReport
-        = ReplicationManagerReport.fromProtobuf(proto);
-    Assert.assertEquals(report.getReportTimeStamp(),
-        newReport.getReportTimeStamp());
-
-    for (HddsProtos.LifeCycleState s : HddsProtos.LifeCycleState.values()) {
-      Assert.assertEquals(report.getStat(s), newReport.getStat(s));
-    }
-
-    for (ReplicationManagerReport.HealthState s :
-        ReplicationManagerReport.HealthState.values()) {
-      Assert.assertTrue(report.getSample(s).equals(newReport.getSample(s)));
-    }
-  }
-
-  @Test(expected = IllegalStateException.class)
-  public void testStatCannotBeSetTwice() {
-    report.setStat(HddsProtos.LifeCycleState.CLOSED.toString(), 10);
-    report.setStat(HddsProtos.LifeCycleState.CLOSED.toString(), 10);
-  }
-
-  @Test(expected = IllegalStateException.class)
-  public void testSampleCannotBeSetTwice() {
-    List<ContainerID> containers = new ArrayList<>();
-    containers.add(ContainerID.valueOf(1));
-    report.setSample(HddsProtos.LifeCycleState.CLOSED.toString(), containers);
-    report.setSample(HddsProtos.LifeCycleState.CLOSED.toString(), containers);
-  }
-}
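
For context, the API surface covered by the deleted TestReplicationManagerReport
is small: per-state counters plus a bounded sample of container IDs. A minimal
usage sketch, reconstructed from the deleted test above (the wrapper class and
main method are illustrative only, not part of the patch):

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.container.ContainerID;
    import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport;

    // Illustrative only; reconstructed from the deleted test, not shipped code.
    public final class ReportUsageSketch {
      public static void main(String[] args) {
        ReplicationManagerReport report = new ReplicationManagerReport();
        // Counters can be keyed by container life-cycle state...
        report.increment(HddsProtos.LifeCycleState.OPEN);
        // ...or by health state, optionally recording the container ID.
        // Samples are capped at ReplicationManagerReport.SAMPLE_LIMIT.
        report.incrementAndSample(
            ReplicationManagerReport.HealthState.UNDER_REPLICATED,
            ContainerID.valueOf(1));
        // Read the counter and the sampled IDs back.
        System.out.println(report.getStat(
            ReplicationManagerReport.HealthState.UNDER_REPLICATED)); // 1
        System.out.println(report.getSample(
            ReplicationManagerReport.HealthState.UNDER_REPLICATED));
      }
    }
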
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/package-info.java
deleted file mode 100644
index 2f459fbcba..0000000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- Test cases for SCM container client classes.
- */
-package org.apache.hadoop.hdds.scm.container;
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMNodeInfo.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMNodeInfo.java
index e561bb7ccc..12a024005a 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMNodeInfo.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMNodeInfo.java
@@ -93,7 +93,7 @@ public class TestSCMNodeInfo {
     int count = 1;
     for (SCMNodeInfo scmNodeInfo : scmNodeInfos) {
       Assert.assertEquals(scmServiceId, scmNodeInfo.getServiceId());
-      Assert.assertEquals("scm" + count++, scmNodeInfo.getNodeId());
+      Assert.assertEquals("scm"+count++, scmNodeInfo.getNodeId());
       Assert.assertEquals("localhost:" + ++port,
           scmNodeInfo.getBlockClientAddress());
       Assert.assertEquals("localhost:" + ++port,
@@ -117,7 +117,7 @@ public class TestSCMNodeInfo {
     int count = 1;
     for (SCMNodeInfo scmNodeInfo : scmNodeInfos) {
       Assert.assertEquals(scmServiceId, scmNodeInfo.getServiceId());
-      Assert.assertEquals("scm" + count++, scmNodeInfo.getNodeId());
+      Assert.assertEquals("scm"+count++, scmNodeInfo.getNodeId());
       Assert.assertEquals("localhost:" + OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT,
           scmNodeInfo.getBlockClientAddress());
       Assert.assertEquals("localhost:" +
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java
index e50eca2e69..0008e6670a 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java
@@ -171,7 +171,7 @@ public class TestNetworkTopologyImpl {
   @Test
   public void testContains() {
     Node nodeNotInMap = createDatanode("8.8.8.8", "/d2/r4");
-    for (int i = 0; i < dataNodes.length; i++) {
+    for (int i=0; i < dataNodes.length; i++) {
       assertTrue(cluster.contains(dataNodes[i]));
     }
     assertFalse(cluster.contains(nodeNotInMap));
@@ -238,7 +238,7 @@ public class TestNetworkTopologyImpl {
     assumeTrue(cluster.getMaxLevel() > 2);
     int maxLevel = cluster.getMaxLevel();
     assertTrue(cluster.isSameParent(dataNodes[0], dataNodes[1]));
-    while (maxLevel > 1) {
+    while(maxLevel > 1) {
       assertTrue(cluster.isSameAncestor(dataNodes[0], dataNodes[1],
           maxLevel - 1));
       maxLevel--;
@@ -262,17 +262,17 @@ public class TestNetworkTopologyImpl {
 
   @Test
   public void testAddRemove() {
-    for (int i = 0; i < dataNodes.length; i++) {
+    for(int i = 0; i < dataNodes.length; i++) {
       cluster.remove(dataNodes[i]);
     }
-    for (int i = 0; i < dataNodes.length; i++) {
+    for(int i = 0; i < dataNodes.length; i++) {
       assertFalse(cluster.contains(dataNodes[i]));
     }
     // no leaf nodes
     assertEquals(0, cluster.getNumOfLeafNode(null));
     // no inner nodes
     assertEquals(0, cluster.getNumOfNodes(2));
-    for (int i = 0; i < dataNodes.length; i++) {
+    for(int i = 0; i < dataNodes.length; i++) {
       cluster.add(dataNodes[i]);
     }
     // Inner nodes are created automatically
@@ -467,10 +467,10 @@ public class TestNetworkTopologyImpl {
         }};
     int leafNum = cluster.getNumOfLeafNode(null);
     Map<Node, Integer> frequency;
-    for (Node[] list : excludedNodeLists) {
+    for(Node[] list : excludedNodeLists) {
       List<Node> excludedList = Arrays.asList(list);
       int ancestorGen = 0;
-      while (ancestorGen < cluster.getMaxLevel()) {
+      while(ancestorGen < cluster.getMaxLevel()) {
         frequency = pickNodesAtRandom(leafNum, null, excludedList, ancestorGen);
         List<Node> ancestorList = NetUtils.getAncestorList(cluster,
             excludedList, ancestorGen);
@@ -490,7 +490,7 @@ public class TestNetworkTopologyImpl {
     // all nodes excluded, no node will be picked
     List<Node> excludedList = Arrays.asList(dataNodes);
     int ancestorGen = 0;
-    while (ancestorGen < cluster.getMaxLevel()) {
+    while(ancestorGen < cluster.getMaxLevel()) {
       frequency = pickNodesAtRandom(leafNum, null, excludedList, ancestorGen);
       for (Node key : dataNodes) {
         assertTrue(frequency.get(key) == 0);
@@ -500,7 +500,7 @@ public class TestNetworkTopologyImpl {
     // out scope excluded nodes, each node will be picked
     excludedList = Arrays.asList(createDatanode("1.1.1.1.", "/city1/rack1"));
     ancestorGen = 0;
-    while (ancestorGen < cluster.getMaxLevel()) {
+    while(ancestorGen < cluster.getMaxLevel()) {
       frequency = pickNodes(leafNum, null, excludedList, null, ancestorGen);
       for (Node key : dataNodes) {
         assertTrue(frequency.get(key) != 0);
@@ -536,7 +536,7 @@ public class TestNetworkTopologyImpl {
       while (!path.equals(ROOT)) {
         scope = "~" + path;
         int ancestorGen = 0;
-        while (ancestorGen < cluster.getMaxLevel()) {
+        while(ancestorGen < cluster.getMaxLevel()) {
           for (Node[] list : excludedNodeLists) {
             List<Node> excludedList = Arrays.asList(list);
             frequency =
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipeline.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipeline.java
index 00124d9cdd..b7b3dc6340 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipeline.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipeline.java
@@ -51,7 +51,7 @@ public final class MockPipeline {
   public static Pipeline createPipeline(int numNodes) throws IOException {
     Preconditions.checkArgument(numNodes >= 1);
     final List<DatanodeDetails> ids = new ArrayList<>(numNodes);
-    for (int i = 0; i < numNodes; i++) {
+    for(int i = 0; i < numNodes; i++) {
       ids.add(MockDatanodeDetails.randomLocalDatanodeDetails());
     }
     return createPipeline(ids);
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestStringCodec.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestStringCodec.java
index f77e84a1db..f756008a4c 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestStringCodec.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestStringCodec.java
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -23,7 +23,7 @@ import io.jaegertracing.internal.exceptions.MalformedTracerStateStringException;
 import org.apache.ozone.test.LambdaTestUtils;
 import org.junit.jupiter.api.Test;
 
-import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 class TestStringCodec {
 
@@ -44,17 +44,9 @@ class TestStringCodec {
         "String does not match tracer state format",
         () -> codec.extract(sb));
     sb.append(":66");
-
     JaegerSpanContext context = codec.extract(sb);
-    StringBuilder injected = new StringBuilder();
-    codec.inject(context, injected);
-
-    String expectedTraceId = pad("123");
-    assertEquals(expectedTraceId, context.getTraceId());
-    assertEquals(expectedTraceId + ":456:789:66", injected.toString());
-  }
-
-  private static String pad(String s) {
-    return "0000000000000000".substring(s.length()) + s;
+    String expectedContextString = "123:456:789:66";
+    assertTrue(context.getTraceId().equals("123"));
+    assertTrue(context.toString().equals(expectedContextString));
   }
 }
\ No newline at end of file
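
The reverted assertions above encode Jaeger's plain tracer-state format,
traceId:spanId:parentId:flags, without the zero-padding handled by the removed
pad() helper. A hedged sketch of the extraction step, assuming the no-arg
StringCodec constructor the test presumably uses in its setup (not shown here):

    import io.jaegertracing.internal.JaegerSpanContext;
    import org.apache.hadoop.hdds.tracing.StringCodec;

    // Sketch only; the constructor is assumed, not shown in this hunk.
    public final class TraceStateSketch {
      public static void main(String[] args) {
        StringCodec codec = new StringCodec();
        // Carrier holds "traceId:spanId:parentId:flags".
        JaegerSpanContext ctx =
            codec.extract(new StringBuilder("123:456:789:66"));
        System.out.println(ctx.getTraceId()); // "123" per the reverted test
        System.out.println(ctx);              // "123:456:789:66"
      }
    }
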
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/MockGatheringChannel.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/MockGatheringChannel.java
index ce6f58dadc..fe4ccc0cb5 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/MockGatheringChannel.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/MockGatheringChannel.java
@@ -43,7 +43,7 @@ public class MockGatheringChannel implements GatheringByteChannel {
       throws IOException {
 
     checkElementIndex(offset, srcs.length, "offset");
-    checkElementIndex(offset + length - 1, srcs.length, "offset+length");
+    checkElementIndex(offset+length-1, srcs.length, "offset+length");
 
     long bytes = 0;
     for (ByteBuffer b : srcs) {
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestResourceSemaphore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestResourceSemaphore.java
index f9c194d45c..cbdd558cbe 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestResourceSemaphore.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestResourceSemaphore.java
@@ -52,18 +52,18 @@ public class TestResourceSemaphore {
     try {
       g.release(1, 0);
       Assert.fail("Should have failed.");
-    } catch (IllegalStateException e) {
+    } catch (IllegalStateException e){
     }
     try {
       g.release(0, 1);
       Assert.fail("Should have failed.");
-    } catch (IllegalStateException e) {
+    } catch (IllegalStateException e){
     }
   }
 
   static void assertUsed(ResourceSemaphore.Group g, int... expected) {
     Assert.assertEquals(expected.length, g.resourceSize());
-    for (int i = 0; i < expected.length; i++) {
+    for(int i = 0; i < expected.length; i++) {
       Assert.assertEquals(expected[i], g.get(i).used());
     }
   }
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyEntity.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyEntity.java
index 9555225b22..0c2d98fab2 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyEntity.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyEntity.java
@@ -27,7 +27,7 @@ public class DummyEntity implements Auditable {
   private String key1;
   private String key2;
 
-  public DummyEntity() {
+  public DummyEntity(){
     this.key1 = "value1";
     this.key2 = "value2";
   }
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java
index 01fceaea88..41dc4f5b7e 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java
@@ -189,7 +189,7 @@ public class TestOzoneAuditLogger {
       lines = FileUtils.readLines(file, (String)null);
       try {
         Thread.sleep(500 * (i + 1));
-      } catch (InterruptedException ie) {
+      } catch(InterruptedException ie) {
         Thread.currentThread().interrupt();
         break;
       }
@@ -212,7 +212,7 @@ public class TestOzoneAuditLogger {
     assertEquals(0, lines.size());
   }
 
-  private static class TestException extends Exception {
+  private static class TestException extends Exception{
     TestException(String message) {
       super(message);
     }
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java
index a61ff9054b..2e144e6569 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java
@@ -73,7 +73,7 @@ public class TestChecksum {
     // Change the data and check if new checksum matches the original checksum.
     // Modifying one byte of data should be enough for the checksum data to
     // mismatch
-    data[50] = (byte) (data[50] + 1);
+    data[50] = (byte) (data[50]+1);
     ChecksumData newChecksumData = checksum.computeChecksum(data);
     Assert.assertNotEquals("Checksums should not match for different data",
         originalChecksumData, newChecksumData);
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java
index 1e85099179..9b69fad791 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java
@@ -46,7 +46,7 @@ public class TestChunkBuffer {
   public void testImplWithByteBuffer() {
     runTestImplWithByteBuffer(1);
     runTestImplWithByteBuffer(1 << 10);
-    for (int i = 0; i < 10; i++) {
+    for(int i = 0; i < 10; i++) {
       runTestImplWithByteBuffer(nextInt(100) + 1);
     }
   }
@@ -62,7 +62,7 @@ public class TestChunkBuffer {
     runTestIncrementalChunkBuffer(1, 1);
     runTestIncrementalChunkBuffer(4, 8);
     runTestIncrementalChunkBuffer(16, 1 << 10);
-    for (int i = 0; i < 10; i++) {
+    for(int i = 0; i < 10; i++) {
       final int a = ThreadLocalRandom.current().nextInt(100) + 1;
       final int b = ThreadLocalRandom.current().nextInt(100) + 1;
       runTestIncrementalChunkBuffer(Math.min(a, b), Math.max(a, b));
@@ -80,7 +80,7 @@ public class TestChunkBuffer {
   public void testImplWithList() {
     runTestImplWithList(4, 8);
     runTestImplWithList(16, 1 << 10);
-    for (int i = 0; i < 10; i++) {
+    for(int i = 0; i < 10; i++) {
       final int a = ThreadLocalRandom.current().nextInt(10) + 1;
       final int b = ThreadLocalRandom.current().nextInt(100) + 1;
       runTestImplWithList(Math.min(a, b), Math.max(a, b));
@@ -131,7 +131,7 @@ public class TestChunkBuffer {
       assertIterate(expected, impl, bpc);
     } else if (bpc == 0) {
       for (int d = 1; d < 5; d++) {
-        final int bytesPerChecksum = n / d;
+        final int bytesPerChecksum = n/d;
         if (bytesPerChecksum > 0) {
           assertIterate(expected, impl, bytesPerChecksum);
         }
@@ -148,7 +148,7 @@ public class TestChunkBuffer {
   private static void assertDuplicate(byte[] expected, ChunkBuffer impl) {
     final int n = expected.length;
     assertToByteString(expected, 0, n, impl);
-    for (int i = 0; i < 10; i++) {
+    for(int i = 0; i < 10; i++) {
       final int offset = nextInt(n);
       final int length = nextInt(n - offset + 1);
       assertToByteString(expected, offset, length, impl);
@@ -165,14 +165,14 @@ public class TestChunkBuffer {
     final int numChecksums = (n + bpc - 1) / bpc;
     final Iterator<ByteBuffer> i = duplicated.iterate(bpc).iterator();
     int count = 0;
-    for (int j = 0; j < numChecksums; j++) {
+    for(int j = 0; j < numChecksums; j++) {
       final ByteBuffer b = i.next();
-      final int expectedRemaining = j < numChecksums - 1 ?
-          bpc : n - bpc * (numChecksums - 1);
+      final int expectedRemaining = j < numChecksums - 1?
+          bpc : n - bpc *(numChecksums - 1);
       Assert.assertEquals(expectedRemaining, b.remaining());
 
-      final int offset = j * bpc;
-      for (int k = 0; k < expectedRemaining; k++) {
+      final int offset = j* bpc;
+      for(int k = 0; k < expectedRemaining; k++) {
         Assert.assertEquals(expected[offset + k], b.get());
         count++;
       }
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestStateMachine.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestStateMachine.java
index be0575d9d0..c1470bb2ef 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestStateMachine.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestStateMachine.java
@@ -45,12 +45,12 @@ public class TestStateMachine {
   /**
    * STATES used by the test state machine.
    */
-  public enum STATES { INIT, CREATING, OPERATIONAL, CLOSED, CLEANUP, FINAL };
+  public enum STATES {INIT, CREATING, OPERATIONAL, CLOSED, CLEANUP, FINAL};
 
   /**
    * EVENTS used by the test state machine.
    */
-  public enum EVENTS { ALLOCATE, CREATE, UPDATE, CLOSE, DELETE, TIMEOUT };
+  public enum EVENTS {ALLOCATE, CREATE, UPDATE, CLOSE, DELETE, TIMEOUT};
 
   @Rule
   public ExpectedException exception = ExpectedException.none();
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
index c2e4c5542a..95282d5f7b 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
@@ -352,7 +352,7 @@ public final class ContainerTestHelper {
     LOG.trace("addContainer: {}", containerID);
 
     Builder request = getContainerCommandRequestBuilder(containerID, pipeline);
-    if (token != null) {
+    if(token != null){
       request.setEncodedToken(token.encodeToUrlString());
     }
     return request.build();
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java
index c9b9bf1de5..a51be5ff3a 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java
@@ -51,7 +51,7 @@ public class TestLeaseManager {
 
     @Override
     public boolean equals(Object obj) {
-      if (obj instanceof DummyResource) {
+      if(obj instanceof DummyResource) {
         return name.equals(((DummyResource) obj).name);
       }
       return false;
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/UpgradeTestUtils.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestUpgradeUtils.java
similarity index 95%
rename from hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/UpgradeTestUtils.java
rename to hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestUpgradeUtils.java
index d3990e6098..6daec6c80e 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/UpgradeTestUtils.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestUpgradeUtils.java
@@ -28,8 +28,8 @@ import java.util.UUID;
 /**
  * Upgrade related test utility methods.
  */
-public final class UpgradeTestUtils {
-  private UpgradeTestUtils() { }
+public final class TestUpgradeUtils {
+  private TestUpgradeUtils() { }
 
   /**
    * Creates a VERSION file for the specified node type under the directory
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
index 3b9afa9312..58c6e72846 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
@@ -111,12 +111,12 @@ public class HddsDatanodeService extends GenericCli implements ServicePlugin {
   private final Map<String, RatisDropwizardExports> ratisMetricsMap =
       new ConcurrentHashMap<>();
   private DNMXBeanImpl serviceRuntimeInfo =
-      new DNMXBeanImpl(HddsVersionInfo.HDDS_VERSION_INFO) { };
+      new DNMXBeanImpl(HddsVersionInfo.HDDS_VERSION_INFO) {};
   private ObjectName dnInfoBeanName;
   private DatanodeCRLStore dnCRLStore;
 
   //Constructor for DataNode PluginService
-  public HddsDatanodeService() { }
+  public HddsDatanodeService(){}
 
   public HddsDatanodeService(boolean printBanner, String[] args) {
     this.printBanner = printBanner;
@@ -376,7 +376,7 @@ public class HddsDatanodeService extends GenericCli implements ServicePlugin {
               datanodeDetails.getProtoBufMessage(),
               getEncodedString(csr));
       // Persist certificates.
-      if (response.hasX509CACertificate()) {
+      if(response.hasX509CACertificate()) {
         String pemEncodedCert = response.getX509Certificate();
         dnCertClient.storeCertificate(pemEncodedCert, true);
         dnCertClient.storeCertificate(response.getX509CACertificate(), true,
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
index 3d6cb3b135..970251c673 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
@@ -44,7 +44,7 @@ import org.apache.hadoop.metrics2.lib.MutableRate;
  *
  */
 @InterfaceAudience.Private
-@Metrics(about = "Storage Container DataNode Metrics", context = "dfs")
+@Metrics(about="Storage Container DataNode Metrics", context="dfs")
 public class ContainerMetrics {
   public static final String STORAGE_CONTAINER_METRICS =
       "StorageContainerMetrics";
@@ -106,7 +106,7 @@ public class ContainerMetrics {
     numOpsArray[type.ordinal()].incr();
   }
 
-  public long getContainerOpsMetrics(ContainerProtos.Type type) {
+  public long getContainerOpsMetrics(ContainerProtos.Type type){
     return numOpsArray[type.ordinal()].value();
   }
 
@@ -122,7 +122,7 @@ public class ContainerMetrics {
     opsBytesArray[type.ordinal()].incr(bytes);
   }
 
-  public long getContainerBytesMetrics(ContainerProtos.Type type) {
+  public long getContainerBytesMetrics(ContainerProtos.Type type){
     return opsBytesArray[type.ordinal()].value();
   }
 }
\ No newline at end of file
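
The two getters touched above read the per-operation-type counters; the
increment side lives in the same class. A hedged sketch pairing them up (the
create(conf) factory and the incContainerOpsMetrics name are assumptions drawn
from the class's existing API, not shown in this hunk):

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
    import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;

    // Sketch only; factory and increment method are assumed, not shown above.
    public final class ContainerMetricsSketch {
      public static void main(String[] args) {
        ContainerMetrics metrics =
            ContainerMetrics.create(new OzoneConfiguration());
        metrics.incContainerOpsMetrics(ContainerProtos.Type.WriteChunk);
        // Reads back the counter bumped above.
        System.out.println(
            metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
      }
    }
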
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
index 2b6318385d..032705d4ee 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
@@ -189,7 +189,7 @@ public final class ContainerUtils {
             HddsConfigKeys.HDDS_CONTAINER_CHECKSUM_VERIFICATION_ENABLED,
             HddsConfigKeys.
                     HDDS_CONTAINER_CHECKSUM_VERIFICATION_ENABLED_DEFAULT);
-    if (enabled) {
+    if(enabled) {
       String storedChecksum = containerData.getChecksum();
 
       Yaml yaml = ContainerDataYaml.getYamlForContainerType(
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeVersionFile.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeVersionFile.java
index d2ceacd37a..4db6d3120f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeVersionFile.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeVersionFile.java
@@ -36,15 +36,15 @@ public class DatanodeVersionFile {
   private final String clusterId;
   private final String datanodeUuid;
   private final long cTime;
-  private final int layoutVersion;
+  private final int layOutVersion;
 
   public DatanodeVersionFile(String storageId, String clusterId,
-      String datanodeUuid, long cTime, int layoutVersion) {
+      String datanodeUuid, long cTime, int layOutVersion) {
     this.storageId = storageId;
     this.clusterId = clusterId;
     this.datanodeUuid = datanodeUuid;
     this.cTime = cTime;
-    this.layoutVersion = layoutVersion;
+    this.layOutVersion = layOutVersion;
   }
 
   private Properties createProperties() {
@@ -54,7 +54,7 @@ public class DatanodeVersionFile {
     properties.setProperty(OzoneConsts.DATANODE_UUID, datanodeUuid);
     properties.setProperty(OzoneConsts.CTIME, String.valueOf(cTime));
     properties.setProperty(OzoneConsts.LAYOUTVERSION, String.valueOf(
-        layoutVersion));
+        layOutVersion));
     return properties;
   }
 
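For reference, the constructor reshaped above takes the five VERSION-file
fields in declaration order. A minimal sketch with placeholder values
(persisting the properties to disk goes through a write method that is not
part of this hunk):

    import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile;

    // Sketch only; all field values are placeholders.
    public final class VersionFileSketch {
      public static void main(String[] args) {
        DatanodeVersionFile versionFile = new DatanodeVersionFile(
            "storage-1", "CID-cluster-1", "dn-uuid-1",
            System.currentTimeMillis(), 1 /* layOutVersion */);
        // Writing the file out is done by a method not shown in this hunk.
      }
    }
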
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java
similarity index 77%
rename from hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java
rename to hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java
index a5901a2087..e0341fa0d8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java
@@ -36,7 +36,7 @@ import org.slf4j.LoggerFactory;
 /**
  * Defines layout versions for the Chunks.
  */
-public enum ContainerLayoutVersion {
+public enum ChunkLayOutVersion {
 
   FILE_PER_CHUNK(1, "One file per chunk") {
     @Override
@@ -54,31 +54,29 @@ public enum ContainerLayoutVersion {
   };
 
   private static final Logger LOG =
-      LoggerFactory.getLogger(ContainerLayoutVersion.class);
+      LoggerFactory.getLogger(ChunkLayOutVersion.class);
 
-  private static final ContainerLayoutVersion
-      DEFAULT_LAYOUT = ContainerLayoutVersion.FILE_PER_BLOCK;
+  private static final ChunkLayOutVersion
+      DEFAULT_LAYOUT = ChunkLayOutVersion.FILE_PER_BLOCK;
 
-  private static final List<ContainerLayoutVersion> CONTAINER_LAYOUT_VERSIONS =
+  private static final List<ChunkLayOutVersion> CHUNK_LAYOUT_VERSIONS =
       ImmutableList.copyOf(values());
 
   private final int version;
   private final String description;
 
-  ContainerLayoutVersion(int version, String description) {
+  ChunkLayOutVersion(int version, String description) {
     this.version = version;
     this.description = description;
   }
 
   /**
-   * Return ContainerLayoutVersion object for the numeric containerVersion.
+   * Return ChunkLayOutVersion object for the numeric chunkVersion.
    */
-  public static ContainerLayoutVersion getContainerLayoutVersion(
-      int containerVersion) {
-    for (ContainerLayoutVersion containerLayoutVersion :
-        CONTAINER_LAYOUT_VERSIONS) {
-      if (containerLayoutVersion.getVersion() == containerVersion) {
-        return containerLayoutVersion;
+  public static ChunkLayOutVersion getChunkLayOutVersion(int chunkVersion) {
+    for (ChunkLayOutVersion chunkLayOutVersion : CHUNK_LAYOUT_VERSIONS) {
+      if (chunkLayOutVersion.getVersion() == chunkVersion) {
+        return chunkLayOutVersion;
       }
     }
     return null;
@@ -87,17 +85,17 @@ public enum ContainerLayoutVersion {
   /**
    * @return list of all versions.
    */
-  public static List<ContainerLayoutVersion> getAllVersions() {
-    return CONTAINER_LAYOUT_VERSIONS;
+  public static List<ChunkLayOutVersion> getAllVersions() {
+    return CHUNK_LAYOUT_VERSIONS;
   }
 
   /**
    * @return the latest version.
    */
-  public static ContainerLayoutVersion getConfiguredVersion(
+  public static ChunkLayOutVersion getConfiguredVersion(
       ConfigurationSource conf) {
     try {
-      return conf.getEnum(ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY,
+      return conf.getEnum(ScmConfigKeys.OZONE_SCM_CHUNK_LAYOUT_KEY,
           DEFAULT_LAYOUT);
     } catch (IllegalArgumentException e) {
       return DEFAULT_LAYOUT;
@@ -129,7 +127,7 @@ public enum ContainerLayoutVersion {
 
   @Override
   public String toString() {
-    return "ContainerLayout:v" + version;
+    return "ChunkLayout:v" + version;
   }
 
   private static File getChunkDir(ContainerData containerData)
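
The rename restores the lookup-by-number entry point; note that it returns
null for an unknown numeric version rather than throwing, so callers must
handle that case. A short sketch of the restored API (wrapper class is
illustrative only):

    import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;

    public final class LayoutLookupSketch {
      public static void main(String[] args) {
        // A known version resolves to its enum constant...
        ChunkLayOutVersion v = ChunkLayOutVersion.getChunkLayOutVersion(1);
        System.out.println(v); // ChunkLayout:v1 (FILE_PER_CHUNK)
        // ...while unknown versions return null rather than throwing.
        System.out.println(ChunkLayOutVersion.getChunkLayOutVersion(99));
        for (ChunkLayOutVersion each : ChunkLayOutVersion.getAllVersions()) {
          System.out.println(each.getVersion() + " -> " + each);
        }
      }
    }
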
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
index fa70819df0..f64774e952 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
@@ -129,20 +129,19 @@ public abstract class ContainerData {
    * Creates a ContainerData Object, which holds metadata of the container.
    * @param type - ContainerType
    * @param containerId - ContainerId
-   * @param layoutVersion - Container layoutVersion
+   * @param layOutVersion - Container layOutVersion
    * @param size - Container maximum size in bytes
    * @param originPipelineId - Pipeline Id where this container is/was created
    * @param originNodeId - Node Id where this container is/was created
    */
   protected ContainerData(ContainerType type, long containerId,
-                          ContainerLayoutVersion layoutVersion, long size,
-                          String originPipelineId,
-                          String originNodeId) {
+      ChunkLayOutVersion layOutVersion, long size, String originPipelineId,
+      String originNodeId) {
     Preconditions.checkNotNull(type);
 
     this.containerType = type;
     this.containerID = containerId;
-    this.layOutVersion = layoutVersion.getVersion();
+    this.layOutVersion = layOutVersion.getVersion();
     this.metadata = new TreeMap<>();
     this.state = ContainerDataProto.State.OPEN;
     this.readCount = new AtomicLong(0L);
@@ -159,7 +158,7 @@ public abstract class ContainerData {
 
   protected ContainerData(ContainerData source) {
     this(source.getContainerType(), source.getContainerID(),
-        source.getLayoutVersion(), source.getMaxSize(),
+        source.getLayOutVersion(), source.getMaxSize(),
         source.getOriginPipelineId(), source.getOriginNodeId());
   }
 
@@ -226,11 +225,11 @@ public abstract class ContainerData {
   }
 
   /**
-   * Returns the layoutVersion of the actual container data format.
-   * @return layoutVersion
+   * Returns the layOutVersion of the actual container data format.
+   * @return layOutVersion
    */
-  public ContainerLayoutVersion getLayoutVersion() {
-    return ContainerLayoutVersion.getContainerLayoutVersion(layOutVersion);
+  public ChunkLayOutVersion getLayOutVersion() {
+    return ChunkLayOutVersion.getChunkLayOutVersion(layOutVersion);
   }
 
   /**
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
index b4e15dbf6d..244750aab4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
@@ -251,10 +251,9 @@ public final class ContainerDataYaml {
         Map<Object, Object> nodes = constructMapping(mnode);
 
         //Needed this, as TAG.INT type is by default converted to Long.
-        long layoutVersion = (long) nodes.get(OzoneConsts.LAYOUTVERSION);
-        ContainerLayoutVersion containerLayoutVersion =
-            ContainerLayoutVersion.getContainerLayoutVersion(
-                (int) layoutVersion);
+        long layOutVersion = (long) nodes.get(OzoneConsts.LAYOUTVERSION);
+        ChunkLayOutVersion layoutVersion =
+            ChunkLayOutVersion.getChunkLayOutVersion((int) layOutVersion);
 
         long size = (long) nodes.get(OzoneConsts.MAX_SIZE);
 
@@ -264,8 +263,8 @@ public final class ContainerDataYaml {
 
         //When a new field is added, it needs to be added here.
         KeyValueContainerData kvData = new KeyValueContainerData(
-            (long) nodes.get(OzoneConsts.CONTAINER_ID), containerLayoutVersion,
-            size, originPipelineId, originNodeId);
+            (long) nodes.get(OzoneConsts.CONTAINER_ID), layoutVersion, size,
+            originPipelineId, originNodeId);
 
         kvData.setContainerDBType((String)nodes.get(
             OzoneConsts.CONTAINER_DB_TYPE));
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index 1edd046f09..5dbba2bc98 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -581,17 +581,16 @@ public class HddsDispatcher implements ContainerDispatcher, Auditor {
   }
 
   private void audit(AuditAction action, EventType eventType,
-      Map<String, String> params, AuditEventStatus result,
-      Throwable exception) {
+      Map<String, String> params, AuditEventStatus result, Throwable exception){
     AuditMessage amsg;
     switch (result) {
     case SUCCESS:
-      if (isAllowed(action.getAction())) {
-        if (eventType == EventType.READ &&
+      if(isAllowed(action.getAction())) {
+        if(eventType == EventType.READ &&
             AUDIT.getLogger().isInfoEnabled(AuditMarker.READ.getMarker())) {
           amsg = buildAuditMessageForSuccess(action, params);
           AUDIT.logReadSuccess(amsg);
-        } else if (eventType == EventType.WRITE &&
+        } else if(eventType == EventType.WRITE &&
             AUDIT.getLogger().isInfoEnabled(AuditMarker.WRITE.getMarker())) {
           amsg = buildAuditMessageForSuccess(action, params);
           AUDIT.logWriteSuccess(amsg);
@@ -600,11 +599,11 @@ public class HddsDispatcher implements ContainerDispatcher, Auditor {
       break;
 
     case FAILURE:
-      if (eventType == EventType.READ &&
+      if(eventType == EventType.READ &&
           AUDIT.getLogger().isErrorEnabled(AuditMarker.READ.getMarker())) {
         amsg = buildAuditMessageForFailure(action, params, exception);
         AUDIT.logReadFailure(amsg);
-      } else if (eventType == EventType.WRITE &&
+      } else if(eventType == EventType.WRITE &&
           AUDIT.getLogger().isErrorEnabled(AuditMarker.WRITE.getMarker())) {
         amsg = buildAuditMessageForFailure(action, params, exception);
         AUDIT.logWriteFailure(amsg);
@@ -657,7 +656,7 @@ public class HddsDispatcher implements ContainerDispatcher, Auditor {
    * @return true or false accordingly.
    */
   private boolean isAllowed(String action) {
-    switch (action) {
+    switch(action) {
     case "CLOSE_CONTAINER":
     case "CREATE_CONTAINER":
     case "LIST_CONTAINER":
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
index d6ca2d120e..b736eb536e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
@@ -130,7 +130,7 @@ public class OpenContainerBlockMap {
   public void removeFromBlockMap(BlockID blockID) {
     Preconditions.checkNotNull(blockID);
     containers.computeIfPresent(blockID.getContainerID(), (containerId, blocks)
-        -> blocks.removeAndGetSize(blockID.getLocalID()) == 0 ? null : blocks);
+        -> blocks.removeAndGetSize(blockID.getLocalID()) == 0? null: blocks);
   }
 
   /**
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
index 24df9f5b1e..14ae4c943c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
@@ -37,6 +37,8 @@ public class DatanodeConfiguration {
   private static final Logger LOG =
       LoggerFactory.getLogger(DatanodeConfiguration.class);
 
+  static final String REPLICATION_STREAMS_LIMIT_KEY =
+      "hdds.datanode.replication.streams.limit";
   static final String CONTAINER_DELETE_THREADS_MAX_KEY =
       "hdds.datanode.container.delete.threads.max";
   static final String PERIODIC_DISK_CHECK_INTERVAL_MINUTES_KEY =
@@ -55,6 +57,8 @@ public class DatanodeConfiguration {
 
   static final boolean CHUNK_DATA_VALIDATION_CHECK_DEFAULT = false;
 
+  static final int REPLICATION_MAX_STREAMS_DEFAULT = 10;
+
   static final long PERIODIC_DISK_CHECK_INTERVAL_MINUTES_DEFAULT = 60;
 
   static final int FAILED_VOLUMES_TOLERATED_DEFAULT = -1;
@@ -67,6 +71,19 @@ public class DatanodeConfiguration {
   static final long DISK_CHECK_TIMEOUT_DEFAULT =
       Duration.ofMinutes(10).toMillis();
 
+  /**
+   * The maximum number of replication commands a single datanode can execute
+   * simultaneously.
+   */
+  @Config(key = "replication.streams.limit",
+      type = ConfigType.INT,
+      defaultValue = "10",
+      tags = {DATANODE},
+      description = "The maximum number of replication commands a single " +
+          "datanode can execute simultaneously"
+  )
+  private int replicationMaxStreams = REPLICATION_MAX_STREAMS_DEFAULT;
+
   /**
    * Number of threads per volume that Datanode will use for chunk read.
    */
@@ -121,7 +138,7 @@ public class DatanodeConfiguration {
       type = ConfigType.INT,
       defaultValue = "1440",
       tags = {DATANODE},
-      description = "The maximum number of block delete commands queued on " +
+      description = "The maximum number of block delete commands queued on "+
           " a datanode"
   )
   private int blockDeleteQueueLimit = 60 * 24;
@@ -247,6 +264,13 @@ public class DatanodeConfiguration {
 
   @PostConstruct
   public void validate() {
+    if (replicationMaxStreams < 1) {
+      LOG.warn(REPLICATION_STREAMS_LIMIT_KEY + " must be greater than zero " +
+              "and was set to {}. Defaulting to {}",
+          replicationMaxStreams, REPLICATION_MAX_STREAMS_DEFAULT);
+      replicationMaxStreams = REPLICATION_MAX_STREAMS_DEFAULT;
+    }
+
     if (containerDeleteThreads < 1) {
       LOG.warn(CONTAINER_DELETE_THREADS_MAX_KEY + " must be greater than zero" +
               " and was set to {}. Defaulting to {}",
@@ -292,10 +316,18 @@ public class DatanodeConfiguration {
     }
   }
 
+  public void setReplicationMaxStreams(int replicationMaxStreams) {
+    this.replicationMaxStreams = replicationMaxStreams;
+  }
+
   public void setContainerDeleteThreads(int containerDeleteThreads) {
     this.containerDeleteThreads = containerDeleteThreads;
   }
 
+  public int getReplicationMaxStreams() {
+    return replicationMaxStreams;
+  }
+
   public int getContainerDeleteThreads() {
     return containerDeleteThreads;
   }
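
Taken together with the DatanodeStateMachine change below, the new limit is
consumed through the typed-config mechanism rather than a raw key lookup;
values below 1 are reset to the default of 10 by validate(). A short sketch of
reading it, mirroring the conf.getObject(...) pattern used elsewhere in this
patch (wrapper class is illustrative only):

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;

    public final class ReplicationLimitSketch {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        conf.setInt("hdds.datanode.replication.streams.limit", 20);
        // getObject(...) binds the @Config fields and runs @PostConstruct
        // validate(), which resets values below 1 to the default (10).
        DatanodeConfiguration dnConf =
            conf.getObject(DatanodeConfiguration.class);
        System.out.println(dnConf.getReplicationMaxStreams()); // 20
      }
    }
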
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
index ce79049f4f..ee5e87adca 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
@@ -55,7 +55,6 @@ import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.container.replication.ContainerReplicator;
 import org.apache.hadoop.ozone.container.replication.DownloadAndImportReplicator;
 import org.apache.hadoop.ozone.container.replication.MeasuredReplicator;
-import org.apache.hadoop.ozone.container.replication.ReplicationServer.ReplicationConfig;
 import org.apache.hadoop.ozone.container.replication.ReplicationSupervisor;
 import org.apache.hadoop.ozone.container.replication.ReplicationSupervisorMetrics;
 import org.apache.hadoop.ozone.container.replication.SimpleContainerDownloader;
@@ -167,11 +166,9 @@ public class DatanodeStateMachine implements Closeable {
 
     replicatorMetrics = new MeasuredReplicator(replicator);
 
-    ReplicationConfig replicationConfig =
-        conf.getObject(ReplicationConfig.class);
     supervisor =
         new ReplicationSupervisor(container.getContainerSet(), context,
-            replicatorMetrics, replicationConfig);
+            replicatorMetrics, dnConf.getReplicationMaxStreams());
 
     replicationSupervisorMetrics =
         ReplicationSupervisorMetrics.create(supervisor);
@@ -293,7 +290,7 @@ public class DatanodeStateMachine implements Closeable {
 
       now = Time.monotonicNow();
       if (now < nextHB.get()) {
-        if (!Thread.interrupted()) {
+        if(!Thread.interrupted()) {
           try {
             Thread.sleep(nextHB.get() - now);
           } catch (InterruptedException e) {
@@ -379,7 +376,7 @@ public class DatanodeStateMachine implements Closeable {
       connectionManager.close();
     }
 
-    if (container != null) {
+    if(container != null) {
       container.stop();
     }
 
@@ -637,12 +634,12 @@ public class DatanodeStateMachine implements Closeable {
   }
 
   public StatusAndMessages finalizeUpgrade()
-      throws IOException {
+      throws IOException{
     return upgradeFinalizer.finalize(datanodeDetails.getUuidString(), this);
   }
 
   public StatusAndMessages queryUpgradeStatus()
-      throws IOException {
+      throws IOException{
     return upgradeFinalizer.reportStatus(datanodeDetails.getUuidString(),
         true);
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
index c75da0a74d..9eea758b0d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
@@ -184,7 +184,7 @@ public class StateContext {
   /**
    * init related ReportType Collections.
    */
-  private void initReportTypeCollection() {
+  private void initReportTypeCollection(){
     fullReportTypeList.add(CONTAINER_REPORTS_PROTO_NAME);
     type2Reports.put(CONTAINER_REPORTS_PROTO_NAME, containerReports);
     fullReportTypeList.add(NODE_REPORT_PROTO_NAME);
@@ -221,7 +221,7 @@ public class StateContext {
    */
   boolean isExiting(DatanodeStateMachine.DatanodeStates newState) {
     boolean isExiting = state != newState && stateExecutionCount.get() > 0;
-    if (isExiting) {
+    if(isExiting) {
       stateExecutionCount.set(0);
     }
     return isExiting;
@@ -344,7 +344,7 @@ public class StateContext {
       Preconditions.checkState(reportType != null);
     }
     synchronized (incrementalReportsQueue) {
-      if (incrementalReportsQueue.containsKey(endpoint)) {
+      if (incrementalReportsQueue.containsKey(endpoint)){
         incrementalReportsQueue.get(endpoint).addAll(0, reportsToPutBack);
       }
     }
@@ -381,7 +381,7 @@ public class StateContext {
       InetSocketAddress endpoint) {
     Map<String, AtomicBoolean> mp = fullReportSendIndicator.get(endpoint);
     List<GeneratedMessage> nonIncrementalReports = new LinkedList<>();
-    if (null != mp) {
+    if (null != mp){
       for (Map.Entry<String, AtomicBoolean> kv : mp.entrySet()) {
         if (kv.getValue().get()) {
           String reportType = kv.getKey();
@@ -817,14 +817,14 @@ public class StateContext {
    */
   public boolean updateCommandStatus(Long cmdId,
       Consumer<CommandStatus> cmdStatusUpdater) {
-    if (cmdStatusMap.containsKey(cmdId)) {
+    if(cmdStatusMap.containsKey(cmdId)) {
       cmdStatusUpdater.accept(cmdStatusMap.get(cmdId));
       return true;
     }
     return false;
   }
 
-  public void configureHeartbeatFrequency() {
+  public void configureHeartbeatFrequency(){
     heartbeatFrequency.set(getScmHeartbeatInterval(conf));
   }
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java
index 7908e3d7d2..a5044cb068 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java
@@ -68,7 +68,7 @@ public final class CommandDispatcher {
     this.connectionManager = connectionManager;
     handlerMap = new HashMap<>();
     for (CommandHandler h : handlers) {
-      if (handlerMap.containsKey(h.getCommandType())) {
+      if(handlerMap.containsKey(h.getCommandType())){
         LOG.error("Duplicate handler for the same command. Exiting. Handle " +
             "key : {}", h.getCommandType().getDescriptorForType().getName());
         throw new IllegalArgumentException("Duplicate handler for the same " +
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java
index a766de025d..217592ddcc 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java
@@ -81,7 +81,7 @@ public class InitDatanodeState implements DatanodeState,
     try {
       addresses = getSCMAddressForDatanodes(conf);
     } catch (IllegalArgumentException e) {
-      if (!Strings.isNullOrEmpty(e.getMessage())) {
+      if(!Strings.isNullOrEmpty(e.getMessage())) {
         LOG.error("Failed to get SCM addresses: {}", e.getMessage());
       }
       return DatanodeStateMachine.DatanodeStates.SHUTDOWN;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
index d80d1e5bca..fa6c937f63 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
@@ -122,7 +122,7 @@ public class VersionEndpointTask implements
       }
     } catch (DiskOutOfSpaceException ex) {
       rpcEndPoint.setState(EndpointStateMachine.EndPointStates.SHUTDOWN);
-    } catch (IOException ex) {
+    } catch(IOException ex) {
       rpcEndPoint.logIfNeeded(ex);
     } finally {
       rpcEndPoint.unlock();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java
index 557473bf9c..4ecf2789a4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java
@@ -33,7 +33,7 @@ import org.apache.ratis.protocol.RaftGroupId;
  * This class is for maintaining Container State Machine statistics.
  */
 @InterfaceAudience.Private
-@Metrics(about = "Container State Machine Metrics", context = "dfs")
+@Metrics(about="Container State Machine Metrics", context="dfs")
 public class CSMMetrics {
   public static final String SOURCE_NAME =
       CSMMetrics.class.getSimpleName();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index 4ef532049f..301fc59237 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -689,7 +689,7 @@ public class ContainerStateMachine extends BaseStateMachine {
   private synchronized void updateLastApplied() {
     Long appliedTerm = null;
     long appliedIndex = -1;
-    for (long i = getLastAppliedTermIndex().getIndex() + 1;; i++) {
+    for(long i = getLastAppliedTermIndex().getIndex() + 1;; i++) {
       final Long removed = applyTransactionCompletionMap.remove(i);
       if (removed == null) {
         break;
@@ -740,7 +740,7 @@ public class ContainerStateMachine extends BaseStateMachine {
         = queue.submit(task, executor);
     // after the task is completed, remove the queue if the queue is empty.
     f.thenAccept(dummy -> containerTaskQueues.computeIfPresent(containerId,
-        (id, q) -> q.isEmpty() ? null : q));
+        (id, q) -> q.isEmpty()? null: q));
     return f;
   }
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index 237b448682..c04e5e967b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -273,7 +273,7 @@ public final class XceiverServerRatis implements XceiverServerSpi {
     // Set the ratis storage directory
     Collection<String> storageDirPaths =
             HddsServerUtil.getOzoneDatanodeRatisDirectory(conf);
-    List<File> storageDirs = new ArrayList<>(storageDirPaths.size());
+    List<File> storageDirs= new ArrayList<>(storageDirPaths.size());
     storageDirPaths.stream().forEach(d -> storageDirs.add(new File(d)));
 
     RaftServerConfigKeys.setStorageDir(properties, storageDirs);
@@ -693,7 +693,7 @@ public final class XceiverServerRatis implements XceiverServerSpi {
     long bytesWritten = 0;
     Iterator<org.apache.hadoop.ozone.container.common.interfaces.Container<?>>
         containerIt = containerController.getContainers();
-    while (containerIt.hasNext()) {
+    while(containerIt.hasNext()) {
       ContainerData containerData = containerIt.next().getContainerData();
       if (containerData.getOriginPipelineId()
           .compareTo(pipelineID.getId()) == 0) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
index 6a38080214..83b8615887 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
@@ -145,7 +145,7 @@ public final class HddsVolumeUtil {
     String lvStr = getProperty(props, OzoneConsts.LAYOUTVERSION, versionFile);
 
     int lv = Integer.parseInt(lvStr);
-    if (HDDSVolumeLayoutVersion.getLatestVersion().getVersion() != lv) {
+    if(HDDSVolumeLayoutVersion.getLatestVersion().getVersion() != lv) {
       throw new InconsistentStorageStateException("Invalid layOutVersion. " +
           "Version file has layOutVersion as " + lv + " and latest Datanode " +
           "layOutVersion is " +
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
index 98e16294da..35ff05e707 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
@@ -290,7 +290,7 @@ public class MutableVolumeSet implements VolumeSet {
   }
 
   public void refreshAllVolumeUsage() {
-    volumeMap.forEach((k, v) -> v.refreshVolumeInfo());
+    volumeMap.forEach((k, v)-> v.refreshVolumeInfo());
   }
 
   /**
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
index 715cb8400f..5f629ad464 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
@@ -161,7 +161,7 @@ public abstract class StorageVolume
   }
 
   public StorageType getStorageType() {
-    if (this.volumeInfo != null) {
+    if(this.volumeInfo != null) {
       return this.volumeInfo.getStorageType();
     }
     return StorageType.DEFAULT;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
index 1fcac8327f..255e7ea82e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
@@ -87,7 +87,7 @@ public class VolumeUsage implements SpaceUsageSource {
     }
   }
 
-  public void refreshNow() {
+  public void refreshNow(){
     source.refreshNow();
   }
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index e0ba37a99d..1284f6a102 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -382,7 +382,7 @@ public class KeyValueContainer implements Container<KeyValueContainerData> {
 
   private void compactDB() throws StorageContainerException {
     try {
-      try (ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) {
+      try(ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) {
         db.getStore().compactDB();
       }
     } catch (StorageContainerException ex) {
@@ -435,7 +435,7 @@ public class KeyValueContainer implements Container<KeyValueContainerData> {
     // holding lock and writing data to disk. We can have async implementation
     // to flush the update container data to disk.
     long containerId = containerData.getContainerID();
-    if (!containerData.isValid()) {
+    if(!containerData.isValid()) {
       LOG.debug("Invalid container data. ContainerID: {}", containerId);
       throw new StorageContainerException("Invalid container data. " +
           "ContainerID: " + containerId, INVALID_CONTAINER_STATE);
@@ -774,7 +774,7 @@ public class KeyValueContainer implements Container<KeyValueContainerData> {
    * @return
    * @throws IOException
    */
-  private File createTempFile(File file) throws IOException {
+  private File createTempFile(File file) throws IOException{
     return File.createTempFile("tmp_" + System.currentTimeMillis() + "_",
         file.getName(), file.getParentFile());
   }
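
createTempFile above places the temporary file in the same parent directory as the target file, which keeps a later rename on the same filesystem. A standalone sketch of that same-directory temp pattern, outside any Ozone type:

    import java.io.File;
    import java.io.IOException;

    // Standalone sketch of the same-directory temp-file pattern above:
    // keeping the temp file beside the target keeps a later rename cheap
    // and on the same filesystem.
    final class TempFileSketch {
      static File createTempFile(File file) throws IOException {
        return File.createTempFile("tmp_" + System.currentTimeMillis() + "_",
            file.getName(), file.getParentFile());
      }

      public static void main(String[] args) throws IOException {
        File target = File.createTempFile("target", ".yaml");
        File tmp = createTempFile(target);
        System.out.println(tmp.getAbsolutePath());
        // ... write new contents to tmp, then rename over target ...
        tmp.delete();
        target.delete();
      }
    }
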
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
index 40d527d464..11c245ab14 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.ozone.common.OzoneChecksumException;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
+import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
 import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
 import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
@@ -228,15 +228,15 @@ public class KeyValueContainerCheck {
 
     onDiskContainerData.setDbFile(dbFile);
 
-    ContainerLayoutVersion layout = onDiskContainerData.getLayoutVersion();
+    ChunkLayOutVersion layout = onDiskContainerData.getLayOutVersion();
 
-    try (ReferenceCountedDB db =
+    try(ReferenceCountedDB db =
             BlockUtils.getDB(onDiskContainerData, checkConfig);
         BlockIterator<BlockData> kvIter = db.getStore().getBlockIterator()) {
 
-      while (kvIter.hasNext()) {
+      while(kvIter.hasNext()) {
         BlockData block = kvIter.nextBlock();
-        for (ContainerProtos.ChunkInfo chunk : block.getChunks()) {
+        for(ContainerProtos.ChunkInfo chunk : block.getChunks()) {
           File chunkFile = layout.getChunkFile(onDiskContainerData,
               block.getBlockID(), ChunkInfo.getFromProtoBuf(chunk));
 
@@ -263,7 +263,7 @@ public class KeyValueContainerCheck {
 
   private static void verifyChecksum(BlockData block,
       ContainerProtos.ChunkInfo chunk, File chunkFile,
-      ContainerLayoutVersion layout,
+      ChunkLayOutVersion layout,
       DataTransferThrottler throttler, Canceler canceler) throws IOException {
     ChecksumData checksumData =
         ChecksumData.getFromProtoBuf(chunk.getChecksumData());
@@ -275,12 +275,12 @@ public class KeyValueContainerCheck {
     long bytesRead = 0;
     try (FileChannel channel = FileChannel.open(chunkFile.toPath(),
         ChunkUtils.READ_OPTIONS, ChunkUtils.NO_ATTRIBUTES)) {
-      if (layout == ContainerLayoutVersion.FILE_PER_BLOCK) {
+      if (layout == ChunkLayOutVersion.FILE_PER_BLOCK) {
         channel.position(chunk.getOffset());
       }
       for (int i = 0; i < checksumCount; i++) {
         // limit last read for FILE_PER_BLOCK, to avoid reading next chunk
-        if (layout == ContainerLayoutVersion.FILE_PER_BLOCK &&
+        if (layout == ChunkLayOutVersion.FILE_PER_BLOCK &&
             i == checksumCount - 1 &&
             chunk.getLen() % bytesPerChecksum != 0) {
           buffer.limit((int) (chunk.getLen() % bytesPerChecksum));
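
The verifyChecksum hunk caps the final read when a chunk's length is not a multiple of bytesPerChecksum, so the scanner never reads into the next chunk of a FILE_PER_BLOCK file. A hedged sketch of just that limit computation, using plain java.nio and no Ozone types:

    import java.nio.ByteBuffer;

    // Sketch of the last-read limiting logic in verifyChecksum above.
    // checksumCount and bytesPerChecksum mirror the values read from
    // ChecksumData, but are computed locally here.
    final class LastReadLimitSketch {
      static int limitForRead(int i, int checksumCount, long chunkLen,
          int bytesPerChecksum) {
        // Every read but the last uses a full checksum window; the last
        // read of a partially filled window is clamped to the remainder.
        if (i == checksumCount - 1 && chunkLen % bytesPerChecksum != 0) {
          return (int) (chunkLen % bytesPerChecksum);
        }
        return bytesPerChecksum;
      }

      public static void main(String[] args) {
        long chunkLen = 10500;        // not a multiple of 4096
        int bytesPerChecksum = 4096;
        int checksumCount =
            (int) ((chunkLen + bytesPerChecksum - 1) / bytesPerChecksum);
        for (int i = 0; i < checksumCount; i++) {
          ByteBuffer buffer = ByteBuffer.allocate(bytesPerChecksum);
          buffer.limit(limitForRead(i, checksumCount, chunkLen,
              bytesPerChecksum));
          System.out.println("read " + i + " limit=" + buffer.limit());
          // read 0 limit=4096, read 1 limit=4096, read 2 limit=2308
        }
      }
    }
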
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
index 81333073cc..e1a1f02315 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerDataProto;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
+import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
 import org.yaml.snakeyaml.nodes.Tag;
@@ -96,12 +96,12 @@ public class KeyValueContainerData extends ContainerData {
   /**
    * Constructs KeyValueContainerData object.
    * @param id - ContainerId
-   * @param layoutVersion container layout
+   * @param layOutVersion chunk layout
    * @param size - maximum size of the container in bytes
    */
-  public KeyValueContainerData(long id, ContainerLayoutVersion layoutVersion,
+  public KeyValueContainerData(long id, ChunkLayOutVersion layOutVersion,
       long size, String originPipelineId, String originNodeId) {
-    super(ContainerProtos.ContainerType.KeyValueContainer, id, layoutVersion,
+    super(ContainerProtos.ContainerType.KeyValueContainer, id, layOutVersion,
         size, originPipelineId, originNodeId);
     this.numPendingDeletionBlocks = new AtomicLong(0);
     this.deleteTransactionId = 0;
@@ -270,7 +270,7 @@ public class KeyValueContainerData extends ContainerData {
       builder.setBytesUsed(this.getBytesUsed());
     }
 
-    if (this.getContainerType() != null) {
+    if(this.getContainerType() != null) {
       builder.setContainerType(ContainerProtos.ContainerType.KeyValueContainer);
     }
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index ff2d061cb7..b499755a01 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -59,7 +59,7 @@ import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
+import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
@@ -194,7 +194,7 @@ public class KeyValueHandler extends Handler {
       DispatcherContext dispatcherContext) {
     Type cmdType = request.getCmdType();
 
-    switch (cmdType) {
+    switch(cmdType) {
     case CreateContainer:
       return handler.handleCreateContainer(request, kvContainer);
     case ReadContainer:
@@ -266,8 +266,8 @@ public class KeyValueHandler extends Handler {
 
     long containerID = request.getContainerID();
 
-    ContainerLayoutVersion layoutVersion =
-        ContainerLayoutVersion.getConfiguredVersion(conf);
+    ChunkLayOutVersion layoutVersion =
+        ChunkLayOutVersion.getConfiguredVersion(conf);
     KeyValueContainerData newContainerData = new KeyValueContainerData(
         containerID, layoutVersion, maxContainerSize, request.getPipelineID(),
         getDatanodeId());
@@ -744,7 +744,7 @@ public class KeyValueHandler extends Handler {
           .writeChunk(kvContainer, blockID, chunkInfo, data, dispatcherContext);
 
       // We should increment stats after writeChunk
-      if (stage == WriteChunkStage.WRITE_DATA ||
+      if (stage == WriteChunkStage.WRITE_DATA||
           stage == WriteChunkStage.COMBINED) {
         metrics.incContainerBytesStats(Type.WriteChunk, writeChunk
             .getChunkData().getLen());
@@ -959,7 +959,7 @@ public class KeyValueHandler extends Handler {
   public void exportContainer(final Container container,
       final OutputStream outputStream,
       final TarContainerPacker packer)
-      throws IOException {
+      throws IOException{
     final KeyValueContainer kvc = (KeyValueContainer) container;
     kvc.exportContainerData(outputStream, packer);
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java
index dde3e2e22d..ad1673a02a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java
@@ -97,7 +97,7 @@ public final class KeyValueContainerLocationUtil {
    * @param containerId
    * @return container sub directory
    */
-  private static String getContainerSubDirectory(long containerId) {
+  private static String getContainerSubDirectory(long containerId){
     int directory = (int) ((containerId >> 9) & 0xFF);
     return Storage.CONTAINER_DIR + directory;
   }
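
The helper above shards containers into sub-directories via (containerId >> 9) & 0xFF: bits 9 through 16 of the ID select one of 256 buckets, so runs of 512 consecutive IDs share a directory. A minimal standalone illustration; the "container" prefix constant here is an assumption standing in for Storage.CONTAINER_DIR:

    // Standalone illustration of the sub-directory sharding above.
    // CONTAINER_DIR is an assumed stand-in for Storage.CONTAINER_DIR.
    public final class ContainerSubDirSketch {
      private static final String CONTAINER_DIR = "container";

      static String subDirectory(long containerId) {
        // Bits 9..16 of the ID pick one of 256 buckets, so 512
        // consecutive container IDs land in the same directory.
        int directory = (int) ((containerId >> 9) & 0xFF);
        return CONTAINER_DIR + directory;
      }

      public static void main(String[] args) {
        System.out.println(subDirectory(0));      // container0
        System.out.println(subDirectory(511));    // container0
        System.out.println(subDirectory(512));    // container1
        System.out.println(subDirectory(131072)); // container0 again
      }
    }
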
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
index 8256d0a5b4..58a0dcd949 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
@@ -119,7 +119,7 @@ public class BlockManagerImpl implements BlockManager {
         "cannot be negative");
     // We are not locking the key manager since LevelDb serializes all actions
     // against a single DB. We rely on DB level locking to avoid conflicts.
-    try (ReferenceCountedDB db = BlockUtils.
+    try(ReferenceCountedDB db = BlockUtils.
         getDB(container.getContainerData(), config)) {
       // This is a post condition that acts as a hint to the user.
       // Should never fail.
@@ -216,7 +216,7 @@ public class BlockManagerImpl implements BlockManager {
               + containerBCSId + ".", UNKNOWN_BCSID);
     }
 
-    try (ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) {
+    try(ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) {
       // This is a post condition that acts as a hint to the user.
       // Should never fail.
       Preconditions.checkNotNull(db, DB_NULL_ERR_MSG);
@@ -244,7 +244,7 @@ public class BlockManagerImpl implements BlockManager {
       throws IOException {
     KeyValueContainerData containerData = (KeyValueContainerData) container
         .getContainerData();
-    try (ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) {
+    try(ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) {
       // This is a post condition that acts as a hint to the user.
       // Should never fail.
       Preconditions.checkNotNull(db, DB_NULL_ERR_MSG);
@@ -276,7 +276,7 @@ public class BlockManagerImpl implements BlockManager {
 
     KeyValueContainerData cData = (KeyValueContainerData) container
         .getContainerData();
-    try (ReferenceCountedDB db = BlockUtils.getDB(cData, config)) {
+    try(ReferenceCountedDB db = BlockUtils.getDB(cData, config)) {
       // This is a post condition that acts as a hint to the user.
       // Should never fail.
       Preconditions.checkNotNull(db, DB_NULL_ERR_MSG);
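
Each hunk above opens the container DB inside try-with-resources, so the reference-counted handle is released on every exit path, including exceptions. A simplified sketch of that handle pattern; the real ReferenceCountedDB wraps a shared RocksDB instance, which this stand-in omits:

    import java.util.concurrent.atomic.AtomicInteger;

    // Simplified sketch of a reference-counted AutoCloseable handle, in
    // the spirit of ReferenceCountedDB; the real class wraps a shared
    // RocksDB store.
    final class RefCountedHandle implements AutoCloseable {
      private final AtomicInteger refCount = new AtomicInteger();

      RefCountedHandle acquire() {
        refCount.incrementAndGet();
        return this;
      }

      @Override
      public void close() {
        // try-with-resources calls this on every exit path, including
        // throws, so acquire/release stay balanced.
        if (refCount.decrementAndGet() == 0) {
          System.out.println("last reference dropped; store may close");
        }
      }

      public static void main(String[] args) {
        RefCountedHandle db = new RefCountedHandle();
        try (RefCountedHandle ignored = db.acquire()) {
          // work against the store here
        }
      }
    }
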
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java
index e998278c6f..27fe0d9cc0 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.ozone.common.ChunkBuffer;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
-import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
+import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
@@ -43,8 +43,8 @@ import java.util.EnumMap;
 import java.util.Map;
 
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNSUPPORTED_REQUEST;
-import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_BLOCK;
-import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_CHUNK;
+import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_BLOCK;
+import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_CHUNK;
 
 /**
  * Selects ChunkManager implementation to use for each chunk operation.
@@ -54,8 +54,8 @@ public class ChunkManagerDispatcher implements ChunkManager {
   private static final Logger LOG =
       LoggerFactory.getLogger(ChunkManagerDispatcher.class);
 
-  private final Map<ContainerLayoutVersion, ChunkManager> handlers
-      = new EnumMap<>(ContainerLayoutVersion.class);
+  private final Map<ChunkLayOutVersion, ChunkManager> handlers
+      = new EnumMap<>(ChunkLayOutVersion.class);
 
   ChunkManagerDispatcher(boolean sync, BlockManager manager,
                          VolumeSet volSet) {
@@ -128,13 +128,11 @@ public class ChunkManagerDispatcher implements ChunkManager {
   private @Nonnull ChunkManager selectHandler(Container container)
       throws StorageContainerException {
 
-    ContainerLayoutVersion layout =
-        container.getContainerData().getLayoutVersion();
+    ChunkLayOutVersion layout = container.getContainerData().getLayOutVersion();
     return selectVersionHandler(layout);
   }
 
-  private @Nonnull ChunkManager selectVersionHandler(
-      ContainerLayoutVersion version)
+  private @Nonnull ChunkManager selectVersionHandler(ChunkLayOutVersion version)
       throws StorageContainerException {
     ChunkManager versionHandler = handlers.get(version);
     if (versionHandler == null) {
@@ -144,7 +142,7 @@ public class ChunkManagerDispatcher implements ChunkManager {
   }
 
   private static ChunkManager throwUnknownLayoutVersion(
-      ContainerLayoutVersion version) throws StorageContainerException {
+      ChunkLayOutVersion version) throws StorageContainerException {
 
     String message = "Unsupported storage container layout: " + version;
     LOG.warn(message);
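
ChunkManagerDispatcher above keeps one ChunkManager per layout version in an EnumMap and fails fast on an unmapped version. A condensed sketch of that dispatch-by-enum pattern; the Layout enum and Handler interface are illustrative stand-ins, not the Ozone types:

    import java.util.EnumMap;
    import java.util.Map;

    // Condensed sketch of the EnumMap dispatch in ChunkManagerDispatcher.
    // Layout and Handler are stand-ins for the Ozone types.
    final class EnumDispatchSketch {
      enum Layout { FILE_PER_CHUNK, FILE_PER_BLOCK }

      interface Handler {
        String handle();
      }

      private final Map<Layout, Handler> handlers =
          new EnumMap<>(Layout.class);

      EnumDispatchSketch() {
        handlers.put(Layout.FILE_PER_CHUNK, () -> "one file per chunk");
        handlers.put(Layout.FILE_PER_BLOCK, () -> "one file per block");
      }

      Handler select(Layout layout) {
        Handler h = handlers.get(layout);
        if (h == null) {
          // Mirrors throwUnknownLayoutVersion: unmapped versions fail fast.
          throw new IllegalStateException("Unsupported layout: " + layout);
        }
        return h;
      }

      public static void main(String[] args) {
        System.out.println(new EnumDispatchSketch()
            .select(Layout.FILE_PER_BLOCK).handle());
      }
    }
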
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java
index 18c6b9d28d..5fd23b59a1 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java
@@ -56,7 +56,7 @@ import java.time.Duration;
 import java.util.concurrent.ExecutionException;
 
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNSUPPORTED_REQUEST;
-import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_BLOCK;
+import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_BLOCK;
 import static org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext.WriteChunkStage.COMMIT_DATA;
 import static org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil.onFailure;
 import static org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils.limitReadSize;
@@ -86,7 +86,7 @@ public class FilePerBlockStrategy implements ChunkManager {
 
   private static void checkLayoutVersion(Container container) {
     Preconditions.checkArgument(
-        container.getContainerData().getLayoutVersion() == FILE_PER_BLOCK);
+        container.getContainerData().getLayOutVersion() == FILE_PER_BLOCK);
   }
 
   @Override
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java
index 52a8b7c6c4..f2109cb745 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java
@@ -55,7 +55,7 @@ import java.util.ArrayList;
 import java.util.List;
 
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNSUPPORTED_REQUEST;
-import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_CHUNK;
+import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_CHUNK;
 import static org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils.limitReadSize;
 
 /**
@@ -82,7 +82,7 @@ public class FilePerChunkStrategy implements ChunkManager {
 
   private static void checkLayoutVersion(Container container) {
     Preconditions.checkArgument(
-        container.getContainerData().getLayoutVersion() == FILE_PER_CHUNK);
+        container.getContainerData().getLayOutVersion() == FILE_PER_CHUNK);
   }
 
   /**
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
index d40afc5f67..905918a2f9 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
@@ -351,7 +351,7 @@ public class BlockDeletingService extends BackgroundService {
         }
 
         // Once blocks are deleted... remove the blockID from blockDataTable.
-        try (BatchOperation batch = meta.getStore().getBatchHandler()
+        try(BatchOperation batch = meta.getStore().getBatchHandler()
             .initBatchOperation()) {
           for (String entry : succeedBlocks) {
             blockDataTable.deleteWithBatch(batch, entry);
@@ -426,7 +426,7 @@ public class BlockDeletingService extends BackgroundService {
 
         // Once blocks are deleted... remove the blockID from blockDataTable
         // and also remove the transactions from txnTable.
-        try (BatchOperation batch = meta.getStore().getBatchHandler()
+        try(BatchOperation batch = meta.getStore().getBatchHandler()
             .initBatchOperation()) {
           for (DeletedBlocksTransaction delTx : delBlocks) {
             deleteTxns.deleteWithBatch(batch, delTx.getTxID());
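
Both hunks above collect the per-block deletions into a single BatchOperation so the table updates commit together rather than one write per block. A minimal sketch of that batch-then-commit shape; Batch here is a stand-in for the HDDS BatchOperation, which is likewise AutoCloseable:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    // Minimal sketch of the batch-then-commit shape above; Batch is a
    // stand-in for the HDDS BatchOperation.
    final class BatchDeleteSketch {
      static final class Batch implements AutoCloseable {
        private final List<String> pendingDeletes = new ArrayList<>();

        void delete(String key) {
          pendingDeletes.add(key); // buffered, not applied yet
        }

        void commit() {
          System.out.println("atomically deleting " + pendingDeletes);
        }

        @Override
        public void close() {
          pendingDeletes.clear(); // uncommitted work is discarded
        }
      }

      public static void main(String[] args) {
        List<String> succeedBlocks = Arrays.asList("blk-1", "blk-2", "blk-3");
        try (Batch batch = new Batch()) {
          for (String entry : succeedBlocks) {
            batch.delete(entry);
          }
          batch.commit(); // one write covers all deletions
        }
      }
    }
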
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
index a3049be467..15a8a9eb5b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
@@ -276,7 +276,7 @@ public abstract class AbstractDatanodeStore implements DatanodeStore {
         nextBlock = null;
         return currentBlock;
       }
-      if (hasNext()) {
+      if(hasNext()) {
         return nextBlock();
       }
       throw new NoSuchElementException("Block Iterator reached end for " +
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
index 171303dc0b..9beec5b16c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
@@ -112,12 +112,7 @@ public class ContainerController {
   public void markContainerUnhealthy(final long containerId)
           throws IOException {
     Container container = containerSet.getContainer(containerId);
-    if (container != null) {
-      getHandler(container).markContainerUnhealthy(container);
-    } else {
-      LOG.warn("Container {} not found, may be deleted, skip mark UNHEALTHY",
-          containerId);
-    }
+    getHandler(container).markContainerUnhealthy(container);
   }
 
   /**
@@ -211,12 +206,7 @@ public class ContainerController {
   void updateDataScanTimestamp(long containerId, Instant timestamp)
       throws IOException {
     Container container = containerSet.getContainer(containerId);
-    if (container != null) {
-      container.updateDataScanTimestamp(timestamp);
-    } else {
-      LOG.warn("Container {} not found, may be deleted, " +
-          "skip update DataScanTimestamp", containerId);
-    }
+    container.updateDataScanTimestamp(timestamp);
   }
 
 }
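
The two hunks above drop the null guard, so a container deleted between lookup and use would surface as a NullPointerException instead of a logged skip. The guarded shape being reverted looks roughly like this sketch, with a stand-in Container type and a simulated missing container:

    // Shape of the defensive lookup removed by the revert above;
    // Container and the warn/skip behaviour mirror the dropped lines,
    // with stand-in types.
    final class GuardedLookupSketch {
      static final class Container { }

      static Container lookup(long id) {
        return null; // simulate a container deleted before the call
      }

      static void markUnhealthy(long containerId) {
        Container container = lookup(containerId);
        if (container != null) {
          System.out.println("marking container " + containerId
              + " unhealthy");
        } else {
          // Without this branch (as in the revert) the caller would NPE.
          System.out.println("Container " + containerId
              + " not found, may be deleted, skip mark UNHEALTHY");
        }
      }

      public static void main(String[] args) {
        markUnhealthy(42);
      }
    }
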
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java
index c924485531..4a20dc326a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java
@@ -32,7 +32,7 @@ import java.util.concurrent.ThreadLocalRandom;
  * This class captures the container data scrubber metrics on the data-node.
  **/
 @InterfaceAudience.Private
-@Metrics(about = "DataNode container data scrubber metrics", context = "dfs")
+@Metrics(about="DataNode container data scrubber metrics", context="dfs")
 public final class ContainerDataScrubberMetrics {
 
   private final String name;
@@ -110,8 +110,8 @@ public final class ContainerDataScrubberMetrics {
 
   public static ContainerDataScrubberMetrics create(final String volumeName) {
     MetricsSystem ms = DefaultMetricsSystem.instance();
-    String name = "ContainerDataScrubberMetrics-" + (volumeName.isEmpty()
-        ? "UndefinedDataNodeVolume" + ThreadLocalRandom.current().nextInt()
+    String name = "ContainerDataScrubberMetrics-"+ (volumeName.isEmpty()
+        ? "UndefinedDataNodeVolume"+ ThreadLocalRandom.current().nextInt()
         : volumeName.replace(':', '-'));
 
     return ms.register(name, null, new ContainerDataScrubberMetrics(name, ms));
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java
index 59657b064a..96efcf4a14 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java
@@ -90,7 +90,7 @@ public class ContainerMetadataScanner extends Thread {
         metrics.incNumContainersScanned();
       }
     }
-    long interval = System.nanoTime() - start;
+    long interval = System.nanoTime()-start;
     if (!stopping) {
       metrics.incNumScanIterations();
       LOG.info("Completed an iteration of container metadata scrubber in" +
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java
index b70a3e5ed5..cf8e61725b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
  * data-node.
  **/
 @InterfaceAudience.Private
-@Metrics(about = "DataNode container data scrubber metrics", context = "dfs")
+@Metrics(about="DataNode container data scrubber metrics", context="dfs")
 public final class ContainerMetadataScrubberMetrics {
 
   private final String name;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java
index 023b251a52..548d1147a7 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java
@@ -49,7 +49,7 @@ import org.slf4j.LoggerFactory;
 /**
  * Client to read container data from gRPC.
  */
-public class GrpcReplicationClient implements AutoCloseable {
+public class GrpcReplicationClient implements AutoCloseable{
 
   private static final Logger LOG =
       LoggerFactory.getLogger(GrpcReplicationClient.class);
@@ -161,16 +161,7 @@ public class GrpcReplicationClient implements AutoCloseable {
       try {
         chunk.getData().writeTo(stream);
       } catch (IOException e) {
-        LOG.error("Failed to write the stream buffer to {} for container {}",
-            outputPath, containerId, e);
-        try {
-          stream.close();
-        } catch (IOException ex) {
-          LOG.error("Failed to close OutputStream {}", outputPath, e);
-        } finally {
-          deleteOutputOnFailure();
-          response.completeExceptionally(e);
-        }
+        response.completeExceptionally(e);
       }
     }
 
@@ -185,7 +176,6 @@ public class GrpcReplicationClient implements AutoCloseable {
       } catch (IOException e) {
         LOG.error("Failed to close {} for container {}",
             outputPath, containerId, e);
-        deleteOutputOnFailure();
         response.completeExceptionally(e);
       }
     }
@@ -199,9 +189,9 @@ public class GrpcReplicationClient implements AutoCloseable {
       } catch (IOException e) {
         LOG.error("Downloaded container {} OK, but failed to close {}",
             containerId, outputPath, e);
-        deleteOutputOnFailure();
         response.completeExceptionally(e);
       }
+
     }
 
     private void deleteOutputOnFailure() {
@@ -214,4 +204,5 @@ public class GrpcReplicationClient implements AutoCloseable {
       }
     }
   }
+
 }
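
After this revert, a failed chunk write only completes the future exceptionally; the stream close and partial-download cleanup shown in the removed lines go away. A compact sketch of the stricter failure path being reverted, using only OutputStream and CompletableFuture rather than the gRPC stream observer:

    import java.io.IOException;
    import java.io.OutputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.concurrent.CompletableFuture;

    // Sketch of the failure-path cleanup removed above: close the
    // stream, delete the partial file, then fail the future.
    final class DownloadCleanupSketch {
      static void onWriteFailure(OutputStream stream, Path outputPath,
          CompletableFuture<Path> response, IOException cause) {
        try {
          stream.close();
        } catch (IOException closeError) {
          cause.addSuppressed(closeError);
        } finally {
          try {
            Files.deleteIfExists(outputPath); // drop the partial download
          } catch (IOException ignored) {
            // best-effort cleanup; the original failure still wins
          }
          response.completeExceptionally(cause);
        }
      }

      public static void main(String[] args) throws IOException {
        Path tmp = Files.createTempFile("download", ".part");
        CompletableFuture<Path> response = new CompletableFuture<>();
        onWriteFailure(Files.newOutputStream(tmp), tmp, response,
            new IOException("simulated write failure"));
        System.out.println("failed=" + response.isCompletedExceptionally()
            + " fileGone=" + !Files.exists(tmp));
      }
    }
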
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java
index bf8d6f1025..dd5f4c4286 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -22,8 +22,7 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.hdds.conf.Config;
 import org.apache.hadoop.hdds.conf.ConfigGroup;
-import org.apache.hadoop.hdds.conf.ConfigType;
-import org.apache.hadoop.hdds.conf.PostConstruct;
+import org.apache.hadoop.hdds.conf.ConfigTag;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
 import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
 import org.apache.hadoop.hdds.tracing.GrpcServerInterceptor;
@@ -40,9 +39,6 @@ import org.apache.ratis.thirdparty.io.netty.handler.ssl.SslContextBuilder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.hadoop.hdds.conf.ConfigTag.DATANODE;
-import static org.apache.hadoop.hdds.conf.ConfigTag.MANAGEMENT;
-
 /**
  * Separated network server for server2server container replication.
  */
@@ -132,33 +128,12 @@ public class ReplicationServer {
   /**
    * Replication-related configuration.
    */
-  @ConfigGroup(prefix = ReplicationConfig.PREFIX)
+  @ConfigGroup(prefix = "hdds.datanode.replication")
   public static final class ReplicationConfig {
 
-    public static final String PREFIX = "hdds.datanode.replication";
-    public static final String STREAMS_LIMIT_KEY = "streams.limit";
-
-    public static final String REPLICATION_STREAMS_LIMIT_KEY =
-        PREFIX + "." + STREAMS_LIMIT_KEY;
-
-    public static final int REPLICATION_MAX_STREAMS_DEFAULT = 10;
-
-    /**
-     * The maximum number of replication commands a single datanode can execute
-     * simultaneously.
-     */
-    @Config(key = STREAMS_LIMIT_KEY,
-        type = ConfigType.INT,
-        defaultValue = "10",
-        tags = {DATANODE},
-        description = "The maximum number of replication commands a single " +
-            "datanode can execute simultaneously"
-    )
-    private int replicationMaxStreams = REPLICATION_MAX_STREAMS_DEFAULT;
-
-    @Config(key = "port", defaultValue = "9886",
-        description = "Port used for the server2server replication server",
-        tags = {DATANODE, MANAGEMENT})
+    @Config(key = "port", defaultValue = "9886", description = "Port used for"
+        + " the server2server replication server", tags = {
+        ConfigTag.MANAGEMENT})
     private int port;
 
     public int getPort() {
@@ -169,25 +144,6 @@ public class ReplicationServer {
       this.port = portParam;
       return this;
     }
-
-    public int getReplicationMaxStreams() {
-      return replicationMaxStreams;
-    }
-
-    public void setReplicationMaxStreams(int replicationMaxStreams) {
-      this.replicationMaxStreams = replicationMaxStreams;
-    }
-
-    @PostConstruct
-    public void validate() {
-      if (replicationMaxStreams < 1) {
-        LOG.warn(REPLICATION_STREAMS_LIMIT_KEY + " must be greater than zero " +
-                "and was set to {}. Defaulting to {}",
-            replicationMaxStreams, REPLICATION_MAX_STREAMS_DEFAULT);
-        replicationMaxStreams = REPLICATION_MAX_STREAMS_DEFAULT;
-      }
-    }
-
   }
 
 }
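
The revert above removes the streams.limit setting together with its @PostConstruct validation, which clamped non-positive values back to the default of 10. Stripped of the Ozone config annotations, the removed clamping logic amounts to this plain-Java sketch:

    // Plain-Java equivalent of the removed @PostConstruct validation:
    // values below 1 fall back to the default instead of breaking the
    // replication thread pool.
    final class StreamLimitValidationSketch {
      static final int REPLICATION_MAX_STREAMS_DEFAULT = 10;

      static int validate(int configured) {
        if (configured < 1) {
          System.out.println("streams.limit must be greater than zero "
              + "and was set to " + configured + ". Defaulting to "
              + REPLICATION_MAX_STREAMS_DEFAULT);
          return REPLICATION_MAX_STREAMS_DEFAULT;
        }
        return configured;
      }

      public static void main(String[] args) {
        System.out.println(validate(4));   // 4
        System.out.println(validate(-2));  // falls back to 10
      }
    }
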
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java
index 4cb826c6ec..05a4173eb7 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.replication.ReplicationServer.ReplicationConfig;
 import org.apache.hadoop.ozone.container.replication.ReplicationTask.Status;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -72,13 +71,6 @@ public class ReplicationSupervisor {
     this.context = context;
   }
 
-  public ReplicationSupervisor(
-      ContainerSet containerSet, StateContext context,
-      ContainerReplicator replicator, ReplicationConfig replicationConfig) {
-    this(containerSet, context, replicator,
-        replicationConfig.getReplicationMaxStreams());
-  }
-
   public ReplicationSupervisor(
       ContainerSet containerSet, StateContext context,
       ContainerReplicator replicator, int poolSize) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/DirstreamClientHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/DirstreamClientHandler.java
index fc9b44924a..e14a391dcb 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/DirstreamClientHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/DirstreamClientHandler.java
@@ -116,7 +116,7 @@ public class DirstreamClientHandler extends ChannelInboundHandlerAdapter {
     }
   }
 
-  public boolean isAtTheEnd() {
+  public boolean isAtTheEnd(){
     return getCurrentFileName().equals(END_MARKER);
   }
   @Override
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeFinalizer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeFinalizer.java
index f25e13c285..9ff4b0aa3d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeFinalizer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeFinalizer.java
@@ -46,7 +46,7 @@ public class DataNodeUpgradeFinalizer extends
   @Override
   public void preFinalizeUpgrade(DatanodeStateMachine dsm)
       throws IOException {
-    if (!canFinalizeDataNode(dsm)) {
+    if(!canFinalizeDataNode(dsm)) {
       // DataNode is not yet ready to finalize.
       // Reset the Finalization state.
       getVersionManager().setUpgradeState(FINALIZATION_REQUIRED);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/VersionedDatanodeFeatures.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/VersionedDatanodeFeatures.java
index 3653e6c9fa..ec8494604a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/VersionedDatanodeFeatures.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/VersionedDatanodeFeatures.java
@@ -120,7 +120,7 @@ public final class VersionedDatanodeFeatures {
       boolean scmHAEnabled =
           conf.getBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY,
           ScmConfigKeys.OZONE_SCM_HA_ENABLE_DEFAULT);
-      if (isFinalized(HDDSLayoutFeature.SCM_HA) || scmHAEnabled) {
+      if (isFinalized(HDDSLayoutFeature.SCM_HA) || scmHAEnabled){
         return clusterID;
       } else {
         return scmID;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java
index 6aa0554e10..e3ea4aeeaf 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java
@@ -27,7 +27,7 @@ import static org.apache.hadoop.hdds.protocol.proto
  * Informs a datanode to register itself with SCM again.
  */
 public class ReregisterCommand extends
-    SCMCommand<ReregisterCommandProto> {
+    SCMCommand<ReregisterCommandProto>{
 
   /**
    * Returns the type of this command.
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
index 6deaddadc9..08ca4c91f5 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
@@ -114,7 +114,7 @@ public class TestHddsSecureDatanodeInit {
   }
 
   @Before
-  public void setUpDNCertClient() {
+  public void setUpDNCertClient(){
 
     FileUtils.deleteQuietly(Paths.get(
         securityConfig.getKeyLocation(DN_COMPONENT).toString(),
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
index 825432290d..15cd4d060c 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
+import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
 import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
@@ -115,7 +115,7 @@ public final class ContainerTestUtils {
   }
 
   public static KeyValueContainer getContainer(long containerId,
-      ContainerLayoutVersion layout,
+      ChunkLayOutVersion layout,
       ContainerProtos.ContainerDataProto.State state) {
     KeyValueContainerData kvData =
         new KeyValueContainerData(containerId,
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
index eef66550df..157dee65ff 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
@@ -131,10 +131,9 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol {
    * @return - count of reported containers.
    */
   public long getContainerCount() {
-    return nodeContainers.values().parallelStream().mapToLong(
-        (containerMap) -> {
-          return containerMap.size();
-        }).sum();
+    return nodeContainers.values().parallelStream().mapToLong((containerMap)->{
+      return containerMap.size();
+    }).sum();
   }
 
   /**
@@ -142,13 +141,11 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol {
    * @return - number of keys reported.
    */
   public long getKeyCount() {
-    return nodeContainers.values().parallelStream().mapToLong(
-        (containerMap) -> {
-          return containerMap.values().parallelStream().mapToLong(
-              (container) -> {
-                return container.getKeyCount();
-              }).sum();
-        }).sum();
+    return nodeContainers.values().parallelStream().mapToLong((containerMap)->{
+      return containerMap.values().parallelStream().mapToLong((container) -> {
+        return container.getKeyCount();
+      }).sum();
+    }).sum();
   }
 
   /**
@@ -156,13 +153,11 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol {
    * @return - number of bytes used.
    */
   public long getBytesUsed() {
-    return nodeContainers.values().parallelStream().mapToLong(
-        (containerMap) -> {
-          return containerMap.values().parallelStream().mapToLong(
-              (container) -> {
-                return container.getUsed();
-              }).sum();
-        }).sum();
+    return nodeContainers.values().parallelStream().mapToLong((containerMap)->{
+      return containerMap.values().parallelStream().mapToLong((container) -> {
+        return container.getUsed();
+      }).sum();
+    }).sum();
   }
 
   /**
@@ -264,7 +259,7 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol {
     List<StorageReportProto> storageReports =
         nodeReport.getStorageReportList();
 
-    for (StorageReportProto report : storageReports) {
+    for(StorageReportProto report : storageReports) {
       nodeReportProto.addStorageReport(report);
     }
 
@@ -318,7 +313,7 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol {
   public int getContainerCountsForDatanode(DatanodeDetails datanodeDetails) {
     Map<String, ContainerReplicaProto> cr =
         nodeContainers.get(datanodeDetails);
-    if (cr != null) {
+    if(cr != null) {
       return cr.size();
     }
     return 0;
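
ScmTestMock's aggregate counters above nest two parallelStream().mapToLong(...).sum() passes over the node-to-containers map; the same total can be read as one flatMap over all replicas. A standalone sketch with plain maps, where Replica is a stand-in carrying only the counted field:

    import java.util.HashMap;
    import java.util.Map;

    // Standalone sketch of the nested parallel-stream sums in ScmTestMock;
    // Replica is a stand-in for the reported container replica.
    final class MockAggregationSketch {
      static final class Replica {
        final long keyCount;
        Replica(long keyCount) {
          this.keyCount = keyCount;
        }
      }

      static long totalKeyCount(Map<String, Map<String, Replica>> nodes) {
        // Equivalent to the nested mapToLong(...).sum() calls above.
        return nodes.values().parallelStream()
            .flatMap(containers -> containers.values().parallelStream())
            .mapToLong(r -> r.keyCount)
            .sum();
      }

      public static void main(String[] args) {
        Map<String, Map<String, Replica>> nodes = new HashMap<>();
        Map<String, Replica> dn1 = new HashMap<>();
        dn1.put("c1", new Replica(3));
        nodes.put("dn1", dn1);
        Map<String, Replica> dn2 = new HashMap<>();
        dn2.put("c2", new Replica(5));
        nodes.put("dn2", dn2);
        System.out.println(totalKeyCount(nodes)); // 8
      }
    }
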
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
index 5306eb05d9..347961ad9e 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
@@ -50,7 +50,7 @@ import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
-import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
+import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy;
@@ -94,7 +94,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVI
 import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_VERSIONS;
 import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V1;
 import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2;
-import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_BLOCK;
+import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_BLOCK;
 import static org.apache.hadoop.ozone.container.common.states.endpoint.VersionEndpointTask.LOG;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.mock;
@@ -113,7 +113,7 @@ public class TestBlockDeletingService {
   private static String datanodeUuid;
   private static MutableConfigurationSource conf;
 
-  private final ContainerLayoutVersion layout;
+  private final ChunkLayOutVersion layout;
   private final String schemaVersion;
   private int blockLimitPerInterval;
   private static VolumeSet volumeSet;
@@ -134,17 +134,16 @@ public class TestBlockDeletingService {
    */
   public static class LayoutInfo {
     private final String schemaVersion;
-    private final ContainerLayoutVersion layout;
+    private final ChunkLayOutVersion layout;
 
-    public LayoutInfo(String schemaVersion, ContainerLayoutVersion layout) {
+    public LayoutInfo(String schemaVersion, ChunkLayOutVersion layout) {
       this.schemaVersion = schemaVersion;
       this.layout = layout;
     }
 
     private static List<LayoutInfo> layoutList = new ArrayList<>();
     static {
-      for (ContainerLayoutVersion ch :
-          ContainerLayoutVersion.getAllVersions()) {
+      for (ChunkLayOutVersion ch : ChunkLayOutVersion.getAllVersions()) {
         for (String sch : SCHEMA_VERSIONS) {
           layoutList.add(new LayoutInfo(sch, ch));
         }
@@ -356,8 +355,7 @@ public class TestBlockDeletingService {
           .put(OzoneConsts.PENDING_DELETE_BLOCK_COUNT,
               (long) numOfBlocksPerContainer);
     } catch (IOException exception) {
-      LOG.warn("Meta Data update was not successful for container: "
-          + container);
+      LOG.warn("Meta Data update was not successful for container: "+container);
     }
   }
 
@@ -428,7 +426,7 @@ public class TestBlockDeletingService {
     KeyValueContainerData data = (KeyValueContainerData) containerData.get(0);
     Assert.assertEquals(1, containerData.size());
 
-    try (ReferenceCountedDB meta = BlockUtils.getDB(
+    try(ReferenceCountedDB meta = BlockUtils.getDB(
         (KeyValueContainerData) containerData.get(0), conf)) {
       Map<Long, Container<?>> containerMap = containerSet.getContainerMapCopy();
       // NOTE: this test assumes that all the container is KetValueContainer and
@@ -735,7 +733,7 @@ public class TestBlockDeletingService {
       // in all the containers are deleted)).
       deleteAndWait(service, 2);
 
-      long totalContainerBlocks = blocksPerContainer * containerCount;
+      long totalContainerBlocks = blocksPerContainer*containerCount;
       GenericTestUtils.waitFor(() ->
               totalContainerBlocks * blockSpace ==
                       (totalContainerSpace -
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerLayoutVersion.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestChunkLayOutVersion.java
similarity index 73%
rename from hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerLayoutVersion.java
rename to hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestChunkLayOutVersion.java
index fb5b4914fb..be6c6798b6 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerLayoutVersion.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestChunkLayOutVersion.java
@@ -18,21 +18,21 @@
 
 package org.apache.hadoop.ozone.container.common;
 
-import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
+import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
 import org.junit.Test;
 
-import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_BLOCK;
-import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_CHUNK;
+import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_BLOCK;
+import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_CHUNK;
 import static org.junit.Assert.assertEquals;
 
 /**
- * This class tests ContainerLayoutVersion.
+ * This class tests ChunkLayOutVersion.
  */
-public class TestContainerLayoutVersion {
+public class TestChunkLayOutVersion {
 
   @Test
   public void testVersionCount() {
-    assertEquals(2, ContainerLayoutVersion.getAllVersions().size());
+    assertEquals(2, ChunkLayOutVersion.getAllVersions().size());
   }
 
   @Test
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
index e55d68cbe3..562775d263 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
@@ -177,7 +177,7 @@ public class TestContainerCache {
     for (Future future: futureList) {
       try {
         future.get();
-      } catch (InterruptedException | ExecutionException e) {
+      } catch (InterruptedException| ExecutionException e) {
         Assert.fail("Should get the DB instance");
       }
     }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
index ab8bd834f7..3814fdd259 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.ozone.container.common;
 
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
-import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo;
+import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
+import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures;
 import org.junit.Test;
@@ -41,15 +41,15 @@ public class TestKeyValueContainerData {
 
   private static final long MAXSIZE = (long) StorageUnit.GB.toBytes(5);
 
-  private final ContainerLayoutVersion layout;
+  private final ChunkLayOutVersion layout;
 
-  public TestKeyValueContainerData(ContainerLayoutVersion layout) {
+  public TestKeyValueContainerData(ChunkLayOutVersion layout) {
     this.layout = layout;
   }
 
   @Parameterized.Parameters
   public static Iterable<Object[]> parameters() {
-    return ContainerLayoutTestInfo.containerLayoutParameters();
+    return ChunkLayoutTestInfo.chunkLayoutParameters();
   }
 
   @Test
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
index c8bb93b26c..700c6c2abe 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
@@ -130,8 +130,7 @@ public class TestSchemaOneBackwardsCompatibility {
    */
   @Test
   public void testDirectTableIterationDisabled() throws Exception {
-    try (ReferenceCountedDB refCountedDB =
-        BlockUtils.getDB(newKvData(), conf)) {
+    try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
       DatanodeStore store = refCountedDB.getStore();
 
       assertTableIteratorUnsupported(store.getMetadataTable());
@@ -159,8 +158,7 @@ public class TestSchemaOneBackwardsCompatibility {
    */
   @Test
   public void testBlockIteration() throws IOException {
-    try (ReferenceCountedDB refCountedDB =
-        BlockUtils.getDB(newKvData(), conf)) {
+    try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
       assertEquals(TestDB.NUM_DELETED_BLOCKS, countDeletedBlocks(refCountedDB));
 
       assertEquals(TestDB.NUM_PENDING_DELETION_BLOCKS,
@@ -280,8 +278,7 @@ public class TestSchemaOneBackwardsCompatibility {
     final long expectedRegularBlocks =
             TestDB.KEY_COUNT - numBlocksToDelete;
 
-    try (ReferenceCountedDB refCountedDB =
-        BlockUtils.getDB(newKvData(), conf)) {
+    try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
       // Test results via block iteration.
 
       assertEquals(expectedDeletingBlocks,
@@ -323,8 +320,7 @@ public class TestSchemaOneBackwardsCompatibility {
         new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet,
             metrics, c -> {
         });
-    try (ReferenceCountedDB refCountedDB =
-        BlockUtils.getDB(newKvData(), conf)) {
+    try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
       // Read blocks that were already deleted before the upgrade.
       List<? extends Table.KeyValue<String, ChunkInfoList>> deletedBlocks =
               refCountedDB.getStore()
@@ -332,13 +328,13 @@ public class TestSchemaOneBackwardsCompatibility {
 
       Set<String> preUpgradeBlocks = new HashSet<>();
 
-      for (Table.KeyValue<String, ChunkInfoList> chunkListKV: deletedBlocks) {
+      for(Table.KeyValue<String, ChunkInfoList> chunkListKV: deletedBlocks) {
         preUpgradeBlocks.add(chunkListKV.getKey());
         try {
           chunkListKV.getValue();
           Assert.fail("No exception thrown when trying to retrieve old " +
                   "deleted blocks values as chunk lists.");
-        } catch (IOException ex) {
+        } catch(IOException ex) {
           // Exception thrown as expected.
         }
       }
@@ -374,8 +370,7 @@ public class TestSchemaOneBackwardsCompatibility {
 
   @Test
   public void testReadBlockData() throws Exception {
-    try (ReferenceCountedDB refCountedDB =
-        BlockUtils.getDB(newKvData(), conf)) {
+    try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
       Table<String, BlockData> blockDataTable =
           refCountedDB.getStore().getBlockDataTable();
 
@@ -400,12 +395,12 @@ public class TestSchemaOneBackwardsCompatibility {
       Assert.assertEquals(TestDB.BLOCK_IDS, decodedKeys);
 
       // Test reading blocks with block iterator.
-      try (BlockIterator<BlockData> iter =
+      try(BlockIterator<BlockData> iter =
               refCountedDB.getStore().getBlockIterator()) {
 
         List<String> iteratorBlockIDs = new ArrayList<>();
 
-        while (iter.hasNext()) {
+        while(iter.hasNext()) {
           long localID = iter.nextBlock().getBlockID().getLocalID();
           iteratorBlockIDs.add(Long.toString(localID));
         }
@@ -417,8 +412,7 @@ public class TestSchemaOneBackwardsCompatibility {
 
   @Test
   public void testReadDeletingBlockData() throws Exception {
-    try (ReferenceCountedDB refCountedDB =
-        BlockUtils.getDB(newKvData(), conf)) {
+    try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
       Table<String, BlockData> blockDataTable =
           refCountedDB.getStore().getBlockDataTable();
 
@@ -452,12 +446,12 @@ public class TestSchemaOneBackwardsCompatibility {
       MetadataKeyFilters.KeyPrefixFilter filter =
           MetadataKeyFilters.getDeletingKeyFilter();
 
-      try (BlockIterator<BlockData> iter =
+      try(BlockIterator<BlockData> iter =
               refCountedDB.getStore().getBlockIterator(filter)) {
 
         List<String> iteratorBlockIDs = new ArrayList<>();
 
-        while (iter.hasNext()) {
+        while(iter.hasNext()) {
           long localID = iter.nextBlock().getBlockID().getLocalID();
           iteratorBlockIDs.add(Long.toString(localID));
         }
@@ -469,8 +463,7 @@ public class TestSchemaOneBackwardsCompatibility {
 
   @Test
   public void testReadMetadata() throws Exception {
-    try (ReferenceCountedDB refCountedDB =
-        BlockUtils.getDB(newKvData(), conf)) {
+    try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
       Table<String, Long> metadataTable =
           refCountedDB.getStore().getMetadataTable();
 
@@ -486,8 +479,7 @@ public class TestSchemaOneBackwardsCompatibility {
 
   @Test
   public void testReadDeletedBlocks() throws Exception {
-    try (ReferenceCountedDB refCountedDB =
-        BlockUtils.getDB(newKvData(), conf)) {
+    try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
       Table<String, ChunkInfoList> deletedBlocksTable =
           refCountedDB.getStore().getDeletedBlocksTable();
 
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java
index 85a8bda8a6..00f68ef3dc 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java
@@ -60,11 +60,11 @@ public class TestBlockData {
     assertChunks(expected, computed);
     long offset = 0;
     int n = 5;
-    for (int i = 0; i < n; i++) {
+    for(int i = 0; i < n; i++) {
       offset += assertAddChunk(expected, computed, offset);
     }
 
-    for (; !expected.isEmpty();) {
+    for(; !expected.isEmpty();) {
       removeChunk(expected, computed);
     }
   }
@@ -125,7 +125,7 @@ public class TestBlockData {
     assertChunks(expected, computed);
     long offset = 0;
     int n = 5;
-    for (int i = 0; i < n; i++) {
+    for(int i = 0; i < n; i++) {
       offset += addChunk(expected, offset).getLen();
       LOG.info("setChunk: {}", toString(expected));
       computed.setChunks(expected);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java
index 1ca9f9e857..84f50087e1 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java
@@ -50,7 +50,7 @@ public class TestDatanodeVersionFile {
   private int lv;
 
   @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
+  public TemporaryFolder folder= new TemporaryFolder();
 
   @Before
   public void setup() throws IOException {
@@ -70,7 +70,7 @@ public class TestDatanodeVersionFile {
   }
 
   @Test
-  public void testCreateAndReadVersionFile() throws IOException {
+  public void testCreateAndReadVersionFile() throws IOException{
 
     //Check VersionFile exists
     assertTrue(versionFile.exists());
@@ -88,7 +88,7 @@ public class TestDatanodeVersionFile {
   }
 
   @Test
-  public void testIncorrectClusterId() throws IOException {
+  public void testIncorrectClusterId() throws IOException{
     try {
       String randomClusterID = UUID.randomUUID().toString();
       HddsVolumeUtil.getClusterID(properties, versionFile,
@@ -100,7 +100,7 @@ public class TestDatanodeVersionFile {
   }
 
   @Test
-  public void testVerifyCTime() throws IOException {
+  public void testVerifyCTime() throws IOException{
     long invalidCTime = -10;
     dnVersionFile = new DatanodeVersionFile(
         storageID, clusterID, datanodeUUID, invalidCTime, lv);
@@ -117,7 +117,7 @@ public class TestDatanodeVersionFile {
   }
 
   @Test
-  public void testVerifyLayOut() throws IOException {
+  public void testVerifyLayOut() throws IOException{
     int invalidLayOutVersion = 100;
     dnVersionFile = new DatanodeVersionFile(
         storageID, clusterID, datanodeUUID, cTime, invalidLayOutVersion);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
index 0bfdb173a4..4dc38e9a25 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo;
+import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures;
 import org.apache.ozone.test.GenericTestUtils;
@@ -39,7 +39,7 @@ import java.io.IOException;
 import java.time.Instant;
 import java.util.UUID;
 
-import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_CHUNK;
+import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_CHUNK;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -61,16 +61,16 @@ public class TestContainerDataYaml {
   private static final String VOLUME_OWNER = "hdfs";
   private static final String CONTAINER_DB_TYPE = "RocksDB";
 
-  private final ContainerLayoutVersion layout;
+  private final ChunkLayOutVersion layout;
   private OzoneConfiguration conf = new OzoneConfiguration();
     
-  public TestContainerDataYaml(ContainerLayoutVersion layout) {
+  public TestContainerDataYaml(ChunkLayOutVersion layout) {
     this.layout = layout;
   }
 
   @Parameterized.Parameters
   public static Iterable<Object[]> parameters() {
-    return ContainerLayoutTestInfo.containerLayoutParameters();
+    return ChunkLayoutTestInfo.chunkLayoutParameters();
   }
 
   /**
@@ -126,7 +126,7 @@ public class TestContainerDataYaml {
     assertEquals(containerFile.getParent(), kvData.getChunksPath());
     assertEquals(ContainerProtos.ContainerDataProto.State.OPEN, kvData
         .getState());
-    assertEquals(layout, kvData.getLayoutVersion());
+    assertEquals(layout, kvData.getLayOutVersion());
     assertEquals(0, kvData.getMetadata().size());
     assertEquals(MAXSIZE, kvData.getMaxSize());
     assertEquals(MAXSIZE, kvData.getMaxSize());
@@ -160,7 +160,7 @@ public class TestContainerDataYaml {
     assertEquals(containerFile.getParent(), kvData.getChunksPath());
     assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, kvData
         .getState());
-    assertEquals(layout, kvData.getLayoutVersion());
+    assertEquals(layout, kvData.getLayOutVersion());
     assertEquals(2, kvData.getMetadata().size());
     assertEquals(VOLUME_OWNER, kvData.getMetadata().get(OzoneConsts.VOLUME));
     assertEquals(OzoneConsts.OZONE,
@@ -174,7 +174,7 @@ public class TestContainerDataYaml {
   }
 
   @Test
-  public void testIncorrectContainerFile() throws IOException {
+  public void testIncorrectContainerFile() throws IOException{
     try {
       String containerFile = "incorrect.container";
       //Get file from resources folder
@@ -217,7 +217,7 @@ public class TestContainerDataYaml {
           .getChunksPath());
       assertEquals("/hdds/current/aed-fg4-hji-jkl/containerDir0/1", kvData
           .getMetadataPath());
-      assertEquals(FILE_PER_CHUNK, kvData.getLayoutVersion());
+      assertEquals(FILE_PER_CHUNK, kvData.getLayOutVersion());
       assertEquals(2, kvData.getMetadata().size());
 
     } catch (Exception ex) {
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
index 92ebbcacc1..14f46d944f 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDeletionChoosingPolicy;
-import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo;
+import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.statemachine.background.BlockDeletingService;
@@ -63,15 +63,15 @@ public class TestContainerDeletionChoosingPolicy {
   private static final int SERVICE_TIMEOUT_IN_MILLISECONDS = 0;
   private static final int SERVICE_INTERVAL_IN_MILLISECONDS = 1000;
 
-  private final ContainerLayoutVersion layout;
+  private final ChunkLayOutVersion layout;
 
-  public TestContainerDeletionChoosingPolicy(ContainerLayoutVersion layout) {
+  public TestContainerDeletionChoosingPolicy(ChunkLayOutVersion layout) {
     this.layout = layout;
   }
 
   @Parameterized.Parameters
   public static Iterable<Object[]> parameters() {
-    return ContainerLayoutTestInfo.containerLayoutParameters();
+    return ChunkLayoutTestInfo.chunkLayoutParameters();
   }
 
   @Before
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
index 7fbd7546c0..5e80a7edfd 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
@@ -55,7 +55,7 @@ import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo;
+import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
@@ -115,15 +115,15 @@ public class TestContainerPersistence {
   @Rule
   public Timeout testTimeout = Timeout.seconds(300);
 
-  private final ContainerLayoutVersion layout;
+  private final ChunkLayOutVersion layout;
 
-  public TestContainerPersistence(ContainerLayoutVersion layout) {
+  public TestContainerPersistence(ChunkLayOutVersion layout) {
     this.layout = layout;
   }
 
   @Parameterized.Parameters
   public static Iterable<Object[]> parameters() {
-    return ContainerLayoutTestInfo.containerLayoutParameters();
+    return ChunkLayoutTestInfo.chunkLayoutParameters();
   }
 
   @BeforeClass
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java
index d51d78e4ad..f2af230a56 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerExcep
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo;
+import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.ozone.test.GenericTestUtils;
@@ -61,15 +61,15 @@ public class TestContainerSet {
 
   private static final int FIRST_ID = 2;
 
-  private final ContainerLayoutVersion layout;
+  private final ChunkLayOutVersion layout;
 
-  public TestContainerSet(ContainerLayoutVersion layout) {
+  public TestContainerSet(ChunkLayOutVersion layout) {
     this.layout = layout;
   }
 
   @Parameterized.Parameters
   public static Iterable<Object[]> parameters() {
-    return ContainerLayoutTestInfo.containerLayoutParameters();
+    return ChunkLayoutTestInfo.chunkLayoutParameters();
   }
 
   @Test
@@ -122,11 +122,11 @@ public class TestContainerSet {
     Iterator<Container<?>> iterator = containerSet.getContainerIterator();
 
     int count = 0;
-    while (iterator.hasNext()) {
+    while(iterator.hasNext()) {
       Container kv = iterator.next();
       ContainerData containerData = kv.getContainerData();
       long containerId = containerData.getContainerID();
-      if (containerId % 2 == 0) {
+      if (containerId%2 == 0) {
         assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED,
             containerData.getState());
       } else {
@@ -146,7 +146,7 @@ public class TestContainerSet {
       Container kv = containerMapIterator.next().getValue();
       ContainerData containerData = kv.getContainerData();
       long containerId = containerData.getContainerID();
-      if (containerId % 2 == 0) {
+      if (containerId%2 == 0) {
         assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED,
             containerData.getState());
       } else {
@@ -167,12 +167,12 @@ public class TestContainerSet {
     Mockito.when(vol2.getStorageID()).thenReturn("uuid-2");
 
     ContainerSet containerSet = new ContainerSet();
-    for (int i = 0; i < 10; i++) {
+    for (int i=0; i<10; i++) {
       KeyValueContainerData kvData = new KeyValueContainerData(i,
           layout,
           (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(),
           UUID.randomUUID().toString());
-      if (i % 2 == 0) {
+      if (i%2 == 0) {
         kvData.setVolume(vol1);
       } else {
         kvData.setVolume(vol2);
@@ -307,7 +307,7 @@ public class TestContainerSet {
           layout,
           (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(),
           UUID.randomUUID().toString());
-      if (i % 2 == 0) {
+      if (i%2 == 0) {
         kvData.setState(ContainerProtos.ContainerDataProto.State.CLOSED);
       } else {
         kvData.setState(ContainerProtos.ContainerDataProto.State.OPEN);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
index 9b8da361b2..618dd62913 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
@@ -50,7 +50,7 @@ import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingP
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
-import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo;
+import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.ozone.test.GenericTestUtils;
@@ -83,17 +83,17 @@ import static org.mockito.Mockito.verify;
 public class TestHddsDispatcher {
 
   public static final Consumer<ContainerReplicaProto> NO_OP_ICR_SENDER =
-      c -> { };
+      c -> {};
 
-  private final ContainerLayoutVersion layout;
+  private final ChunkLayOutVersion layout;
 
-  public TestHddsDispatcher(ContainerLayoutVersion layout) {
+  public TestHddsDispatcher(ChunkLayOutVersion layout) {
     this.layout = layout;
   }
 
   @Parameterized.Parameters
   public static Iterable<Object[]> parameters() {
-    return ContainerLayoutTestInfo.containerLayoutParameters();
+    return ChunkLayoutTestInfo.chunkLayoutParameters();
   }
 
   @Test
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java
index 2b1bc3d248..f969148a16 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java
@@ -82,7 +82,7 @@ public class TestHandler {
   }
 
   @After
-  public void tearDown() {
+  public void tearDown(){
     ContainerMetrics.remove();
   }
 
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
index f2770d2941..83e44d3adf 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
@@ -198,7 +198,7 @@ public class TestReportPublisher {
     GeneratedMessage report =
         ((CRLStatusReportPublisher) publisher).getReport();
     Assert.assertNotNull(report);
-    for (Descriptors.FieldDescriptor descriptor :
+    for(Descriptors.FieldDescriptor descriptor :
         report.getDescriptorForType().getFields()) {
       if (descriptor.getNumber() ==
           CRLStatusReport.RECEIVEDCRLID_FIELD_NUMBER) {
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java
index 5f1b0a6320..1b4265476a 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java
@@ -28,6 +28,8 @@ import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConf
 import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.DISK_CHECK_MIN_GAP_DEFAULT;
 import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.DISK_CHECK_TIMEOUT_DEFAULT;
 import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.DISK_CHECK_TIMEOUT_KEY;
+import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.REPLICATION_MAX_STREAMS_DEFAULT;
+import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.REPLICATION_STREAMS_LIMIT_KEY;
 import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.PERIODIC_DISK_CHECK_INTERVAL_MINUTES_KEY;
 import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.PERIODIC_DISK_CHECK_INTERVAL_MINUTES_DEFAULT;
 import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.FAILED_DATA_VOLUMES_TOLERATED_KEY;
@@ -44,12 +46,14 @@ public class TestDatanodeConfiguration {
   @Test
   public void acceptsValidValues() {
     // GIVEN
+    int validReplicationLimit = 123;
     int validDeleteThreads = 42;
     long validDiskCheckIntervalMinutes = 60;
     int validFailedVolumesTolerated = 10;
     long validDiskCheckMinGap = 2;
     long validDiskCheckTimeout = 1;
     OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setInt(REPLICATION_STREAMS_LIMIT_KEY, validReplicationLimit);
     conf.setInt(CONTAINER_DELETE_THREADS_MAX_KEY, validDeleteThreads);
     conf.setLong(PERIODIC_DISK_CHECK_INTERVAL_MINUTES_KEY,
         validDiskCheckIntervalMinutes);
@@ -66,6 +70,7 @@ public class TestDatanodeConfiguration {
     DatanodeConfiguration subject = conf.getObject(DatanodeConfiguration.class);
 
     // THEN
+    assertEquals(validReplicationLimit, subject.getReplicationMaxStreams());
     assertEquals(validDeleteThreads, subject.getContainerDeleteThreads());
     assertEquals(validDiskCheckIntervalMinutes,
         subject.getPeriodicDiskCheckIntervalMinutes());
@@ -82,12 +87,14 @@ public class TestDatanodeConfiguration {
   @Test
   public void overridesInvalidValues() {
     // GIVEN
+    int invalidReplicationLimit = -5;
     int invalidDeleteThreads = 0;
     long invalidDiskCheckIntervalMinutes = -1;
     int invalidFailedVolumesTolerated = -2;
     long invalidDiskCheckMinGap = -1;
     long invalidDiskCheckTimeout = -1;
     OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setInt(REPLICATION_STREAMS_LIMIT_KEY, invalidReplicationLimit);
     conf.setInt(CONTAINER_DELETE_THREADS_MAX_KEY, invalidDeleteThreads);
     conf.setLong(PERIODIC_DISK_CHECK_INTERVAL_MINUTES_KEY,
         invalidDiskCheckIntervalMinutes);
@@ -104,6 +111,8 @@ public class TestDatanodeConfiguration {
     DatanodeConfiguration subject = conf.getObject(DatanodeConfiguration.class);
 
     // THEN
+    assertEquals(REPLICATION_MAX_STREAMS_DEFAULT,
+        subject.getReplicationMaxStreams());
     assertEquals(CONTAINER_DELETE_THREADS_DEFAULT,
         subject.getContainerDeleteThreads());
     assertEquals(PERIODIC_DISK_CHECK_INTERVAL_MINUTES_DEFAULT,
@@ -127,6 +136,8 @@ public class TestDatanodeConfiguration {
     DatanodeConfiguration subject = conf.getObject(DatanodeConfiguration.class);
 
     // THEN
+    assertEquals(REPLICATION_MAX_STREAMS_DEFAULT,
+        subject.getReplicationMaxStreams());
     assertEquals(CONTAINER_DELETE_THREADS_DEFAULT,
         subject.getContainerDeleteThreads());
     assertEquals(PERIODIC_DISK_CHECK_INTERVAL_MINUTES_DEFAULT,
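
The new assertions exercise the self-correcting behavior of Ozone's typed configuration: values written through the raw key are validated when conf.getObject(...) materializes the DatanodeConfiguration, and out-of-range entries fall back to the documented default. A sketch of that round trip, using only calls that appear in this diff (setInt, getObject, getReplicationMaxStreams and the two imported constants); it assumes the hdds container-service classes on the classpath:

    import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.REPLICATION_MAX_STREAMS_DEFAULT;
    import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.REPLICATION_STREAMS_LIMIT_KEY;

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;

    public final class DatanodeConfigSketch {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        conf.setInt(REPLICATION_STREAMS_LIMIT_KEY, -5); // invalid on purpose

        // getObject() runs the config's post-processing, which discards
        // the invalid value in favor of the default.
        DatanodeConfiguration dnConf =
            conf.getObject(DatanodeConfiguration.class);

        System.out.println(dnConf.getReplicationMaxStreams()
            == REPLICATION_MAX_STREAMS_DEFAULT); // expected: true
      }
    }
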
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
index de9968128e..7e1ea76035 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
@@ -20,7 +20,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
+import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.interfaces.Handler;
@@ -28,7 +28,7 @@ import org.apache.hadoop.ozone.container.common.statemachine
     .DatanodeStateMachine;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
 import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi;
-import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo;
+import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
@@ -72,15 +72,15 @@ public class TestCloseContainerCommandHandler {
   private CloseContainerCommandHandler subject =
       new CloseContainerCommandHandler();
 
-  private final ContainerLayoutVersion layout;
+  private final ChunkLayOutVersion layout;
 
-  public TestCloseContainerCommandHandler(ContainerLayoutVersion layout) {
+  public TestCloseContainerCommandHandler(ChunkLayOutVersion layout) {
     this.layout = layout;
   }
 
   @Parameterized.Parameters
   public static Iterable<Object[]> parameters() {
-    return ContainerLayoutTestInfo.containerLayoutParameters();
+    return ChunkLayoutTestInfo.chunkLayoutParameters();
   }
 
   @Before
@@ -214,7 +214,7 @@ public class TestCloseContainerCommandHandler {
     } catch (IOException e) {
 
       GenericTestUtils.assertExceptionContains("The Container " +
-                      "is not found. ContainerID: " + containerID, e);
+                      "is not found. ContainerID: "+containerID, e);
     }
   }
 
@@ -227,7 +227,7 @@ public class TestCloseContainerCommandHandler {
     } catch (IOException e) {
       GenericTestUtils.assertExceptionContains("The Container is in " +
               "the MissingContainerSet hence we can't close it. " +
-              "ContainerID: " + containerID, e);
+              "ContainerID: "+containerID, e);
     }
   }
 
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
index dfe7cb314b..990d4c95bf 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
@@ -114,7 +114,7 @@ public class TestRoundRobinVolumeChoosingPolicy {
     try {
       policy.chooseVolume(volumes, blockSize);
       Assert.fail("expected to throw DiskOutOfSpaceException");
-    } catch (DiskOutOfSpaceException e) {
+    } catch(DiskOutOfSpaceException e) {
       Assert.assertEquals("Not returning the expected message",
           "Out of space: The volume with the most available space (=" + 200
               + " B) is less than the container size (=" + blockSize + " B).",
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolumeChecker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolumeChecker.java
index 72faf570ae..55b4c39b5d 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolumeChecker.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolumeChecker.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdfs.server.datanode.checker.Checkable;
 import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
 import org.apache.hadoop.ozone.container.common.ContainerTestUtils;
-import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
+import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
@@ -98,10 +98,10 @@ public class TestStorageVolumeChecker {
    */
   private final VolumeCheckResult expectedVolumeHealth;
 
-  private final ContainerLayoutVersion layout;
+  private final ChunkLayOutVersion layout;
 
   public TestStorageVolumeChecker(VolumeCheckResult result,
-      ContainerLayoutVersion layout) {
+      ChunkLayOutVersion layout) {
     this.expectedVolumeHealth = result;
     this.layout = layout;
   }
@@ -127,7 +127,7 @@ public class TestStorageVolumeChecker {
   @Parameters
   public static Collection<Object[]> data() {
     List<Object[]> values = new ArrayList<>();
-    for (ContainerLayoutVersion layout : ContainerLayoutVersion.values()) {
+    for (ChunkLayOutVersion layout : ChunkLayOutVersion.values()) {
       for (VolumeCheckResult result : VolumeCheckResult.values()) {
         values.add(new Object[]{result, layout});
       }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
index f0869c9c6f..52bf3d3200 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
@@ -225,7 +225,7 @@ public class TestVolumeSet {
   }
 
   @Test
-  public void testFailVolumes() throws  Exception {
+  public void testFailVolumes() throws  Exception{
     MutableVolumeSet volSet = null;
     File readOnlyVolumePath = new File(baseDir);
     //Set to readonly, so that this volume will be failed
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerLayoutTestInfo.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ChunkLayoutTestInfo.java
similarity index 84%
rename from hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerLayoutTestInfo.java
rename to hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ChunkLayoutTestInfo.java
index 0adaa09bd8..34e31b766e 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerLayoutTestInfo.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ChunkLayoutTestInfo.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.container.keyvalue;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
+import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
 import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerDummyImpl;
 import org.apache.hadoop.ozone.container.keyvalue.impl.FilePerBlockStrategy;
 import org.apache.hadoop.ozone.container.keyvalue.impl.FilePerChunkStrategy;
@@ -29,7 +29,7 @@ import java.io.File;
 
 import static java.util.stream.Collectors.toList;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_PERSISTDATA;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_LAYOUT_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
@@ -37,7 +37,7 @@ import static org.junit.Assert.assertTrue;
 /**
  * Interface of parameters for testing different chunk layout implementations.
  */
-public enum ContainerLayoutTestInfo {
+public enum ChunkLayoutTestInfo {
 
   DUMMY {
     @Override
@@ -51,7 +51,7 @@ public enum ContainerLayoutTestInfo {
     }
 
     @Override
-    public ContainerLayoutVersion getLayout() {
+    public ChunkLayOutVersion getLayout() {
       return null;
     }
 
@@ -73,8 +73,8 @@ public enum ContainerLayoutTestInfo {
     }
 
     @Override
-    public ContainerLayoutVersion getLayout() {
-      return ContainerLayoutVersion.FILE_PER_CHUNK;
+    public ChunkLayOutVersion getLayout() {
+      return ChunkLayOutVersion.FILE_PER_CHUNK;
     }
   },
 
@@ -90,8 +90,8 @@ public enum ContainerLayoutTestInfo {
     }
 
     @Override
-    public ContainerLayoutVersion getLayout() {
-      return ContainerLayoutVersion.FILE_PER_BLOCK;
+    public ChunkLayOutVersion getLayout() {
+      return ChunkLayOutVersion.FILE_PER_BLOCK;
     }
   };
 
@@ -101,10 +101,10 @@ public enum ContainerLayoutTestInfo {
   public abstract void validateFileCount(File dir, long blockCount,
       long chunkCount);
 
-  public abstract ContainerLayoutVersion getLayout();
+  public abstract ChunkLayOutVersion getLayout();
 
   public void updateConfig(OzoneConfiguration config) {
-    config.set(OZONE_SCM_CONTAINER_LAYOUT_KEY, getLayout().name());
+    config.set(OZONE_SCM_CHUNK_LAYOUT_KEY, getLayout().name());
   }
 
   private static void assertFileCount(File dir, long count) {
@@ -116,8 +116,8 @@ public enum ContainerLayoutTestInfo {
     assertEquals(count, files.length);
   }
 
-  public static Iterable<Object[]> containerLayoutParameters() {
-    return ContainerLayoutVersion.getAllVersions().stream()
+  public static Iterable<Object[]> chunkLayoutParameters() {
+    return ChunkLayOutVersion.getAllVersions().stream()
         .map(each -> new Object[] {each})
         .collect(toList());
   }
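
Besides supplying parameters, this enum acts as a per-layout strategy: each constant creates the matching ChunkManager, validates the on-disk file count for its layout, and — via updateConfig — writes its layout name under OZONE_SCM_CHUNK_LAYOUT_KEY. A short sketch of the config round trip, grounded in calls visible in this diff (getConfiguredVersion is used later by AbstractTestChunkManager); it assumes the container-service test classes on the classpath:

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
    import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo;

    public final class LayoutConfigSketch {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();

        // Writes FILE_PER_BLOCK under OZONE_SCM_CHUNK_LAYOUT_KEY.
        ChunkLayoutTestInfo.FILE_PER_BLOCK.updateConfig(conf);

        // Reads the same key back as an enum constant.
        ChunkLayOutVersion layout =
            ChunkLayOutVersion.getConfiguredVersion(conf);
        System.out.println(layout); // expected: FILE_PER_BLOCK
      }
    }
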
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
index fbf39f7028..264c6bbf1c 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
+import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
 import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;
 import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
@@ -42,8 +42,8 @@ import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.ozone.test.GenericTestUtils;
 
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
-import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_BLOCK;
-import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_CHUNK;
+import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_BLOCK;
+import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_CHUNK;
 
 import org.junit.After;
 import static org.junit.Assert.assertEquals;
@@ -68,9 +68,9 @@ public class TestKeyValueBlockIterator {
   private OzoneConfiguration conf;
   private File testRoot;
   private ReferenceCountedDB db;
-  private final ContainerLayoutVersion layout;
+  private final ChunkLayOutVersion layout;
 
-  public TestKeyValueBlockIterator(ContainerLayoutVersion layout) {
+  public TestKeyValueBlockIterator(ChunkLayOutVersion layout) {
     this.layout = layout;
   }
 
@@ -120,7 +120,7 @@ public class TestKeyValueBlockIterator {
 
     // Default filter used is all unprefixed blocks.
     List<Long> unprefixedBlockIDs = blockIDs.get("");
-    try (BlockIterator<BlockData> keyValueBlockIterator =
+    try(BlockIterator<BlockData> keyValueBlockIterator =
                 db.getStore().getBlockIterator()) {
 
       Iterator<Long> blockIDIter = unprefixedBlockIDs.iterator();
@@ -152,7 +152,7 @@ public class TestKeyValueBlockIterator {
   @Test
   public void testKeyValueBlockIteratorWithNextBlock() throws Exception {
     List<Long> blockIDs = createContainerWithBlocks(CONTAINER_ID, 2);
-    try (BlockIterator<BlockData> keyValueBlockIterator =
+    try(BlockIterator<BlockData> keyValueBlockIterator =
                 db.getStore().getBlockIterator()) {
       assertEquals((long)blockIDs.get(0),
               keyValueBlockIterator.nextBlock().getLocalID());
@@ -171,7 +171,7 @@ public class TestKeyValueBlockIterator {
   @Test
   public void testKeyValueBlockIteratorWithHasNext() throws Exception {
     List<Long> blockIDs = createContainerWithBlocks(CONTAINER_ID, 2);
-    try (BlockIterator<BlockData> blockIter =
+    try(BlockIterator<BlockData> blockIter =
                 db.getStore().getBlockIterator()) {
 
       // Even calling multiple times hasNext() should not move entry forward.
@@ -209,7 +209,7 @@ public class TestKeyValueBlockIterator {
     int deletingBlocks = 5;
     Map<String, List<Long>> blockIDs = createContainerWithBlocks(CONTAINER_ID,
             normalBlocks, deletingBlocks);
-    try (BlockIterator<BlockData> keyValueBlockIterator =
+    try(BlockIterator<BlockData> keyValueBlockIterator =
                 db.getStore().getBlockIterator(
                         MetadataKeyFilters.getDeletingKeyFilter())) {
       List<Long> deletingBlockIDs =
@@ -230,7 +230,7 @@ public class TestKeyValueBlockIterator {
   public void testKeyValueBlockIteratorWithOnlyDeletedBlocks() throws
       Exception {
     createContainerWithBlocks(CONTAINER_ID, 0, 5);
-    try (BlockIterator<BlockData> keyValueBlockIterator =
+    try(BlockIterator<BlockData> keyValueBlockIterator =
                 db.getStore().getBlockIterator()) {
       //As all blocks are deleted blocks, blocks does not match with normal key
       // filter.
@@ -288,7 +288,7 @@ public class TestKeyValueBlockIterator {
    */
   private void testWithFilter(MetadataKeyFilters.KeyPrefixFilter filter,
                               List<Long> expectedIDs) throws Exception {
-    try (BlockIterator<BlockData> iterator =
+    try(BlockIterator<BlockData> iterator =
                 db.getStore().getBlockIterator(filter)) {
       // Test seek.
       iterator.seekToFirst();
@@ -364,7 +364,7 @@ public class TestKeyValueBlockIterator {
             Map<String, Integer> prefixCounts) throws Exception {
     // Create required block data.
     Map<String, List<Long>> blockIDs = new HashMap<>();
-    try (ReferenceCountedDB metadataStore = BlockUtils.getDB(containerData,
+    try(ReferenceCountedDB metadataStore = BlockUtils.getDB(containerData,
         conf)) {
 
       List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
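
The try-with-resources idiom touched throughout this file matters functionally: BlockIterator wraps a live iterator over the RocksDB-backed store, so it must be closed. A sketch of the iteration helper these tests effectively inline, using only calls present in this diff (getBlockIterator, hasNext, nextBlock); ReferenceCountedDB comes from BlockUtils.getDB as shown above:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.ozone.container.common.helpers.BlockData;
    import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;
    import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;

    public final class BlockIterationSketch {
      /** Collects every unprefixed block's localID from the store. */
      public static List<Long> collectLocalIds(ReferenceCountedDB db)
          throws IOException {
        List<Long> ids = new ArrayList<>();
        // Closing the iterator releases the underlying store resources.
        try (BlockIterator<BlockData> iter =
                 db.getStore().getBlockIterator()) {
          while (iter.hasNext()) {
            ids.add(iter.nextBlock().getBlockID().getLocalID());
          }
        }
        return ids;
      }
    }
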
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
index 1616572577..68cd2f6fb5 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.hdds.utils.db.RDBStore;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
+import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
 import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
 import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
 import org.apache.hadoop.ozone.container.common.utils.db.DatanodeDBProfile;
@@ -101,20 +101,20 @@ public class TestKeyValueContainer {
   private KeyValueContainer keyValueContainer;
   private UUID datanodeId;
 
-  private final ContainerLayoutVersion layout;
+  private final ChunkLayOutVersion layout;
 
   // Use one configuration object across parameterized runs of tests.
   // This preserves the column family options in the container options
   // cache for testContainersShareColumnFamilyOptions.
   private static final OzoneConfiguration CONF = new OzoneConfiguration();
 
-  public TestKeyValueContainer(ContainerLayoutVersion layout) {
+  public TestKeyValueContainer(ChunkLayOutVersion layout) {
     this.layout = layout;
   }
 
   @Parameterized.Parameters
   public static Iterable<Object[]> parameters() {
-    return ContainerLayoutTestInfo.containerLayoutParameters();
+    return ChunkLayoutTestInfo.chunkLayoutParameters();
   }
 
   @Before
@@ -179,7 +179,7 @@ public class TestKeyValueContainer {
     //create a new one
     KeyValueContainerData containerData =
         new KeyValueContainerData(containerId,
-            keyValueContainerData.getLayoutVersion(),
+            keyValueContainerData.getLayOutVersion(),
             keyValueContainerData.getMaxSize(), UUID.randomUUID().toString(),
             datanodeId.toString());
     KeyValueContainer container = new KeyValueContainer(containerData, CONF);
@@ -200,8 +200,8 @@ public class TestKeyValueContainer {
         containerData.getState());
     assertEquals(numberOfKeysToWrite,
         containerData.getKeyCount());
-    assertEquals(keyValueContainerData.getLayoutVersion(),
-        containerData.getLayoutVersion());
+    assertEquals(keyValueContainerData.getLayOutVersion(),
+        containerData.getLayOutVersion());
     assertEquals(keyValueContainerData.getMaxSize(),
         containerData.getMaxSize());
     assertEquals(keyValueContainerData.getBytesUsed(),
@@ -221,7 +221,7 @@ public class TestKeyValueContainer {
     //Import failure should cleanup the container directory
     containerData =
         new KeyValueContainerData(containerId + 1,
-            keyValueContainerData.getLayoutVersion(),
+            keyValueContainerData.getLayOutVersion(),
             keyValueContainerData.getMaxSize(), UUID.randomUUID().toString(),
             datanodeId.toString());
     container = new KeyValueContainer(containerData, CONF);
@@ -446,7 +446,7 @@ public class TestKeyValueContainer {
         keyValueContainerData, CONF);
     keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
 
-    try (ReferenceCountedDB db =
+    try(ReferenceCountedDB db =
         BlockUtils.getDB(keyValueContainerData, CONF)) {
       RDBStore store = (RDBStore) db.getStore().getStore();
       long defaultCacheSize = 64 * OzoneConsts.MB;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java
index d50a091a6a..71175b6144 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java
@@ -74,7 +74,7 @@ import static org.junit.Assert.assertFalse;
   private static final Logger LOG =
       LoggerFactory.getLogger(TestKeyValueContainerCheck.class);
 
-  private final ContainerLayoutTestInfo chunkManagerTestInfo;
+  private final ChunkLayoutTestInfo chunkManagerTestInfo;
   private KeyValueContainer container;
   private KeyValueContainerData containerData;
   private MutableVolumeSet volumeSet;
@@ -82,15 +82,14 @@ import static org.junit.Assert.assertFalse;
   private File testRoot;
   private ChunkManager chunkManager;
 
-  public TestKeyValueContainerCheck(
-      ContainerLayoutTestInfo chunkManagerTestInfo) {
+  public TestKeyValueContainerCheck(ChunkLayoutTestInfo chunkManagerTestInfo) {
     this.chunkManagerTestInfo = chunkManagerTestInfo;
   }
 
   @Parameterized.Parameters public static Collection<Object[]> data() {
     return Arrays.asList(new Object[][] {
-        {ContainerLayoutTestInfo.FILE_PER_CHUNK},
-        {ContainerLayoutTestInfo.FILE_PER_BLOCK}
+        {ChunkLayoutTestInfo.FILE_PER_CHUNK},
+        {ChunkLayoutTestInfo.FILE_PER_BLOCK}
     });
   }
 
@@ -186,7 +185,7 @@ import static org.junit.Assert.assertFalse;
       try (RandomAccessFile file = new RandomAccessFile(chunkFile, "rws")) {
         file.setLength(length / 2);
       }
-      assertEquals(length / 2, chunkFile.length());
+      assertEquals(length/2, chunkFile.length());
     }
 
     // metadata check should pass.
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java
index d7b520aba6..e1526dbd95 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.ozone.container.keyvalue;
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
+import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
 import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
@@ -79,15 +79,15 @@ public class TestKeyValueContainerMarkUnhealthy {
   private KeyValueContainer keyValueContainer;
   private UUID datanodeId;
 
-  private final ContainerLayoutVersion layout;
+  private final ChunkLayOutVersion layout;
 
-  public TestKeyValueContainerMarkUnhealthy(ContainerLayoutVersion layout) {
+  public TestKeyValueContainerMarkUnhealthy(ChunkLayOutVersion layout) {
     this.layout = layout;
   }
 
   @Parameterized.Parameters
   public static Iterable<Object[]> parameters() {
-    return ContainerLayoutTestInfo.containerLayoutParameters();
+    return ChunkLayoutTestInfo.chunkLayoutParameters();
   }
 
   @Before
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
index a5d225469d..583d043e84 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerT
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.hdds.security.token.TokenVerifier;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
-import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
+import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
 import org.apache.hadoop.ozone.container.common.interfaces.Handler;
@@ -82,18 +82,18 @@ public class TestKeyValueHandler {
 
   private static final long DUMMY_CONTAINER_ID = 9999;
 
-  private final ContainerLayoutVersion layout;
+  private final ChunkLayOutVersion layout;
 
   private HddsDispatcher dispatcher;
   private KeyValueHandler handler;
 
-  public TestKeyValueHandler(ContainerLayoutVersion layout) {
+  public TestKeyValueHandler(ChunkLayOutVersion layout) {
     this.layout = layout;
   }
 
   @Parameterized.Parameters
   public static Iterable<Object[]> parameters() {
-    return ContainerLayoutTestInfo.containerLayoutParameters();
+    return ChunkLayoutTestInfo.chunkLayoutParameters();
   }
 
   @Before
@@ -261,7 +261,7 @@ public class TestKeyValueHandler {
   }
 
   @Test
-  public void testVolumeSetInKeyValueHandler() throws Exception {
+  public void testVolumeSetInKeyValueHandler() throws Exception{
     File path = GenericTestUtils.getRandomizedTestDir();
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(HDDS_DATANODE_DIR_KEY, path.getAbsolutePath());
@@ -296,7 +296,7 @@ public class TestKeyValueHandler {
       try {
         new KeyValueHandler(conf,
             context.getParent().getDatanodeDetails().getUuidString(),
-            cset, volumeSet, metrics, c -> { });
+            cset, volumeSet, metrics, c->{});
       } catch (RuntimeException ex) {
         GenericTestUtils.assertExceptionContains("class org.apache.hadoop" +
             ".ozone.container.common.impl.HddsDispatcher not org.apache" +
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java
index 793aea5122..d2b0f5e87c 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java
@@ -36,7 +36,7 @@ import org.apache.commons.compress.archivers.ArchiveOutputStream;
 import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
 import org.apache.commons.compress.compressors.CompressorOutputStream;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
+import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerPacker;
 
 import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
@@ -87,15 +87,15 @@ public class TestTarContainerPacker {
 
   private static final AtomicInteger CONTAINER_ID = new AtomicInteger(1);
 
-  private final ContainerLayoutVersion layout;
+  private final ChunkLayOutVersion layout;
 
-  public TestTarContainerPacker(ContainerLayoutVersion layout) {
+  public TestTarContainerPacker(ChunkLayOutVersion layout) {
     this.layout = layout;
   }
 
   @Parameterized.Parameters
   public static Iterable<Object[]> parameters() {
-    return ContainerLayoutTestInfo.containerLayoutParameters();
+    return ChunkLayoutTestInfo.chunkLayoutParameters();
   }
 
   @BeforeClass
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java
index bc6371e400..c1ab19fee8 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java
@@ -21,14 +21,14 @@ import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
+import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
 import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.volume.VolumeIOStats;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo;
+import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
@@ -68,7 +68,7 @@ public abstract class AbstractTestChunkManager {
   @Rule
   public TemporaryFolder folder = new TemporaryFolder();
 
-  protected abstract ContainerLayoutTestInfo getStrategy();
+  protected abstract ChunkLayoutTestInfo getStrategy();
 
   protected ChunkManager createTestSubject() {
     blockManager = new BlockManagerImpl(new OzoneConfiguration());
@@ -92,7 +92,7 @@ public abstract class AbstractTestChunkManager {
         .thenReturn(hddsVolume);
 
     keyValueContainerData = new KeyValueContainerData(1L,
-        ContainerLayoutVersion.getConfiguredVersion(config),
+        ChunkLayOutVersion.getConfiguredVersion(config),
         (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(),
         datanodeId.toString());
 
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java
index defc02e78e..23f690eed5 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java
@@ -214,7 +214,7 @@ public abstract class CommonChunkManagerTestCases
 
     BlockData blockData = new BlockData(blockID);
     // WHEN
-    for (int i = 0; i < count; i++) {
+    for (int i = 0; i< count; i++) {
       ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", localID, i),
           i * len, len);
       chunkManager.writeChunk(container, blockID, info, data, context);
@@ -228,7 +228,7 @@ public abstract class CommonChunkManagerTestCases
     assertTrue(getHddsVolume().getVolumeIOStats().getWriteTime() > 0);
 
     // WHEN
-    for (int i = 0; i < count; i++) {
+    for (int i = 0; i< count; i++) {
       ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", localID, i),
           i * len, len);
       chunkManager.readChunk(container, blockID, info, context);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java
index 77eae5642a..34455a097a 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java
@@ -26,12 +26,12 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerExcep
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
+import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
-import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo;
+import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.ozone.test.GenericTestUtils;
@@ -75,15 +75,15 @@ public class TestBlockManagerImpl {
   private BlockID blockID;
   private BlockID blockID1;
 
-  private final ContainerLayoutVersion layout;
+  private final ChunkLayOutVersion layout;
 
-  public TestBlockManagerImpl(ContainerLayoutVersion layout) {
+  public TestBlockManagerImpl(ChunkLayOutVersion layout) {
     this.layout = layout;
   }
 
   @Parameterized.Parameters
   public static Iterable<Object[]> parameters() {
-    return ContainerLayoutTestInfo.containerLayoutParameters();
+    return ChunkLayoutTestInfo.chunkLayoutParameters();
   }
 
   @Before
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestChunkManagerDummyImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestChunkManagerDummyImpl.java
index e4bbe5073e..d882ba4f9e 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestChunkManagerDummyImpl.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestChunkManagerDummyImpl.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.container.keyvalue.impl;
 
 import org.apache.hadoop.ozone.common.ChunkBuffer;
 import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
-import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo;
+import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
 import org.junit.Test;
 
@@ -32,8 +32,8 @@ import static org.junit.Assert.assertNotNull;
 public class TestChunkManagerDummyImpl extends AbstractTestChunkManager {
 
   @Override
-  protected ContainerLayoutTestInfo getStrategy() {
-    return ContainerLayoutTestInfo.DUMMY;
+  protected ChunkLayoutTestInfo getStrategy() {
+    return ChunkLayoutTestInfo.DUMMY;
   }
 
   @Test
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java
index c3fc33fafb..f3be6e2fb3 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.ozone.common.ChunkBuffer;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
-import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo;
+import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
 import org.junit.Test;
@@ -139,7 +139,7 @@ public class TestFilePerBlockStrategy extends CommonChunkManagerTestCases {
   }
 
   @Override
-  protected ContainerLayoutTestInfo getStrategy() {
-    return ContainerLayoutTestInfo.FILE_PER_BLOCK;
+  protected ChunkLayoutTestInfo getStrategy() {
+    return ChunkLayoutTestInfo.FILE_PER_BLOCK;
   }
 }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerChunkStrategy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerChunkStrategy.java
index 54812700f6..0286b3582d 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerChunkStrategy.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerChunkStrategy.java
@@ -22,9 +22,9 @@ import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.common.ChunkBuffer;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
+import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
 import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
-import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo;
+import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
@@ -42,8 +42,8 @@ import static org.junit.Assert.assertTrue;
 public class TestFilePerChunkStrategy extends CommonChunkManagerTestCases {
 
   @Override
-  protected ContainerLayoutTestInfo getStrategy() {
-    return ContainerLayoutTestInfo.FILE_PER_CHUNK;
+  protected ChunkLayoutTestInfo getStrategy() {
+    return ChunkLayoutTestInfo.FILE_PER_CHUNK;
   }
 
   @Test
@@ -66,7 +66,7 @@ public class TestFilePerChunkStrategy extends CommonChunkManagerTestCases {
 
     long term = 0;
     long index = 0;
-    File chunkFile = ContainerLayoutVersion.FILE_PER_CHUNK
+    File chunkFile = ChunkLayOutVersion.FILE_PER_CHUNK
         .getChunkFile(container.getContainerData(), blockID, chunkInfo);
     File tempChunkFile = new File(chunkFile.getParent(),
         chunkFile.getName() + OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER
@@ -109,7 +109,7 @@ public class TestFilePerChunkStrategy extends CommonChunkManagerTestCases {
 
     ChunkInfo oldDatanodeChunkInfo = new ChunkInfo(chunkInfo.getChunkName(),
         offset, chunkInfo.getLen());
-    File file = ContainerLayoutVersion.FILE_PER_CHUNK.getChunkFile(
+    File file = ChunkLayOutVersion.FILE_PER_CHUNK.getChunkFile(
         container.getContainerData(), blockID, chunkInfo);
     ChunkUtils.writeData(file,
         ChunkBuffer.wrap(getData()), offset, chunkInfo.getLen(),
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
index 674ae2dace..2cd9673008 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
+import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
@@ -94,9 +94,9 @@ public class TestContainerReader {
     Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
         .thenReturn(hddsVolume);
 
-    for (int i = 0; i < 2; i++) {
+    for (int i=0; i<2; i++) {
       KeyValueContainerData keyValueContainerData = new KeyValueContainerData(i,
-          ContainerLayoutVersion.FILE_PER_BLOCK,
+          ChunkLayOutVersion.FILE_PER_BLOCK,
           (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(),
           datanodeId.toString());
 
@@ -124,7 +124,7 @@ public class TestContainerReader {
 
   private void markBlocksForDelete(KeyValueContainer keyValueContainer,
       boolean setMetaData, List<Long> blockNames, int count) throws Exception {
-    try (ReferenceCountedDB metadataStore = BlockUtils.getDB(keyValueContainer
+    try(ReferenceCountedDB metadataStore = BlockUtils.getDB(keyValueContainer
         .getContainerData(), conf)) {
 
       for (int i = 0; i < count; i++) {
@@ -154,7 +154,7 @@ public class TestContainerReader {
     long containerId = keyValueContainer.getContainerData().getContainerID();
 
     List<Long> blkNames = new ArrayList<>();
-    try (ReferenceCountedDB metadataStore = BlockUtils.getDB(keyValueContainer
+    try(ReferenceCountedDB metadataStore = BlockUtils.getDB(keyValueContainer
         .getContainerData(), conf)) {
 
       for (int i = 0; i < blockCount; i++) {
@@ -197,7 +197,7 @@ public class TestContainerReader {
 
     Assert.assertEquals(2, containerSet.containerCount());
 
-    for (int i = 0; i < 2; i++) {
+    for (int i=0; i < 2; i++) {
       Container keyValueContainer = containerSet.getContainer(i);
 
       KeyValueContainerData keyValueContainerData = (KeyValueContainerData)
@@ -235,7 +235,7 @@ public class TestContainerReader {
     int containerCount = 3;
     for (int i = 0; i < containerCount; i++) {
       KeyValueContainerData keyValueContainerData = new KeyValueContainerData(i,
-          ContainerLayoutVersion.FILE_PER_BLOCK,
+          ChunkLayOutVersion.FILE_PER_BLOCK,
           (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(),
           datanodeId.toString());
       KeyValueContainer keyValueContainer =
@@ -285,7 +285,7 @@ public class TestContainerReader {
     blockCount = containerCount;
     for (int i = 0; i < containerCount; i++) {
       KeyValueContainerData keyValueContainerData =
-          new KeyValueContainerData(i, ContainerLayoutVersion.FILE_PER_BLOCK,
+          new KeyValueContainerData(i, ChunkLayOutVersion.FILE_PER_BLOCK,
               (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(),
               datanodeId.toString());
 
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index 8e1458dc47..1aa2940ceb 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
+import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
@@ -43,7 +43,7 @@ import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
-import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo;
+import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
@@ -87,15 +87,15 @@ public class TestOzoneContainer {
   private HashMap<String, Long> commitSpaceMap; //RootDir -> committed space
   private final int numTestContainers = 10;
 
-  private final ContainerLayoutVersion layout;
+  private final ChunkLayOutVersion layout;
 
-  public TestOzoneContainer(ContainerLayoutVersion layout) {
+  public TestOzoneContainer(ChunkLayOutVersion layout) {
     this.layout = layout;
   }
 
   @Parameterized.Parameters
   public static Iterable<Object[]> parameters() {
-    return ContainerLayoutTestInfo.containerLayoutParameters();
+    return ChunkLayoutTestInfo.chunkLayoutParameters();
   }
 
   @Before
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcOutputStream.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcOutputStream.java
index 099ca9c298..cf6ece3b45 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcOutputStream.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcOutputStream.java
@@ -126,18 +126,18 @@ public class TestGrpcOutputStream {
   public void bufferFlushedWhenFull() throws IOException {
     byte[] bytes = getRandomBytes(bufferSize);
 
-    subject.write(bytes, 0, bufferSize - 1);
-    subject.write(bytes[bufferSize - 1]);
+    subject.write(bytes, 0, bufferSize-1);
+    subject.write(bytes[bufferSize-1]);
     verify(observer).onNext(any());
 
     subject.write(bytes[0]);
-    subject.write(bytes, 1, bufferSize - 1);
+    subject.write(bytes, 1, bufferSize-1);
     verify(observer, times(2)).onNext(any());
   }
 
   @Test
   public void singleArraySpansMultipleResponses() throws IOException {
-    byte[] bytes = writeBytes(subject, 2 * bufferSize + bufferSize / 2);
+    byte[] bytes = writeBytes(subject, 2 * bufferSize + bufferSize/2);
     subject.close();
 
     verifyResponses(bytes);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationConfig.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationConfig.java
deleted file mode 100644
index 6ab32d6cf9..0000000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationConfig.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.replication;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.container.replication.ReplicationServer.ReplicationConfig;
-import org.junit.Test;
-
-import static org.apache.hadoop.ozone.container.replication.ReplicationServer.ReplicationConfig.REPLICATION_MAX_STREAMS_DEFAULT;
-import static org.apache.hadoop.ozone.container.replication.ReplicationServer.ReplicationConfig.REPLICATION_STREAMS_LIMIT_KEY;
-import static org.junit.Assert.assertEquals;
-
-/**
- * Tests for {@link ReplicationConfig}.
- */
-public class TestReplicationConfig {
-
-  @Test
-  public void acceptsValidValues() {
-    // GIVEN
-    int validReplicationLimit = 123;
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setInt(REPLICATION_STREAMS_LIMIT_KEY, validReplicationLimit);
-
-    // WHEN
-    ReplicationConfig subject = conf.getObject(ReplicationConfig.class);
-
-    // THEN
-    assertEquals(validReplicationLimit, subject.getReplicationMaxStreams());
-  }
-
-  @Test
-  public void overridesInvalidValues() {
-    // GIVEN
-    int invalidReplicationLimit = -5;
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setInt(REPLICATION_STREAMS_LIMIT_KEY, invalidReplicationLimit);
-
-    // WHEN
-    ReplicationConfig subject = conf.getObject(ReplicationConfig.class);
-
-    // THEN
-    assertEquals(REPLICATION_MAX_STREAMS_DEFAULT,
-        subject.getReplicationMaxStreams());
-  }
-
-  @Test
-  public void isCreatedWitDefaultValues() {
-    // GIVEN
-    OzoneConfiguration conf = new OzoneConfiguration();
-
-    // WHEN
-    ReplicationConfig subject = conf.getObject(ReplicationConfig.class);
-
-    // THEN
-    assertEquals(REPLICATION_MAX_STREAMS_DEFAULT,
-        subject.getReplicationMaxStreams());
-  }
-
-}
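
The test deleted above exercised the typed-configuration pattern used throughout HDDS: `OzoneConfiguration#getObject` materializes an annotated config class and falls back to a default when the supplied value is invalid. A minimal sketch of the same validate-with-fallback idea, using only the generic `Configuration` getters; the key name and default below are placeholders, not the real constants:

```java
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public final class ReplicationLimitCheck {

  // Placeholder key/default, standing in for the constants used by the deleted test.
  private static final String STREAMS_LIMIT_KEY = "example.replication.streams.limit";
  private static final int MAX_STREAMS_DEFAULT = 10;

  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.setInt(STREAMS_LIMIT_KEY, -5); // invalid value, like overridesInvalidValues() above

    int limit = conf.getInt(STREAMS_LIMIT_KEY, MAX_STREAMS_DEFAULT);
    if (limit <= 0) {
      // Mirror the fallback that the config class performs for invalid values.
      limit = MAX_STREAMS_DEFAULT;
    }
    System.out.println("effective stream limit: " + limit);
  }
}
```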
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
index 8078fc25c8..c6dc3c6c1b 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
@@ -32,9 +32,9 @@ import java.util.function.Function;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl;
-import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
+import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo;
+import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 
@@ -58,7 +58,7 @@ import static java.util.Collections.emptyList;
 @RunWith(Parameterized.class)
 public class TestReplicationSupervisor {
 
-  private final ContainerReplicator noopReplicator = task -> { };
+  private final ContainerReplicator noopReplicator = task -> {};
   private final ContainerReplicator throwingReplicator = task -> {
     throw new RuntimeException("testing replication failure");
   };
@@ -75,15 +75,15 @@ public class TestReplicationSupervisor {
 
   private ContainerSet set;
 
-  private final ContainerLayoutVersion layout;
+  private final ChunkLayOutVersion layout;
 
-  public TestReplicationSupervisor(ContainerLayoutVersion layout) {
+  public TestReplicationSupervisor(ChunkLayOutVersion layout) {
     this.layout = layout;
   }
 
   @Parameterized.Parameters
   public static Iterable<Object[]> parameters() {
-    return ContainerLayoutTestInfo.containerLayoutParameters();
+    return ChunkLayoutTestInfo.chunkLayoutParameters();
   }
 
   @Before
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java
index 3339540643..ebcdfee551 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
-import org.apache.hadoop.ozone.upgrade.UpgradeTestUtils;
+import org.apache.hadoop.ozone.upgrade.TestUpgradeUtils;
 import org.apache.ozone.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Rule;
@@ -61,7 +61,7 @@ public class TestDataNodeStartupSlvLessThanMlv {
 
     // Create version file with MLV > SLV, which should fail the
     // DataNodeStateMachine construction.
-    UpgradeTestUtils.createVersionFile(datanodeSubdir,
+    TestUpgradeUtils.createVersionFile(datanodeSubdir,
         HddsProtos.NodeType.DATANODE, mlv);
 
     try {
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java
index d882ca4ed4..cb5257d5ea 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java
@@ -144,7 +144,7 @@ public class TestDatanodeUpgradeToScmHA {
     ExecutorService executor = Executors.newFixedThreadPool(1);
     Future<Void> readFuture = executor.submit(() -> {
       // Layout version check should be thread safe.
-      while (!dsm.getLayoutVersionManager()
+      while(!dsm.getLayoutVersionManager()
           .isAllowed(HDDSLayoutFeature.SCM_HA)) {
         readChunk(writeChunk, pipeline);
       }
@@ -203,7 +203,7 @@ public class TestDatanodeUpgradeToScmHA {
     ExecutorService executor = Executors.newFixedThreadPool(1);
     Future<Void> importFuture = executor.submit(() -> {
       // Layout version check should be thread safe.
-      while (!dsm.getLayoutVersionManager()
+      while(!dsm.getLayoutVersionManager()
           .isAllowed(HDDSLayoutFeature.SCM_HA)) {
         importContainer(exportContainerID, exportedContainerFile);
         readChunk(exportWriteChunk, pipeline);
@@ -541,7 +541,7 @@ public class TestDatanodeUpgradeToScmHA {
    * Get the cluster ID and SCM ID from SCM to the datanode.
    */
   public void callVersionEndpointTask() throws Exception {
-    try (EndpointStateMachine esm = ContainerTestUtils.createEndpoint(conf,
+    try(EndpointStateMachine esm = ContainerTestUtils.createEndpoint(conf,
         address, 1000)) {
       VersionEndpointTask vet = new VersionEndpointTask(esm, conf,
           dsm.getContainer());
diff --git a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml
index 2e27982d5e..f4bfcef323 100644
--- a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml
+++ b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml
@@ -152,7 +152,6 @@
         <module name="WhitespaceAfter">
           <property name="tokens" value="COMMA, SEMI"/>
         </module>
-        <module name="WhitespaceAround"/>
 
 
         <!-- Modifier Checks                                    -->
diff --git a/hadoop-hdds/docs/content/feature/Observability.md b/hadoop-hdds/docs/content/feature/Observability.md
index 1ee95d8ade..cab6878091 100644
--- a/hadoop-hdds/docs/content/feature/Observability.md
+++ b/hadoop-hdds/docs/content/feature/Observability.md
@@ -69,7 +69,7 @@ Tracing is turned off by default, but can be turned on with `hdds.tracing.enable
 </property>
 ```
 
-Jaeger client can be configured with environment variables as documented [here](https://github.com/jaegertracing/jaeger-client-java/blob/master/jaeger-core/README.md):
+Jaeger client can be configured with environment variables as documented [here](https://github.com/jaegertracing/jaeger-client-java/blob/master/jaeger-core/README.md):
 
 For example:
 
diff --git a/hadoop-hdds/docs/content/feature/Observability.zh.md b/hadoop-hdds/docs/content/feature/Observability.zh.md
deleted file mode 100644
index 7a5c67b4cd..0000000000
--- a/hadoop-hdds/docs/content/feature/Observability.zh.md
+++ /dev/null
@@ -1,217 +0,0 @@
----
-title: "可观察性"
-weight: 8
-menu:
-main:
-parent: Features
-summary: Different tools in Ozone to improve observability
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-Ozone provides several tools to get more information about the current state of the cluster.
-
-## Prometheus
-Ozone natively supports Prometheus integration. All internal metrics (collected by the Hadoop metrics framework) are published under the `/prom` HTTP endpoint (for example, at http://localhost:9876/prom on the SCM).
-
-The Prometheus endpoint is enabled by default, but it can be turned off with the `hdds.prometheus.endpoint.enabled` configuration variable.
-
-In a secure environment the page is protected with SPNEGO authentication, which Prometheus does not support. To enable monitoring in a secure environment, a dedicated authentication token can be configured.
-
-Example `ozone-site.xml` configuration:
-
-```XML
-<property>
-   <name>hdds.prometheus.endpoint.token</name>
-   <value>putyourtokenhere</value>
-</property>
-```
-
-Example prometheus configuration:
-```YAML
-scrape_configs:
-  - job_name: ozone
-    bearer_token: <putyourtokenhere>
-    metrics_path: /prom
-    static_configs:
-     - targets:
-         - "127.0.0.1:9876" 
-```
-
-## Distributed tracing
-Distributed tracing can help to understand performance bottlenecks by visualizing the end-to-end performance.
-
-Ozone uses the [jaeger](https://jaegertracing.io) tracing library to collect traces, and the tracing data can be sent to any compatible backend (Zipkin, ...).
-
-Tracing is turned off by default, but can be turned on with the `hdds.tracing.enabled` configuration variable in `ozone-site.xml`.
-
-```XML
-<property>
-   <name>hdds.tracing.enabled</name>
-   <value>true</value>
-</property>
-```
-
-The Jaeger client can be configured with environment variables as documented [here](https://github.com/jaegertracing/jaeger-client-java/blob/master/jaeger-core/README.md).
-
-For example:
-
-```shell
-JAEGER_SAMPLER_PARAM=0.01
-JAEGER_SAMPLER_TYPE=probabilistic
-JAEGER_AGENT_HOST=jaeger
-```
-
-This configuration records 1% of the requests to limit the performance overhead. For more information about Jaeger sampling, check the [documentation](https://www.jaegertracing.io/docs/1.18/sampling/#client-sampling-configuration).
-
-## Ozone Insight
-Ozone Insight is a tool to check the current state of an Ozone cluster. It can show the logging, the metrics and the configuration of a specific component.
-
-Use the `ozone insight list` command to check the available components:
-
-```shell
-> ozone insight list
-
-Available insight points:
-
-  scm.node-manager                     SCM Datanode management related information.
-  scm.replica-manager                  SCM closed container replication manager
-  scm.event-queue                      Information about the internal async event delivery
-  scm.protocol.block-location          SCM Block location protocol endpoint
-  scm.protocol.container-location      SCM Container location protocol endpoint
-  scm.protocol.security                SCM Block location protocol endpoint
-  om.key-manager                       OM Key Manager
-  om.protocol.client                   Ozone Manager RPC endpoint
-  datanode.pipeline                    More information about one ratis datanode ring.
-```  
-
-## Configuration
-
-`ozone insight config` can show the configuration related to a specific component (only selected components are supported).
-
-```shell
-> ozone insight config scm.replica-manager
-
-Configuration for `scm.replica-manager` (SCM closed container replication manager)
-
->>> hdds.scm.replication.thread.interval
-       default: 300s
-       current: 300s
-
-There is a replication monitor thread running inside SCM which takes care of replicating the containers in the cluster. This property is used to configure the interval in which that thread runs.
-
-
->>> hdds.scm.replication.event.timeout
-       default: 30m
-       current: 30m
-
-Timeout for the container replication/deletion commands sent  to datanodes. After this timeout the command will be retried.
-
-```
-
-## Metrics
-`ozone insight metrics` can show the metrics related to a specific component (only selected components are supported).
-```shell
-> ozone insight metrics scm.protocol.block-location
-Metrics for `scm.protocol.block-location` (SCM Block location protocol endpoint)
-
-RPC connections
-
-  Open connections: 0
-  Dropped connections: 0
-  Received bytes: 1267
-  Sent bytes: 2420
-
-
-RPC queue
-
-  RPC average queue time: 0.0
-  RPC call queue length: 0
-
-
-RPC performance
-
-  RPC processing time average: 0.0
-  Number of slow calls: 0
-
-
-Message type counters
-
-  Number of AllocateScmBlock: ???
-  Number of DeleteScmKeyBlocks: ???
-  Number of GetScmInfo: ???
-  Number of SortDatanodes: ???
-```
-
-## Logs
-
-`ozone insight logs` can connect to the required service and show the DEBUG/TRACE logs related to a specific component. For example, to display RPC messages:
-
-```shell
->ozone insight logs om.protocol.client
-
-[OM] 2020-07-28 12:31:49,988 [DEBUG|org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB|OzoneProtocolMessageDispatcher] OzoneProtocol ServiceList request is received
-[OM] 2020-07-28 12:31:50,095 [DEBUG|org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB|OzoneProtocolMessageDispatcher] OzoneProtocol CreateVolume request is received
-```
-
-With the `-v` flag it can also display the content of the protobuf messages (TRACE-level logs):
-
-```shell
-ozone insight logs -v om.protocol.client
-
-[OM] 2020-07-28 12:33:28,463 [TRACE|org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB|OzoneProtocolMessageDispatcher] [service=OzoneProtocol] [type=CreateVolume] request is received:
-cmdType: CreateVolume
-traceID: ""
-clientId: "client-A31DF5C6ECF2"
-createVolumeRequest {
-  volumeInfo {
-    adminName: "hadoop"
-    ownerName: "hadoop"
-    volume: "vol1"
-    quotaInBytes: 1152921504606846976
-    volumeAcls {
-      type: USER
-      name: "hadoop"
-      rights: "200"
-      aclScope: ACCESS
-    }
-    volumeAcls {
-      type: GROUP
-      name: "users"
-      rights: "200"
-      aclScope: ACCESS
-    }
-    creationTime: 1595939608460
-    objectID: 0
-    updateID: 0
-    modificationTime: 0
-  }
-}
-
-[OM] 2020-07-28 12:33:28,474 [TRACE|org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB|OzoneProtocolMessageDispatcher] [service=OzoneProtocol] [type=CreateVolume] request is processed. Response:
-cmdType: CreateVolume
-traceID: ""
-success: false
-message: "Volume already exists"
-status: VOLUME_ALREADY_EXISTS
-```
-
-<div class="alert alert-warning" role="alert">
-
-Under the hood, `ozone insight` uses HTTP endpoints to retrieve the required information (the `/conf`, `/prom` and `/logLevel` endpoints). It is not yet supported in secure environments.
-
-</div>
\ No newline at end of file
diff --git a/hadoop-hdds/docs/content/security/SecuringTDE.md b/hadoop-hdds/docs/content/security/SecuringTDE.md
index 2a9898427c..c85da0f73f 100644
--- a/hadoop-hdds/docs/content/security/SecuringTDE.md
+++ b/hadoop-hdds/docs/content/security/SecuringTDE.md
@@ -58,10 +58,10 @@ To create an encrypted bucket, clients need to:
    * Assign the encryption key to a bucket.
 
   ```bash
-  ozone sh bucket create -k encKey /vol/encryptedbucket
+  ozone sh bucket create -k encKey /vol/encryptedBucket
   ```
 
-After this command, all data written to the _encryptedbucket_ will be encrypted
+After this command, all data written to the _encryptedBucket_ will be encrypted
 via the encKey and while reading the clients will talk to Key Management
 Server and read the key and decrypt it. In other words, the data stored
 inside Ozone is always encrypted. The fact that data is encrypted at rest
@@ -71,47 +71,20 @@ will be completely transparent to the clients and end users.
 
 There are two ways to create an encrypted bucket that can be accessed via S3 Gateway.
 
-#### Option 1. Create a bucket using shell under "/s3v" volume
+#### 1. Create a bucket using shell under "/s3v" volume
 
   ```bash
-  ozone sh bucket create -k encKey --layout=OBJECT_STORE /s3v/encryptedbucket
+  ozone sh bucket create -k encKey /s3v/encryptedBucket
   ```
-
-#### Option 2. Create a link to an encrypted bucket under "/s3v" volume
+#### 2. Create a link to an encrypted bucket under "/s3v" volume
 
   ```bash
-  ozone sh bucket create -k encKey --layout=OBJECT_STORE /vol/encryptedbucket
-  ozone sh bucket link /vol/encryptedbucket /s3v/linkencryptedbucket
+  ozone sh bucket create -k encKey /vol/encryptedBucket
+  ozone sh bucket link  /vol/encryptedBucket /s3v/linkencryptedbucket
   ```
-
-Note 1: An encrypted bucket cannot be created via S3 APIs. It must be done using Ozone shell commands as shown above.
+Note: An encrypted bucket cannot be created via S3 APIs. It must be done using Ozone shell commands as shown above.
 After creating an encrypted bucket, all the keys added to this bucket using s3g will be encrypted.
 
-Note 2: `--layout=OBJECT_STORE` is specified in the above examples
-for full compatibility with S3 (which is the default value for the `--layout`
-argument, but explicitly added here to make a point).
-
-Bucket created with the `OBJECT_STORE` type will NOT be accessible via
-HCFS (ofs or o3fs) at all. And such access will be rejected. For instance:
-
-  ```bash
-  $ ozone fs -ls ofs://ozone1/s3v/encryptedbucket/
-  -ls: Bucket: encryptedbucket has layout: OBJECT_STORE, which does not support file system semantics. Bucket Layout must be FILE_SYSTEM_OPTIMIZED or LEGACY.
-  ```
-
-  ```bash
-  $ ozone fs -ls o3fs://encryptedbucket.s3v.ozone1/
-  22/02/07 00:00:00 WARN fs.FileSystem: Failed to initialize fileystem o3fs://encryptedbucket.s3v.ozone1/: java.lang.IllegalArgumentException: Bucket: encryptedbucket has layout: OBJECT_STORE, which does not support file system semantics. Bucket Layout must be FILE_SYSTEM_OPTIMIZED or LEGACY.
-  -ls: Bucket: encryptedbucket has layout: OBJECT_STORE, which does not support file system semantics. Bucket Layout must be FILE_SYSTEM_OPTIMIZED or LEGACY.
-  ```
-
-If one wants the bucket to be accessible from both S3G and HCFS (ofs and o3fs)
-at the same time, use `--layout=FILE_SYSTEM_OPTIMIZED` instead.
-
-However, in buckets with `FILE_SYSTEM_OPTIMIZED` layout, some irregular S3 key
-names may be rejected or normalized, which can be undesired.
-See [Prefix based File System Optimization]({{< relref "../feature/PrefixFSO.md" >}}) for more information.
-
 In non-secure mode, the user running the S3Gateway daemon process is the proxy user, 
 while in secure mode the S3Gateway Kerberos principal (ozone.s3g.kerberos.principal) is the proxy user. 
 S3Gateway proxies all users accessing the encrypted buckets to decrypt the key. 
@@ -138,11 +111,12 @@ The below two configurations must be added to the kms-site.xml to allow the S3Ga
          This is the host where the S3Gateway is running. Set this to '*' to allow
          requests from any hosts to be proxied.
   </description>
+
 </property>
-```
 
-### KMS Authorization
+```
 
+### KMS Authorization
 If Ranger authorization is enabled for KMS, then decrypt key permission should be given to
 access key id user(currently access key is kerberos principal) to decrypt the encrypted key 
 to read/write a key in the encrypted bucket.
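
For readers who prefer the Java client over the shell, the bucket-creation commands above map onto the client API roughly as follows. This is a sketch only: volume, bucket and key names are the placeholders from the examples above, and error handling is omitted.

```java
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.BucketArgs;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.OzoneVolume;

public final class CreateEncryptedBucket {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    try (OzoneClient client = OzoneClientFactory.getRpcClient(conf)) {
      OzoneVolume volume = client.getObjectStore().getVolume("vol");
      // Rough equivalent of: ozone sh bucket create -k encKey /vol/encryptedbucket
      BucketArgs bucketArgs = BucketArgs.newBuilder()
          .setBucketEncryptionKey("encKey")
          .build();
      volume.createBucket("encryptedbucket", bucketArgs);
    }
  }
}
```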
diff --git a/hadoop-hdds/docs/content/security/SecuringTDE.zh.md b/hadoop-hdds/docs/content/security/SecuringTDE.zh.md
index d7a2911cbd..bda5e76114 100644
--- a/hadoop-hdds/docs/content/security/SecuringTDE.zh.md
+++ b/hadoop-hdds/docs/content/security/SecuringTDE.zh.md
@@ -49,7 +49,7 @@ hadoop.security.key.provider.path  | KMS uri. <br> e.g. kms://http@kms-host:96
    * Assign the encryption key to a bucket
 
   ```bash
-  ozone sh bucket create -k encKey /vol/encryptedbucket
+  ozone sh bucket create -k encKey /vol/encryptedBucket
   ```
 
-After this command, all data written to _encryptedbucket_ will be encrypted with encKey; when reading, the client talks to the KMS to fetch the key and decrypt the data. In other words, the data stored in Ozone is always encrypted, and this is completely transparent to users and clients.
+After this command, all data written to _encryptedBucket_ will be encrypted with encKey; when reading, the client talks to the KMS to fetch the key and decrypt the data. In other words, the data stored in Ozone is always encrypted, and this is completely transparent to users and clients.
diff --git a/hadoop-hdds/docs/content/tools/TestTools.md b/hadoop-hdds/docs/content/tools/TestTools.md
index 83b40cb5f3..ac025f0a32 100644
--- a/hadoop-hdds/docs/content/tools/TestTools.md
+++ b/hadoop-hdds/docs/content/tools/TestTools.md
@@ -106,4 +106,131 @@ Average Time spent in key write: 00:00:10,894
 Total bytes written: 10240000
 Total Execution time: 00:00:16,898
 ***********************
-```
\ No newline at end of file
+```
+
+## Genesis
+
+Genesis is a micro-benchmarking tool. It's also included in the distribution (`ozone genesis`) but it doesn't require a real cluster. It measures different parts of the code in an isolated way (e.g. the code which saves the data to the local RocksDB-based key-value stores).
+
+Example run:
+
+```
+ ozone genesis -benchmark=BenchMarkRocksDbStore
+# JMH version: 1.19
+# VM version: JDK 11.0.1, VM 11.0.1+13-LTS
+# VM invoker: /usr/lib/jvm/java-11-openjdk-11.0.1.13-3.el7_6.x86_64/bin/java
+# VM options: -Dproc_genesis -Djava.net.preferIPv4Stack=true -Dhadoop.log.dir=/var/log/hadoop -Dhadoop.log.file=hadoop.log -Dhadoop.home.dir=/opt/hadoop -Dhadoop.id.str=hadoop -Dhadoop.root.logger=INFO,console -Dhadoop.policy.file=hadoop-policy.xml -Dhadoop.security.logger=INFO,NullAppender
+# Warmup: 2 iterations, 1 s each
+# Measurement: 20 iterations, 1 s each
+# Timeout: 10 min per iteration
+# Threads: 4 threads, will synchronize iterations
+# Benchmark mode: Throughput, ops/time
+# Benchmark: org.apache.hadoop.ozone.genesis.BenchMarkRocksDbStore.test
+# Parameters: (backgroundThreads = 4, blockSize = 8, maxBackgroundFlushes = 4, maxBytesForLevelBase = 512, maxOpenFiles = 5000, maxWriteBufferNumber = 16, writeBufferSize = 64)
+
+# Run progress: 0.00% complete, ETA 00:00:22
+# Fork: 1 of 1
+# Warmup Iteration   1: 213775.360 ops/s
+# Warmup Iteration   2: 32041.633 ops/s
+Iteration   1: 196342.348 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration   2: 41926.816 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration   3: 210433.231 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration   4: 46941.951 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration   5: 212825.884 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration   6: 145914.351 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration   7: 141838.469 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration   8: 205334.438 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration   9: 163709.519 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration  10: 162494.608 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration  11: 199155.793 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration  12: 209679.298 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration  13: 193787.574 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration  14: 127004.147 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration  15: 145511.080 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration  16: 223433.864 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration  17: 169752.665 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration  18: 165217.191 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration  19: 191038.476 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration  20: 196335.579 ops/s
+                 ?stack: <delayed till summary>
+
+
+
+Result "org.apache.hadoop.ozone.genesis.BenchMarkRocksDbStore.test":
+  167433.864 ?(99.9%) 43530.883 ops/s [Average]
+  (min, avg, max) = (41926.816, 167433.864, 223433.864), stdev = 50130.230
+  CI (99.9%): [123902.981, 210964.748] (assumes normal distribution)
+
+Secondary result "org.apache.hadoop.ozone.genesis.BenchMarkRocksDbStore.test:?stack":
+Stack profiler:
+
+....[Thread state distributions]....................................................................
+ 78.9%         RUNNABLE
+ 20.0%         TIMED_WAITING
+  1.1%         WAITING
+
+....[Thread state: RUNNABLE]........................................................................
+ 59.8%  75.8% org.rocksdb.RocksDB.put
+ 16.5%  20.9% org.rocksdb.RocksDB.get
+  0.7%   0.9% java.io.UnixFileSystem.delete0
+  0.7%   0.9% org.rocksdb.RocksDB.disposeInternal
+  0.3%   0.4% java.lang.Long.formatUnsignedLong0
+  0.1%   0.2% org.apache.hadoop.ozone.genesis.BenchMarkRocksDbStore.test
+  0.1%   0.1% java.lang.Long.toUnsignedString0
+  0.1%   0.1% org.apache.hadoop.ozone.genesis.generated.BenchMarkRocksDbStore_test_jmhTest.test_thrpt_jmhStub
+  0.0%   0.1% java.lang.Object.clone
+  0.0%   0.0% java.lang.Thread.currentThread
+  0.4%   0.5% <other>
+
+....[Thread state: TIMED_WAITING]...................................................................
+ 20.0% 100.0% java.lang.Object.wait
+
+....[Thread state: WAITING].........................................................................
+  1.1% 100.0% jdk.internal.misc.Unsafe.park
+
+
+
+# Run complete. Total time: 00:00:38
+
+Benchmark                          (backgroundThreads)  (blockSize)  (maxBackgroundFlushes)  (maxBytesForLevelBase)  (maxOpenFiles)  (maxWriteBufferNumber)  (writeBufferSize)   Mode  Cnt       Score       Error  Units
+BenchMarkRocksDbStore.test                           4            8                       4                     512            5000                      16                 64  thrpt   20  167433.864 ? 43530.883  ops/s
+BenchMarkRocksDbStore.test:?stack                    4            8                       4                     512            5000                      16                 64  thrpt              NaN                ---
+```
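
The header lines above (`# JMH version: 1.19`, warmup/measurement iterations, forks) show that Genesis drives its benchmarks with JMH. For readers unfamiliar with the harness, a minimal self-contained JMH benchmark of the same shape might look like the following; the class, parameter and method names are illustrative and not taken from Genesis.

```java
import java.util.concurrent.ThreadLocalRandom;

import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.options.OptionsBuilder;

@State(Scope.Benchmark)
public class PutBenchmark {

  @Param({"8", "64"})   // payload size, analogous to the blockSize parameter above
  private int valueSize;

  private byte[] value;

  @Setup
  public void setup() {
    value = new byte[valueSize];
    ThreadLocalRandom.current().nextBytes(value);
  }

  @Benchmark
  public byte[] test() {
    // Stand-in for the measured operation (Genesis measures a RocksDB put here).
    return value.clone();
  }

  public static void main(String[] args) throws Exception {
    new Runner(new OptionsBuilder()
        .include(PutBenchmark.class.getSimpleName())
        .warmupIterations(2)
        .measurementIterations(20)
        .forks(1)
        .build()).run();
  }
}
```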
diff --git a/hadoop-hdds/docs/content/tools/TestTools.zh.md b/hadoop-hdds/docs/content/tools/TestTools.zh.md
index df02389c8a..c6dfd2cf61 100644
--- a/hadoop-hdds/docs/content/tools/TestTools.zh.md
+++ b/hadoop-hdds/docs/content/tools/TestTools.zh.md
@@ -107,4 +107,131 @@ Average Time spent in key write: 00:00:10,894
 Total bytes written: 10240000
 Total Execution time: 00:00:16,898
 ***********************
-```
\ No newline at end of file
+```
+
+## Genesis
+
+Genesis is a micro-benchmarking tool. It is also included in the distribution (`ozone genesis`), but it does not require a real cluster; instead it measures different parts of the code in an isolated way (for example, the code which saves data to the local RocksDB-based key-value store).
+
+Example run:
+
+```
+ ozone genesis -benchmark=BenchMarkRocksDbStore
+# JMH version: 1.19
+# VM version: JDK 11.0.1, VM 11.0.1+13-LTS
+# VM invoker: /usr/lib/jvm/java-11-openjdk-11.0.1.13-3.el7_6.x86_64/bin/java
+# VM options: -Dproc_genesis -Djava.net.preferIPv4Stack=true -Dhadoop.log.dir=/var/log/hadoop -Dhadoop.log.file=hadoop.log -Dhadoop.home.dir=/opt/hadoop -Dhadoop.id.str=hadoop -Dhadoop.root.logger=INFO,console -Dhadoop.policy.file=hadoop-policy.xml -Dhadoop.security.logger=INFO,NullAppender
+# Warmup: 2 iterations, 1 s each
+# Measurement: 20 iterations, 1 s each
+# Timeout: 10 min per iteration
+# Threads: 4 threads, will synchronize iterations
+# Benchmark mode: Throughput, ops/time
+# Benchmark: org.apache.hadoop.ozone.genesis.BenchMarkRocksDbStore.test
+# Parameters: (backgroundThreads = 4, blockSize = 8, maxBackgroundFlushes = 4, maxBytesForLevelBase = 512, maxOpenFiles = 5000, maxWriteBufferNumber = 16, writeBufferSize = 64)
+
+# Run progress: 0.00% complete, ETA 00:00:22
+# Fork: 1 of 1
+# Warmup Iteration   1: 213775.360 ops/s
+# Warmup Iteration   2: 32041.633 ops/s
+Iteration   1: 196342.348 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration   2: 41926.816 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration   3: 210433.231 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration   4: 46941.951 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration   5: 212825.884 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration   6: 145914.351 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration   7: 141838.469 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration   8: 205334.438 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration   9: 163709.519 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration  10: 162494.608 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration  11: 199155.793 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration  12: 209679.298 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration  13: 193787.574 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration  14: 127004.147 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration  15: 145511.080 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration  16: 223433.864 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration  17: 169752.665 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration  18: 165217.191 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration  19: 191038.476 ops/s
+                 ?stack: <delayed till summary>
+
+Iteration  20: 196335.579 ops/s
+                 ?stack: <delayed till summary>
+
+
+
+Result "org.apache.hadoop.ozone.genesis.BenchMarkRocksDbStore.test":
+  167433.864 ?(99.9%) 43530.883 ops/s [Average]
+  (min, avg, max) = (41926.816, 167433.864, 223433.864), stdev = 50130.230
+  CI (99.9%): [123902.981, 210964.748] (assumes normal distribution)
+
+Secondary result "org.apache.hadoop.ozone.genesis.BenchMarkRocksDbStore.test:?stack":
+Stack profiler:
+
+....[Thread state distributions]....................................................................
+ 78.9%         RUNNABLE
+ 20.0%         TIMED_WAITING
+  1.1%         WAITING
+
+....[Thread state: RUNNABLE]........................................................................
+ 59.8%  75.8% org.rocksdb.RocksDB.put
+ 16.5%  20.9% org.rocksdb.RocksDB.get
+  0.7%   0.9% java.io.UnixFileSystem.delete0
+  0.7%   0.9% org.rocksdb.RocksDB.disposeInternal
+  0.3%   0.4% java.lang.Long.formatUnsignedLong0
+  0.1%   0.2% org.apache.hadoop.ozone.genesis.BenchMarkRocksDbStore.test
+  0.1%   0.1% java.lang.Long.toUnsignedString0
+  0.1%   0.1% org.apache.hadoop.ozone.genesis.generated.BenchMarkRocksDbStore_test_jmhTest.test_thrpt_jmhStub
+  0.0%   0.1% java.lang.Object.clone
+  0.0%   0.0% java.lang.Thread.currentThread
+  0.4%   0.5% <other>
+
+....[Thread state: TIMED_WAITING]...................................................................
+ 20.0% 100.0% java.lang.Object.wait
+
+....[Thread state: WAITING].........................................................................
+  1.1% 100.0% jdk.internal.misc.Unsafe.park
+
+
+
+# Run complete. Total time: 00:00:38
+
+Benchmark                          (backgroundThreads)  (blockSize)  (maxBackgroundFlushes)  (maxBytesForLevelBase)  (maxOpenFiles)  (maxWriteBufferNumber)  (writeBufferSize)   Mode  Cnt       Score       Error  Units
+BenchMarkRocksDbStore.test                           4            8                       4                     512            5000                      16                 64  thrpt   20  167433.864 ? 43530.883  ops/s
+BenchMarkRocksDbStore.test:?stack                    4            8                       4                     512            5000                      16                 64  thrpt              NaN                ---
+```
diff --git a/hadoop-hdds/docs/content/tools/_index.md b/hadoop-hdds/docs/content/tools/_index.md
index 12dd7f4faa..090ba357b4 100644
--- a/hadoop-hdds/docs/content/tools/_index.md
+++ b/hadoop-hdds/docs/content/tools/_index.md
@@ -62,5 +62,6 @@ Admin commands:
 Test tools:
 
    * **freon** -  Runs the ozone load generator.
+   * **genesis**  - Developer Only, Ozone micro-benchmark application.
 
  For more information see the following subpages:
\ No newline at end of file
diff --git a/hadoop-hdds/docs/content/tools/_index.zh.md b/hadoop-hdds/docs/content/tools/_index.zh.md
index a8e9142719..43f4587e47 100644
--- a/hadoop-hdds/docs/content/tools/_index.zh.md
+++ b/hadoop-hdds/docs/content/tools/_index.zh.md
@@ -57,5 +57,6 @@ Ozone has a set of command line tools to manage Ozone.
 Test tools:
 
    * **freon** -  Runs the Ozone load generator.
+   * **genesis**  - Developer Only, Ozone micro-benchmark application.
 
 For more information see the following subpages:
\ No newline at end of file
diff --git a/hadoop-hdds/docs/themes/ozonedoc/layouts/shortcodes/image.html b/hadoop-hdds/docs/themes/ozonedoc/layouts/shortcodes/image.html
index 1f558d9c60..07c55b3164 100644
--- a/hadoop-hdds/docs/themes/ozonedoc/layouts/shortcodes/image.html
+++ b/hadoop-hdds/docs/themes/ozonedoc/layouts/shortcodes/image.html
@@ -16,4 +16,4 @@
 -->
 
 <!-- shortcode to easily scale images according to page width-->
-<img src='{{ .Get "src" }}' class="img-responsive"/>
\ No newline at end of file
+<img src='{{ .Get "src" }}' style="max-width:100%"/>
\ No newline at end of file
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java
index 14e63a1b30..d89ecc68de 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java
@@ -366,7 +366,7 @@ public class SCMSecurityProtocolClientSideTranslatorPB implements
         .setReason(Reason.valueOf(reason))
         .setRevokeTime(revocationTime).build();
     return submitRequest(Type.RevokeCertificates,
-        builder -> builder.setRevokeCertificatesRequest(req))
+        builder->builder.setRevokeCertificatesRequest(req))
         .getRevokeCertificatesResponseProto().getCrlId();
   }
 
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
index 77ef3f09a4..c484420ba0 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
@@ -45,7 +45,6 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolPro
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerReplicasRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerTokenRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerTokenResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineBatchRequestProto;
@@ -53,8 +52,6 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolPro
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetExistContainerWithPipelinesInBatchRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetPipelineRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetPipelineResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerCountRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerCountResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InSafeModeRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineResponseProto;
@@ -64,8 +61,6 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolPro
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.PipelineResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.RecommissionNodesRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.RecommissionNodesResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ReplicationManagerReportRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ReplicationManagerReportResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ReplicationManagerStatusRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ReplicationManagerStatusResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerBalancerStatusRequestProto;
@@ -89,7 +84,6 @@ import org.apache.hadoop.hdds.scm.DatanodeAdminError;
 import org.apache.hadoop.hdds.scm.ScmInfo;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
@@ -255,26 +249,6 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB
 
   }
 
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public List<HddsProtos.SCMContainerReplicaProto>
-      getContainerReplicas(long containerID) throws IOException {
-    Preconditions.checkState(containerID >= 0,
-        "Container ID cannot be negative");
-
-    GetContainerReplicasRequestProto request =
-        GetContainerReplicasRequestProto.newBuilder()
-            .setTraceID(TracingUtil.exportCurrentSpan())
-            .setContainerID(containerID).build();
-
-    ScmContainerLocationResponse response =
-        submitRequest(Type.GetContainerReplicas,
-            (builder) -> builder.setGetContainerReplicasRequest(request));
-    return response.getGetContainerReplicasResponse().getContainerReplicaList();
-  }
-
   /**
    * {@inheritDoc}
    */
@@ -332,7 +306,7 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB
       response = submitRequest(Type.GetExistContainerWithPipelinesInBatch,
           (builder) -> builder
               .setGetExistContainerWithPipelinesInBatchRequest(request));
-    } catch (IOException ex) {
+    } catch (IOException ex){
       return cps;
     }
 
@@ -761,27 +735,13 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB
 
   }
 
-  @Override
-  public ReplicationManagerReport getReplicationManagerReport()
-      throws IOException {
-    ReplicationManagerReportRequestProto request =
-        ReplicationManagerReportRequestProto.newBuilder()
-            .setTraceID(TracingUtil.exportCurrentSpan())
-            .build();
-    ReplicationManagerReportResponseProto response =
-        submitRequest(Type.GetReplicationManagerReport,
-            builder -> builder.setReplicationManagerReportRequest(request))
-        .getGetReplicationManagerReportResponse();
-    return ReplicationManagerReport.fromProtobuf(response.getReport());
-  }
-
   @Override
   public boolean startContainerBalancer(
-      Optional<Double> threshold, Optional<Integer> iterations,
-      Optional<Integer> maxDatanodesPercentageToInvolvePerIteration,
+      Optional<Double> threshold, Optional<Integer> idleiterations,
+      Optional<Double> maxDatanodesRatioToInvolvePerIteration,
       Optional<Long> maxSizeToMovePerIterationInGB,
       Optional<Long> maxSizeEnteringTargetInGB,
-      Optional<Long> maxSizeLeavingSourceInGB) throws IOException {
+      Optional<Long> maxSizeLeavingSourceInGB) throws IOException{
     StartContainerBalancerRequestProto.Builder builder =
         StartContainerBalancerRequestProto.newBuilder();
     builder.setTraceID(TracingUtil.exportCurrentSpan());
@@ -789,8 +749,8 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB
     //make balancer configuration optional
     if (threshold.isPresent()) {
       double tsd = threshold.get();
-      Preconditions.checkState(tsd >= 0.0D && tsd < 100D,
-          "threshold should be specified in range [0.0, 100.0).");
+      Preconditions.checkState(tsd >= 0.0D && tsd < 1.0D,
+          "threshold should to be specified in range [0.0, 1.0).");
       builder.setThreshold(tsd);
     }
     if (maxSizeToMovePerIterationInGB.isPresent()) {
@@ -799,22 +759,22 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB
           "maxSizeToMovePerIterationInGB must be positive.");
       builder.setMaxSizeToMovePerIterationInGB(mstm);
     }
-    if (maxDatanodesPercentageToInvolvePerIteration.isPresent()) {
-      int mdti = maxDatanodesPercentageToInvolvePerIteration.get();
+    if (maxDatanodesRatioToInvolvePerIteration.isPresent()) {
+      double mdti = maxDatanodesRatioToInvolvePerIteration.get();
       Preconditions.checkState(mdti >= 0,
-          "maxDatanodesPercentageToInvolvePerIteration must be " +
+          "maxDatanodesRatioToInvolvePerIteration must be " +
               "greater than equal to zero.");
-      Preconditions.checkState(mdti <= 100,
-          "maxDatanodesPercentageToInvolvePerIteration must be " +
-              "lesser than equal to hundred.");
-      builder.setMaxDatanodesPercentageToInvolvePerIteration(mdti);
+      Preconditions.checkState(mdti <= 1,
+          "maxDatanodesRatioToInvolvePerIteration must be " +
+              "lesser than equal to one.");
+      builder.setMaxDatanodesRatioToInvolvePerIteration(mdti);
     }
-    if (iterations.isPresent()) {
-      int i = iterations.get();
-      Preconditions.checkState(i > 0 || i == -1,
-          "number of iterations must be positive or" +
-              " -1 (for running container balancer infinitely).");
-      builder.setIterations(i);
+    if (idleiterations.isPresent()) {
+      int idi = idleiterations.get();
+      Preconditions.checkState(idi > 0 || idi == -1,
+          "idleiterations must be positive or" +
+              " -1(infinitly run container balancer).");
+      builder.setIdleiterations(idi);
     }
 
     if (maxSizeEnteringTargetInGB.isPresent()) {
@@ -971,18 +931,6 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB
     return OzonePBHelper.tokenFromProto(response.getToken());
   }
 
-  @Override
-  public long getContainerCount() throws IOException {
-    GetContainerCountRequestProto request =
-        GetContainerCountRequestProto.newBuilder().build();
-
-    GetContainerCountResponseProto response =
-        submitRequest(Type.GetContainerCount,
-          builder -> builder.setGetContainerCountRequest(request))
-        .getGetContainerCountResponse();
-    return response.getContainerCount();
-  }
-
   @Override
   public Object getUnderlyingProxyObject() {
     return rpcProxy;
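
Taken together, the hunks above drop the getContainerReplicas, getReplicationManagerReport and getContainerCount RPCs from the client-side translator and revert startContainerBalancer to the idleiterations/ratio signature. A minimal sketch of a call against the restored signature, assuming only what the hunks show (the helper class and the chosen values are illustrative, not from the patch):

    import java.io.IOException;
    import java.util.Optional;
    import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;

    final class BalancerStartSketch {
      // Starts the balancer with the three tunables the preconditions above
      // validate; the remaining size limits are left unset.
      static boolean start(StorageContainerLocationProtocol scmClient)
          throws IOException {
        return scmClient.startContainerBalancer(
            Optional.of(0.1d),   // threshold: utilization fraction in [0.0, 1.0)
            Optional.of(-1),     // idleiterations: -1 = run indefinitely
            Optional.of(0.4d),   // maxDatanodesRatioToInvolvePerIteration: [0, 1]
            Optional.empty(),    // maxSizeToMovePerIterationInGB
            Optional.empty(),    // maxSizeEnteringTargetInGB
            Optional.empty());   // maxSizeLeavingSourceInGB
      }
    }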
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/CRLClientUpdateHandler.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/CRLClientUpdateHandler.java
index 72da5194c3..6d544819d9 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/CRLClientUpdateHandler.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/CRLClientUpdateHandler.java
@@ -68,7 +68,7 @@ public class CRLClientUpdateHandler implements ClientUpdateHandler {
 
     this.clientStore = serviceGrpcClient.getClientCRLStore();
     this.crlCheckInterval = crlCheckInterval;
-    LOG.info("Pending CRL check interval : {}s", crlCheckInterval / 1000);
+    LOG.info("Pending CRL check interval : {}s", crlCheckInterval/1000);
     this.executorService = Executors.newSingleThreadScheduledExecutor(
         new ThreadFactoryBuilder().setDaemon(true)
             .setNameFormat("CRLUpdateHandler Thread - %d").build());
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/ClientCRLStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/ClientCRLStore.java
index 5e326ccfea..721988ec7a 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/ClientCRLStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/ClientCRLStore.java
@@ -76,7 +76,7 @@ public class ClientCRLStore implements CRLStore {
 
   public List<Long> getRevokedCertIds(X509CRL crl) {
     return Collections.unmodifiableList(crl.getRevokedCertificates().stream()
-        .map(cert -> cert.getSerialNumber().longValue())
+        .map(cert->cert.getSerialNumber().longValue())
         .collect(Collectors.toList()));
   }
 
@@ -91,7 +91,7 @@ public class ClientCRLStore implements CRLStore {
 
   public List<Long> getPendingCrlIds() {
     return new ArrayList<>(pendingCrls)
-        .stream().map(crl -> crl.getCrlSequenceID())
+        .stream().map(crl->crl.getCrlSequenceID())
         .collect(Collectors.toList());
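
Both hunks in this file reduce a CRL collection to plain serial numbers. The same traversal as getRevokedCertIds, spelled out with JDK types only (a sketch that assumes, as the callers here do, a CRL that actually revokes something):

    import java.security.cert.X509CRL;
    import java.security.cert.X509CRLEntry;
    import java.util.ArrayList;
    import java.util.List;

    final class CrlSerialsSketch {
      static List<Long> revokedSerials(X509CRL crl) {
        List<Long> ids = new ArrayList<>();
        for (X509CRLEntry entry : crl.getRevokedCertificates()) {
          ids.add(entry.getSerialNumber().longValue());
        }
        return ids;   // the store above additionally wraps this in an
      }               // unmodifiable list
    }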
   }
 
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/SCMUpdateServiceGrpcClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/SCMUpdateServiceGrpcClient.java
index 8b96d5c0a9..96e157711b 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/SCMUpdateServiceGrpcClient.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/SCMUpdateServiceGrpcClient.java
@@ -92,7 +92,7 @@ public class SCMUpdateServiceGrpcClient {
       createChannel();
     }
     clientId = subScribeClient();
-    assert (clientId != null);
+    assert(clientId != null);
 
     // start background thread processing pending crl ids.
     handler = new CRLClientUpdateHandler(clientId, updateClient,
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/BaseApprover.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/BaseApprover.java
index 6738868942..3136168cc5 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/BaseApprover.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/BaseApprover.java
@@ -100,7 +100,7 @@ public abstract class BaseApprover implements CertificateApprover {
     Objects.requireNonNull(attribute);
     List<Extensions> extensionsList = new ArrayList<>();
     for (ASN1Encodable value : attribute.getAttributeValues()) {
-      if (value != null) {
+      if(value != null) {
         Extensions extensions = Extensions.getInstance(value);
         extensionsList.add(extensions);
       }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java
index 83be3aaf3b..fc2a77b02b 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java
@@ -228,7 +228,7 @@ public class DefaultCAServer implements CertificateServer {
     CompletableFuture<X509CertificateHolder> xcertHolder =
         approver.inspectCSR(csr);
 
-    if (xcertHolder.isCompletedExceptionally()) {
+    if(xcertHolder.isCompletedExceptionally()) {
       // This means that approver told us there are things which it disagrees
       // with in this Certificate Request. Since the first set of sanity
       // checks failed, we just return the future object right here.
@@ -324,7 +324,7 @@ public class DefaultCAServer implements CertificateServer {
   public List<X509Certificate> listCertificate(NodeType role,
       long startSerialId, int count, boolean isRevoked) throws IOException {
     return store.listCertificate(role, BigInteger.valueOf(startSerialId), count,
-        isRevoked ? CertificateStore.CertType.REVOKED_CERTS :
+        isRevoked? CertificateStore.CertType.REVOKED_CERTS :
             CertificateStore.CertType.VALID_CERTS);
   }
 
@@ -554,7 +554,7 @@ public class DefaultCAServer implements CertificateServer {
       OzoneSecurityUtil.getValidInetsForCurrentHost().forEach(
           ip -> {
             builder.addIpAddress(ip.getHostAddress());
-            if (validator.isValid(ip.getCanonicalHostName())) {
+            if(validator.isValid(ip.getCanonicalHostName())) {
               builder.addDnsName(ip.getCanonicalHostName());
             }
           });
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultCAProfile.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultCAProfile.java
index da799d7d45..a146c738d1 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultCAProfile.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultCAProfile.java
@@ -53,7 +53,7 @@ public class DefaultCAProfile extends DefaultProfile {
       PKIProfile pkiProfile) {
     BasicConstraints constraints =
         BasicConstraints.getInstance(ext.getParsedValue());
-    if (constraints.isCA()) {
+    if(constraints.isCA()) {
       if (pkiProfile.isCA()) {
         return true;
       }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
index d681806c12..d831c834fd 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
@@ -516,7 +516,7 @@ public abstract class DefaultCertificateClient implements CertificateClient {
       OzoneSecurityUtil.getValidInetsForCurrentHost().forEach(
           ip -> {
             builder.addIpAddress(ip.getHostAddress());
-            if (validator.isValid(ip.getCanonicalHostName())) {
+            if(validator.isValid(ip.getCanonicalHostName())) {
               builder.addDnsName(ip.getCanonicalHostName());
             } else {
               getLogger().error("Invalid domain {}", ip.getCanonicalHostName());
@@ -580,7 +580,7 @@ public abstract class DefaultCertificateClient implements CertificateClient {
       String certName = String.format(CERT_FILE_NAME_FORMAT,
           cert.getSerialNumber().toString());
 
-      if (caCert) {
+      if(caCert) {
         certName = CA_CERT_PREFIX + certName;
         caCertId = cert.getSerialNumber().toString();
       }
@@ -688,17 +688,17 @@ public abstract class DefaultCertificateClient implements CertificateClient {
   @Override
   public synchronized InitResponse init() throws CertificateException {
     int initCase = 0;
-    PrivateKey pvtKey = getPrivateKey();
+    PrivateKey pvtKey= getPrivateKey();
     PublicKey pubKey = getPublicKey();
     X509Certificate certificate = getCertificate();
 
-    if (pvtKey != null) {
-      initCase = initCase | 1 << 2;
+    if(pvtKey != null){
+      initCase = initCase | 1<<2;
     }
-    if (pubKey != null) {
-      initCase = initCase | 1 << 1;
+    if(pubKey != null){
+      initCase = initCase | 1<<1;
     }
-    if (certificate != null) {
+    if(certificate != null){
       initCase = initCase | 1;
     }
     getLogger().info("Certificate client init case: {}", initCase);
@@ -800,7 +800,7 @@ public abstract class DefaultCertificateClient implements CertificateClient {
     PublicKey pubKey = getCertificate().getPublicKey();
     try {
 
-      if (validateKeyPair(pubKey)) {
+      if(validateKeyPair(pubKey)){
         keyCodec.writePublicKey(pubKey);
         publicKey = pubKey;
       } else {
@@ -922,7 +922,7 @@ public abstract class DefaultCertificateClient implements CertificateClient {
         updateCAList();
       }
       return pemEncodedCACerts;
-    } finally {
+    }finally {
       lock.unlock();
     }
   }
@@ -947,7 +947,7 @@ public abstract class DefaultCertificateClient implements CertificateClient {
   }
 
   @Override
-  public boolean processCrl(CRLInfo crl) {
+  public boolean processCrl(CRLInfo crl){
     List<String> certIds2Remove = new ArrayList();
     crl.getX509CRL().getRevokedCertificates().forEach(
         cert -> certIds2Remove.add(cert.getSerialNumber().toString()));
@@ -957,15 +957,15 @@ public abstract class DefaultCertificateClient implements CertificateClient {
   }
 
 
-  private boolean removeCertificates(List<String> certIds) {
+  private boolean removeCertificates(List<String> certIds){
     lock.lock();
     boolean reInitCert = false;
     try {
       // For now, remove self cert and ca cert is not implemented
       // both requires a restart of the service.
-      if ((certSerialId != null && certIds.contains(certSerialId)) ||
-          (caCertId != null && certIds.contains(caCertId)) ||
-          (rootCaCertId != null && certIds.contains(rootCaCertId))) {
+      if ((certSerialId!=null && certIds.contains(certSerialId)) ||
+          (caCertId!=null && certIds.contains(caCertId)) ||
+          (rootCaCertId!=null && certIds.contains(rootCaCertId))) {
         reInitCert = true;
       }
 
@@ -1004,7 +1004,7 @@ public abstract class DefaultCertificateClient implements CertificateClient {
    * Set Local CRL id.
    * @param crlId
    */
-  public void setLocalCrlId(long crlId) {
+  public void setLocalCrlId(long crlId){
     this.localCrlId = crlId;
   }
 }
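
For reference, the init() hunk above packs which security material already exists into a three-bit case number that the subsequent handling acts on. Spelled out as a standalone helper (a restatement of the visible logic, not part of the patch):

    import java.security.PrivateKey;
    import java.security.PublicKey;
    import java.security.cert.X509Certificate;

    final class InitCaseSketch {
      // bit 2 = private key, bit 1 = public key, bit 0 = certificate:
      // 0 (0b000) means a fresh client, 7 (0b111) means all three exist.
      static int initCase(PrivateKey pvtKey, PublicKey pubKey,
          X509Certificate certificate) {
        int initCase = 0;
        if (pvtKey != null) {
          initCase |= 1 << 2;
        }
        if (pubKey != null) {
          initCase |= 1 << 1;
        }
        if (certificate != null) {
          initCase |= 1;
        }
        return initCase;
      }
    }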
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java
index 6143bd1030..7aea5967df 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java
@@ -44,8 +44,8 @@ public class OMCertificateClient extends DefaultCertificateClient {
   public OMCertificateClient(SecurityConfig securityConfig,
       String certSerialId, String localCrlId) {
     super(securityConfig, LOG, certSerialId, COMPONENT_NAME);
-    this.setLocalCrlId(localCrlId != null ?
-        Long.parseLong(localCrlId) : 0);
+    this.setLocalCrlId(localCrlId!=null ?
+        Long.parseLong(localCrlId): 0);
   }
 
   public OMCertificateClient(SecurityConfig securityConfig,
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java
index ec7b5a83f2..b8d2859eed 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java
@@ -117,7 +117,7 @@ public final class CertificateSignRequest {
     PemObject pemObject =
         new PemObject("CERTIFICATE REQUEST", request.getEncoded());
     StringWriter str = new StringWriter();
-    try (JcaPEMWriter pemWriter = new JcaPEMWriter(str)) {
+    try(JcaPEMWriter pemWriter = new JcaPEMWriter(str)) {
       pemWriter.writeObject(pemObject);
     }
     return str.toString();
@@ -135,7 +135,7 @@ public final class CertificateSignRequest {
       throws IOException {
     try (PemReader reader = new PemReader(new StringReader(csr))) {
       PemObject pemObject = reader.readPemObject();
-      if (pemObject.getContent() == null) {
+      if(pemObject.getContent() == null) {
         throw new SCMSecurityException("Invalid Certificate signing request",
             INVALID_CSR);
       }
@@ -268,10 +268,10 @@ public final class CertificateSignRequest {
 
     private Extension getKeyUsageExtension() throws IOException {
       int keyUsageFlag = KeyUsage.keyAgreement;
-      if (digitalEncryption) {
+      if(digitalEncryption){
         keyUsageFlag |= KeyUsage.keyEncipherment | KeyUsage.dataEncipherment;
       }
-      if (digitalSignature) {
+      if(digitalSignature) {
         keyUsageFlag |= KeyUsage.digitalSignature;
       }
 
@@ -303,7 +303,7 @@ public final class CertificateSignRequest {
       List<Extension> extensions = new ArrayList<>();
 
       // Add basic extension
-      if (ca) {
+      if(ca) {
         extensions.add(getBasicExtension());
       }
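
getKeyUsageExtension() above composes the usage bits incrementally; condensed into one helper using the same BouncyCastle KeyUsage constants (a sketch, with the two booleans standing in for the builder fields):

    import org.bouncycastle.asn1.x509.KeyUsage;

    final class KeyUsageSketch {
      static KeyUsage keyUsageFor(boolean digitalEncryption,
          boolean digitalSignature) {
        int flags = KeyUsage.keyAgreement;   // always requested
        if (digitalEncryption) {
          flags |= KeyUsage.keyEncipherment | KeyUsage.dataEncipherment;
        }
        if (digitalSignature) {
          flags |= KeyUsage.digitalSignature;
        }
        return new KeyUsage(flags);
      }
    }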
 
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfo.java
index 8aa512f691..5a9fba65b1 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfo.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfo.java
@@ -44,7 +44,7 @@ public class CRLInfo implements Comparator<CRLInfo>,
   private Instant revocationTime;
 
   private CRLInfo(X509CRL x509CRL, long creationTimestamp, long crlSequenceID) {
-    assert ((x509CRL != null) &&
+    assert((x509CRL != null) &&
         !x509CRL.getRevokedCertificates().isEmpty());
     this.x509CRL = x509CRL;
     this.creationTimestamp = creationTimestamp;
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfoCodec.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfoCodec.java
index 2d53b8fb6f..3178cfdc3b 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfoCodec.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfoCodec.java
@@ -44,7 +44,7 @@ public class CRLInfoCodec implements Codec<CRLInfo> {
     try {
       return CRLInfo.fromProtobuf(
           HddsProtos.CRLInfoProto.PARSER.parseFrom(rawData));
-    } catch (CertificateException | CRLException e) {
+    } catch (CertificateException|CRLException e) {
       throw new IllegalArgumentException(
           "Can't encode the the raw data from the byte array", e);
     }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java
index f4f188aaf3..a3f1b6bc2c 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java
@@ -192,7 +192,7 @@ public class ProfileServlet extends HttpServlet {
   protected static String generateFileName(Integer pid, Output output,
       Event event) {
     String outputFormat = output.name().toLowerCase();
-    if (output == Output.FLAMEGRAPH) {
+    if(output == Output.FLAMEGRAPH) {
       outputFormat = "html";
     }
     return FILE_PREFIX + pid + "-" +
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointMetrics.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointMetrics.java
index 3dc176644d..87dc882a00 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointMetrics.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointMetrics.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
  * This interface is for maintaining DB checkpoint statistics.
  */
 @InterfaceAudience.Private
-@Metrics(about = "DB checkpoint Metrics", context = "dfs")
+@Metrics(about="DB checkpoint Metrics", context="dfs")
 public class DBCheckpointMetrics {
   private static final String SOURCE_NAME =
       DBCheckpointMetrics.class.getSimpleName();
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java
index 7f2deeb093..78f8a80ae2 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java
@@ -404,7 +404,7 @@ public final class HAUtils {
           return getCAListWithRetry(() -> waitForCACerts(
               scmSecurityProtocolClient::listCACertificate,
               expectedCount), waitDuration);
-        } else {
+        } else{
           return scmSecurityProtocolClient.listCACertificate();
         }
       }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java
index e3b91ba8ed..508320e850 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java
@@ -82,7 +82,7 @@ public final class MetadataKeyFilters {
     private int keysScanned = 0;
     private int keysHinted = 0;
 
-    public KeyPrefixFilter() { }
+    public KeyPrefixFilter() {}
 
     /**
      * KeyPrefixFilter constructor. It is made of positive and negative prefix
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java
index 1d1bff1bbc..ec4c0e1a25 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java
@@ -47,7 +47,7 @@ public final class TransactionInfo {
   private TransactionInfo(String transactionInfo) {
     String[] tInfo =
         transactionInfo.split(TRANSACTION_INFO_SPLIT_KEY);
-    Preconditions.checkState(tInfo.length == 2,
+    Preconditions.checkState(tInfo.length==2,
         "Incorrect TransactionInfo value");
 
     term = Long.parseLong(tInfo[0]);
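
The constructor above requires exactly two components in the persisted string. Shape of the parse, as a sketch (the '#' separator and the index component are assumptions for illustration; only the term parse is visible in this hunk):

    // "<term><SPLIT_KEY><index>", e.g. "7#1024" -> term = 7, index = 1024.
    String[] tInfo = "7#1024".split("#");
    long term = Long.parseLong(tInfo[0]);
    long index = Long.parseLong(tInfo[1]);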
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java
index c9bf38504f..50ac54f921 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java
@@ -124,8 +124,8 @@ public final class DBConfigFromFile {
     Env env = Env.getDefault();
     DBOptions options = null;
     File configLocation = getConfigLocation();
-    if (configLocation != null &&
-        StringUtil.isNotBlank(configLocation.toString())) {
+    if(configLocation != null &&
+        StringUtil.isNotBlank(configLocation.toString())){
       Path optionsFile = Paths.get(configLocation.toString(),
           getOptionsFileNameFromDB(dbFileName));
 
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java
index 2ac2bdc730..f0096ed9d8 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java
@@ -187,14 +187,4 @@ public interface DBStore extends AutoCloseable, BatchOperationHandler {
    */
   DBUpdatesWrapper getUpdatesSince(long sequenceNumber)
       throws SequenceNumberNotFoundException;
-
-  /**
-   * Get limited data written to DB since a specific sequence number.
-   * @param sequenceNumber
-   * @param limitCount
-   * @return
-   * @throws SequenceNumberNotFoundException
-   */
-  DBUpdatesWrapper getUpdatesSince(long sequenceNumber, long limitCount)
-      throws SequenceNumberNotFoundException;
 }
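
With the limited overload removed from this interface, consumers that tailed the database in bounded batches fall back to the one-argument form and must cap work themselves. A caller-side sketch using only the types declared above (store and lastApplied are illustrative names):

    // Everything since lastApplied arrives in one wrapper; batching and
    // back-pressure now have to happen on the consumer side.
    DBUpdatesWrapper updates = store.getUpdatesSince(lastApplied);
    int batches = updates.getData().size();   // write batches returned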
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
index 8b07003c9c..ad48a19927 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
@@ -168,7 +168,7 @@ public final class DBStoreBuilder {
    * @return DBStore
    */
   public DBStore build() throws IOException {
-    if (StringUtil.isBlank(dbname) || (dbPath == null)) {
+    if(StringUtil.isBlank(dbname) || (dbPath == null)) {
       LOG.error("Required Parameter missing.");
       throw new IOException("Required parameter is missing. Please make sure "
           + "Path and DB name is provided.");
@@ -340,7 +340,7 @@ public final class DBStoreBuilder {
         try {
           option = DBConfigFromFile.readFromFile(dbname,
               columnFamilyDescriptors);
-          if (option != null) {
+          if(option != null) {
             LOG.info("Using RocksDB DBOptions from {}.ini file", dbname);
           }
         } catch (IOException ex) {
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
index eb71ec1783..b50b46225e 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
@@ -382,15 +382,7 @@ public class RDBStore implements DBStore {
   @Override
   public DBUpdatesWrapper getUpdatesSince(long sequenceNumber)
       throws SequenceNumberNotFoundException {
-    return getUpdatesSince(sequenceNumber, Long.MAX_VALUE);
-  }
 
-  @Override
-  public DBUpdatesWrapper getUpdatesSince(long sequenceNumber, long limitCount)
-      throws SequenceNumberNotFoundException {
-    if (limitCount <= 0) {
-      throw new IllegalArgumentException("Illegal count for getUpdatesSince.");
-    }
     DBUpdatesWrapper dbUpdatesWrapper = new DBUpdatesWrapper();
     try {
       TransactionLogIterator transactionLogIterator =
@@ -423,9 +415,6 @@ public class RDBStore implements DBStore {
         }
         dbUpdatesWrapper.addWriteBatch(result.writeBatch().data(),
             result.sequenceNumber());
-        if (currSequenceNumber - sequenceNumber >= limitCount) {
-          break;
-        }
         transactionLogIterator.next();
       }
     } catch (RocksDBException e) {
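
After this revert the method walks RocksDB's transaction log until the iterator is exhausted instead of breaking once limitCount sequence numbers have been consumed. Reduced to its RocksDB calls (a sketch with exception handling elided; db and the wrapper are the ones in the method above):

    TransactionLogIterator it = db.getUpdatesSince(sequenceNumber);
    while (it.isValid()) {
      TransactionLogIterator.BatchResult result = it.getBatch();
      dbUpdatesWrapper.addWriteBatch(result.writeBatch().data(),
          result.sequenceNumber());
      it.next();   // no early break: reads to the tail of the WAL
    }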
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
index c7f6196a63..f92306ab43 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
@@ -98,7 +98,7 @@ public class TypedTable<KEY, VALUE> implements Table<KEY, VALUE> {
     if (cacheType == CacheType.FULL_CACHE) {
       cache = new FullTableCache<>();
       //fill cache
-      try (TableIterator<KEY, ? extends KeyValue<KEY, VALUE>> tableIterator =
+      try(TableIterator<KEY, ? extends KeyValue<KEY, VALUE>> tableIterator =
               iterator()) {
 
         while (tableIterator.hasNext()) {
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java
index 401d644bc8..7be2921b6a 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java
@@ -56,7 +56,7 @@ public class CacheKey<KEY> implements Comparable<KEY> {
 
   @Override
   public int compareTo(Object o) {
-    if (Objects.equals(key, ((CacheKey<?>)o).key)) {
+    if(Objects.equals(key, ((CacheKey<?>)o).key)) {
       return 0;
     } else {
       return key.toString().compareTo((((CacheKey<?>) o).key).toString());
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/EpochEntry.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/EpochEntry.java
index 120a08bcee..d87e90d36d 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/EpochEntry.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/EpochEntry.java
@@ -63,7 +63,7 @@ public class EpochEntry<CACHEKEY> implements Comparable<CACHEKEY> {
 
   @Override
   public int compareTo(Object o) {
-    if (this.epoch == ((EpochEntry<?>)o).epoch) {
+    if(this.epoch == ((EpochEntry<?>)o).epoch) {
       return 0;
     } else if (this.epoch < ((EpochEntry<?>)o).epoch) {
       return -1;
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockCAStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockCAStore.java
index a2b2e775c7..39bf082907 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockCAStore.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockCAStore.java
@@ -92,7 +92,7 @@ public class MockCAStore implements CertificateStore {
   }
 
   @Override
-  public void reinitialize(SCMMetadataStore metadataStore) { }
+  public void reinitialize(SCMMetadataStore metadataStore) {}
 
   @Override
   public List<CRLInfo> getCrls(List<Long> crlIds) throws IOException {
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCRLCodec.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCRLCodec.java
index d6df77fc30..3d32a3312c 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCRLCodec.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCRLCodec.java
@@ -138,7 +138,7 @@ public class TestCRLCodec {
     assertTrue(crlFile.exists());
 
     try (BufferedReader reader = new BufferedReader(new InputStreamReader(
-        new FileInputStream(crlFile), UTF_8))) {
+        new FileInputStream(crlFile), UTF_8))){
 
       // Verify contents of the file
       String header = reader.readLine();
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestCertificateSignRequest.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestCertificateSignRequest.java
index 1aab7a5de4..5b1a1f032a 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestCertificateSignRequest.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestCertificateSignRequest.java
@@ -279,8 +279,8 @@ public class TestCertificateSignRequest {
         GeneralNames.fromExtensions(
             extensions, Extension.subjectAlternativeName);
     GeneralName[] names = gns.getNames();
-    for (int i = 0; i < names.length; i++) {
-      if (names[i].getTagNo() == GeneralName.otherName) {
+    for(int i=0; i < names.length; i++) {
+      if(names[i].getTagNo() == GeneralName.otherName) {
         ASN1Encodable asn1Encodable = names[i].getName();
         Iterator iterator = ((DLSequence) asn1Encodable).iterator();
         while (iterator.hasNext()) {
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestRootCertificate.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestRootCertificate.java
index 776aa4af56..1e3a8f4610 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestRootCertificate.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestRootCertificate.java
@@ -165,7 +165,7 @@ public class TestRootCertificate {
       OzoneSecurityUtil.getValidInetsForCurrentHost().forEach(
           ip -> {
             builder.addIpAddress(ip.getHostAddress());
-            if (validator.isValid(ip.getCanonicalHostName())) {
+            if(validator.isValid(ip.getCanonicalHostName())) {
               builder.addDnsName(ip.getCanonicalHostName());
             }
           });
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java
index 2fef2b8736..9bad0f3107 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java
@@ -79,7 +79,7 @@ public class TestHDDSKeyGenerator {
     HDDSKeyGenerator keyGen = new HDDSKeyGenerator(config.getConfiguration());
     KeyPair keyPair = keyGen.generateKey(4096);
     PublicKey publicKey = keyPair.getPublic();
-    if (publicKey instanceof RSAPublicKey) {
+    if(publicKey instanceof RSAPublicKey) {
       Assert.assertEquals(4096,
           ((RSAPublicKey)(publicKey)).getModulus().bitLength());
     }
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java
index e78bcb0085..99fcbae804 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java
@@ -69,7 +69,7 @@ public class TestDBStoreBuilder {
   public void builderWithOneParamV2() throws IOException {
     OzoneConfiguration conf = new OzoneConfiguration();
     File newFolder = folder.newFolder();
-    if (!newFolder.exists()) {
+    if(!newFolder.exists()) {
       Assert.assertTrue(newFolder.mkdirs());
     }
     thrown.expect(IOException.class);
@@ -82,7 +82,7 @@ public class TestDBStoreBuilder {
   public void builderWithOpenClose() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
     File newFolder = folder.newFolder();
-    if (!newFolder.exists()) {
+    if(!newFolder.exists()) {
       Assert.assertTrue(newFolder.mkdirs());
     }
     DBStore dbStore = DBStoreBuilder.newBuilder(conf)
@@ -97,7 +97,7 @@ public class TestDBStoreBuilder {
   public void builderWithDoubleTableName() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
     File newFolder = folder.newFolder();
-    if (!newFolder.exists()) {
+    if(!newFolder.exists()) {
       Assert.assertTrue(newFolder.mkdirs());
     }
     // Registering a new table with the same name should replace the previous
@@ -127,7 +127,7 @@ public class TestDBStoreBuilder {
   public void builderWithDataWrites() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
     File newFolder = folder.newFolder();
-    if (!newFolder.exists()) {
+    if(!newFolder.exists()) {
       Assert.assertTrue(newFolder.mkdirs());
     }
     try (DBStore dbStore = DBStoreBuilder.newBuilder(conf)
@@ -156,7 +156,7 @@ public class TestDBStoreBuilder {
   public void builderWithDiskProfileWrites() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
     File newFolder = folder.newFolder();
-    if (!newFolder.exists()) {
+    if(!newFolder.exists()) {
       Assert.assertTrue(newFolder.mkdirs());
     }
     try (DBStore dbStore = DBStoreBuilder.newBuilder(conf)
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
index ed8744ceba..34d348f416 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
@@ -74,7 +74,7 @@ public class TestRDBStore {
     statistics.setStatsLevel(StatsLevel.ALL);
     options = options.setStatistics(statistics);
     configSet = new HashSet<>();
-    for (String name : families) {
+    for(String name : families) {
       TableConfig newConfig = new TableConfig(name, new ColumnFamilyOptions());
       configSet.add(newConfig);
     }
@@ -288,32 +288,44 @@ public class TestRDBStore {
     }
   }
 
+  /**
+   * Not strictly a unit test. Just a confirmation of the expected behavior
+   * of the RocksDB keyMayExist API.
+   * Expected behavior - On average, keyMayExist latency < db.get() latency
+   * for invalid keys.
+   * @throws Exception if unable to read from RocksDB.
+   */
   @Test
-  public void testGetDBUpdatesSince() throws Exception {
-
+  public void testRocksDBKeyMayExistApi() throws Exception {
     try (RDBStore newStore =
              new RDBStore(folder.newFolder(), options, configSet)) {
+      RocksDB db = newStore.getDb();
 
-      try (Table firstTable = newStore.getTable(families.get(1))) {
-        firstTable.put(
-            org.apache.commons.codec.binary.StringUtils.getBytesUtf16("Key1"),
+      //Test with 50 invalid keys.
+      long start = System.nanoTime();
+      for (int i = 0; i < 50; i++) {
+        Assert.assertTrue(db.get(
             org.apache.commons.codec.binary.StringUtils
-                .getBytesUtf16("Value1"));
-        firstTable.put(
-            org.apache.commons.codec.binary.StringUtils.getBytesUtf16("Key2"),
+                .getBytesUtf16("key" + i)) == null);
+      }
+      long end = System.nanoTime();
+      long keyGetLatency = end - start;
+
+      start = System.nanoTime();
+      for (int i = 0; i < 50; i++) {
+        Assert.assertFalse(db.keyMayExist(
             org.apache.commons.codec.binary.StringUtils
-                .getBytesUtf16("Value2"));
+                .getBytesUtf16("key" + i), null));
       }
-      Assert.assertTrue(
-          newStore.getDb().getLatestSequenceNumber() == 2);
+      end = System.nanoTime();
+      long keyMayExistLatency = end - start;
 
-      DBUpdatesWrapper dbUpdatesSince = newStore.getUpdatesSince(0);
-      Assert.assertEquals(2, dbUpdatesSince.getData().size());
+      Assert.assertTrue(keyMayExistLatency < keyGetLatency);
     }
   }
 
   @Test
-  public void testGetDBUpdatesSinceWithLimitCount() throws Exception {
+  public void testGetDBUpdatesSince() throws Exception {
 
     try (RDBStore newStore =
              new RDBStore(folder.newFolder(), options, configSet)) {
@@ -331,8 +343,8 @@ public class TestRDBStore {
       Assert.assertTrue(
           newStore.getDb().getLatestSequenceNumber() == 2);
 
-      DBUpdatesWrapper dbUpdatesSince = newStore.getUpdatesSince(0, 1);
-      Assert.assertEquals(1, dbUpdatesSince.getData().size());
+      DBUpdatesWrapper dbUpdatesSince = newStore.getUpdatesSince(0);
+      Assert.assertEquals(2, dbUpdatesSince.getData().size());
     }
   }
 
@@ -358,7 +370,7 @@ public class TestRDBStore {
     options.setCreateMissingColumnFamilies(true);
     configSet = new HashSet<>();
     List<String> familiesMinusOne = families.subList(0, families.size() - 1);
-    for (String name : familiesMinusOne) {
+    for(String name : familiesMinusOne) {
       TableConfig newConfig = new TableConfig(name, new ColumnFamilyOptions());
       configSet.add(newConfig);
     }
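
The new testRocksDBKeyMayExistApi above relies on keyMayExist being a cheap probe with false positives but no false negatives. Its contract in caller form, using the same handle and key encoding as the test:

    byte[] key = org.apache.commons.codec.binary.StringUtils
        .getBytesUtf16("key0");
    if (!db.keyMayExist(key, null)) {
      // Definitely absent: bloom filter/memtable ruled it out, no disk read.
    } else {
      byte[] value = db.get(key);   // "may exist" only: can still be null
    }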
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreIterator.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreIterator.java
index b49556df9f..fea40bbf30 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreIterator.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreIterator.java
@@ -92,7 +92,7 @@ public class TestRDBStoreIterator {
   }
 
   @Test
-  public void testHasNextDependsOnIsvalid() {
+  public void testHasNextDependsOnIsvalid(){
     when(rocksDBIteratorMock.isValid()).thenReturn(true, true, false);
 
     RDBStoreIterator iter = new RDBStoreIterator(rocksDBIteratorMock);
@@ -169,7 +169,7 @@ public class TestRDBStoreIterator {
 
     RDBStoreIterator iter = new RDBStoreIterator(rocksDBIteratorMock);
     byte[] key = null;
-    if (iter.hasNext()) {
+    if(iter.hasNext()) {
       ByteArrayKeyValue entry = iter.next();
       key = entry.getKey();
     }
@@ -191,7 +191,7 @@ public class TestRDBStoreIterator {
     ByteArrayKeyValue entry;
     byte[] key = null;
     byte[] value = null;
-    if (iter.hasNext()) {
+    if(iter.hasNext()) {
       entry = iter.next();
       key = entry.getKey();
       value = entry.getValue();
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
index 0f1858b902..5d007630e5 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
@@ -63,7 +63,7 @@ public class TestRDBTableStore {
     count++;
     try {
       Assert.assertNotNull(keyValue.getKey());
-    } catch (IOException ex) {
+    } catch(IOException ex) {
       Assert.fail("Unexpected Exception " + ex.toString());
     }
     return true;
@@ -80,7 +80,7 @@ public class TestRDBTableStore {
     options = options.setStatistics(statistics);
 
     Set<TableConfig> configSet = new HashSet<>();
-    for (String name : families) {
+    for(String name : families) {
       TableConfig newConfig = new TableConfig(name, new ColumnFamilyOptions());
       configSet.add(newConfig);
     }
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java
index 837ea27e54..073027f263 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java
@@ -296,7 +296,7 @@ public class TestTypedRDBTableStore {
       }
 
       ArrayList<Long> epochs = new ArrayList<>();
-      for (long i = 0; i <= 5L; i++) {
+      for (long i=0; i<=5L; i++) {
         epochs.add(i);
       }
       testTable.cleanupCache(epochs);
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/cache/TestTableCache.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/cache/TestTableCache.java
index 860a695cda..a1cc7ddec5 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/cache/TestTableCache.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/cache/TestTableCache.java
@@ -74,13 +74,13 @@ public class TestTableCache {
   public void testPartialTableCache() {
 
 
-    for (int i = 0; i < 10; i++) {
+    for (int i = 0; i< 10; i++) {
       tableCache.put(new CacheKey<>(Integer.toString(i)),
           new CacheValue<>(Optional.of(Integer.toString(i)), i));
     }
 
 
-    for (int i = 0; i < 10; i++) {
+    for (int i=0; i < 10; i++) {
       Assert.assertEquals(Integer.toString(i),
           tableCache.get(new CacheKey<>(Integer.toString(i))).getCacheValue());
     }
@@ -94,7 +94,7 @@ public class TestTableCache {
     // On a full table cache if some one calls cleanup it is a no-op.
     tableCache.evictCache(epochs);
 
-    for (int i = 5; i < 10; i++) {
+    for (int i=5; i < 10; i++) {
       Assert.assertEquals(Integer.toString(i),
           tableCache.get(new CacheKey<>(Integer.toString(i))).getCacheValue());
     }
@@ -109,7 +109,7 @@ public class TestTableCache {
     int cleanupCount = 0;
 
     ArrayList<Long> epochs = new ArrayList();
-    for (long i = 0; i < insertedCount; i += 2) {
+    for (long i=0; i<insertedCount; i+=2) {
       if (cleanupCount++ < 1000) {
         epochs.add(i);
       }
@@ -329,7 +329,7 @@ public class TestTableCache {
         });
 
     // Check we have first 10 entries in cache.
-    for (int i = 1; i <= 10; i++) {
+    for (int i=1; i <= 10; i++) {
       Assert.assertEquals(Integer.toString(i),
           tableCache.get(new CacheKey<>(Integer.toString(i))).getCacheValue());
     }
@@ -357,13 +357,13 @@ public class TestTableCache {
       final int tc = totalCount;
       Assert.assertEquals(tc - deleted, tableCache.size());
       // Check if we have remaining entries.
-      for (int i = 6; i <= totalCount; i++) {
+      for (int i=6; i <= totalCount; i++) {
         Assert.assertEquals(Integer.toString(i), tableCache.get(
             new CacheKey<>(Integer.toString(i))).getCacheValue());
       }
 
       epochs = new ArrayList<>();
-      for (long i = 6; i <= totalCount; i++) {
+      for (long i=6; i<= totalCount; i++) {
         epochs.add(i);
       }
 
@@ -373,7 +373,7 @@ public class TestTableCache {
       Assert.assertEquals(0, tableCache.size());
     } else {
       ArrayList<Long> epochs = new ArrayList<>();
-      for (long i = 0; i <= totalCount; i++) {
+      for (long i=0; i<= totalCount; i++) {
         epochs.add(i);
       }
       tableCache.evictCache(epochs);
@@ -453,7 +453,7 @@ public class TestTableCache {
 
     tableCache.evictCache(epochs);
 
-    if (cacheType == TableCache.CacheType.PARTIAL_CACHE) {
+    if(cacheType == TableCache.CacheType.PARTIAL_CACHE) {
       Assert.assertTrue(tableCache.size() == 0);
       Assert.assertTrue(tableCache.getEpochEntrySet().size() == 0);
     } else {
@@ -475,7 +475,7 @@ public class TestTableCache {
   private int writeToCache(int count, int startVal, long sleep)
       throws InterruptedException {
     int counter = 1;
-    while (counter <= count) {
+    while (counter <= count){
       tableCache.put(new CacheKey<>(Integer.toString(startVal)),
           new CacheValue<>(Optional.of(Integer.toString(startVal)), startVal));
       startVal++;
diff --git a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto
index e2d7b1663e..4830c11b9b 100644
--- a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto
+++ b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto
@@ -74,9 +74,6 @@ message ScmContainerLocationRequest {
   optional ContainerBalancerStatusRequestProto containerBalancerStatusRequest = 35;
   optional FinalizeScmUpgradeRequestProto finalizeScmUpgradeRequest = 36;
   optional QueryUpgradeFinalizationProgressRequestProto queryUpgradeFinalizationProgressRequest = 37;
-  optional GetContainerCountRequestProto getContainerCountRequest = 38;
-  optional GetContainerReplicasRequestProto getContainerReplicasRequest = 39;
-  optional ReplicationManagerReportRequestProto replicationManagerReportRequest = 40;
 }
 
 message ScmContainerLocationResponse {
@@ -122,9 +119,6 @@ message ScmContainerLocationResponse {
   optional ContainerBalancerStatusResponseProto containerBalancerStatusResponse = 35;
   optional FinalizeScmUpgradeResponseProto finalizeScmUpgradeResponse = 36;
   optional QueryUpgradeFinalizationProgressResponseProto queryUpgradeFinalizationProgressResponse = 37;
-  optional GetContainerCountResponseProto getContainerCountResponse = 38;
-  optional GetContainerReplicasResponseProto getContainerReplicasResponse = 39;
-  optional ReplicationManagerReportResponseProto getReplicationManagerReportResponse = 40;
 
   enum Status {
     OK = 1;
@@ -168,9 +162,6 @@ enum Type {
   GetContainerBalancerStatus = 30;
   FinalizeScmUpgrade = 31;
   QueryUpgradeFinalizationProgress = 32;
-  GetContainerCount = 33;
-  GetContainerReplicas = 34;
-  GetReplicationManagerReport = 35;
 }
 
 /**
@@ -219,15 +210,6 @@ message GetContainerWithPipelineResponseProto {
   required ContainerWithPipeline containerWithPipeline = 1;
 }
 
-message GetContainerReplicasRequestProto {
-  required int64 containerID = 1;
-  optional string traceID = 2;
-}
-
-message GetContainerReplicasResponseProto {
-  repeated SCMContainerReplicaProto containerReplica = 1;
-}
-
 message GetContainerWithPipelineBatchRequestProto {
   repeated int64 containerIDs = 1;
   optional string traceID = 2;
@@ -401,13 +383,6 @@ message GetPipelineResponseProto {
   required Pipeline pipeline = 1;
 }
 
-message GetContainerCountRequestProto {
-}
-
-message GetContainerCountResponseProto {
-  required int64 containerCount = 1;
-}
-
 message ActivatePipelineRequestProto {
   required PipelineID pipelineID = 1;
   optional string traceID = 2;
@@ -471,14 +446,6 @@ message ReplicationManagerStatusResponseProto {
   required bool isRunning = 1;
 }
 
-message ReplicationManagerReportRequestProto {
-  optional string traceID = 1;
-}
-
-message ReplicationManagerReportResponseProto {
-  required ReplicationManagerReportProto report = 1;
-}
-
 message FinalizeScmUpgradeRequestProto {
   required string upgradeClientId = 1;
 }
@@ -512,14 +479,11 @@ message GetContainerTokenResponseProto {
 message StartContainerBalancerRequestProto {
   optional string traceID = 1;
   optional double threshold = 2;
-  optional int32 idleiterations = 3 [deprecated = true];
-  optional double maxDatanodesRatioToInvolvePerIteration = 4 [deprecated =
-      true];
+  optional int32 idleiterations = 3;
+  optional double maxDatanodesRatioToInvolvePerIteration = 4;
   optional int64 maxSizeToMovePerIterationInGB = 5;
   optional int64 maxSizeEnteringTargetInGB = 6;
   optional int64 maxSizeLeavingSourceInGB = 7;
-  optional int32 maxDatanodesPercentageToInvolvePerIteration = 8;
-  optional int32 iterations = 9;
 }
 
 message StartContainerBalancerResponseProto {
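
On the wire, the request message above again carries idleiterations (field 3) and maxDatanodesRatioToInvolvePerIteration (field 4) as live fields, with fields 8 and 9 gone. Building a request against the restored schema (values are illustrative only):

    StartContainerBalancerRequestProto request =
        StartContainerBalancerRequestProto.newBuilder()
            .setThreshold(0.1)                               // [0.0, 1.0)
            .setIdleiterations(-1)                           // -1 = unbounded
            .setMaxDatanodesRatioToInvolvePerIteration(0.4)  // [0, 1]
            .setMaxSizeToMovePerIterationInGB(10)
            .build();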
diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
index b55531e8a7..133f4c694f 100644
--- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto
+++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
@@ -379,28 +379,3 @@ message ContainerReplicaHistoryProto {
     required int64 bcsId = 4;
 }
 
-message SCMContainerReplicaProto {
-    required int64 containerID = 1;
-    required string state = 2;
-    required DatanodeDetailsProto datanodeDetails = 3;
-    required string placeOfBirth = 4;
-    required int64 sequenceID = 5;
-    required int64 keyCount = 6;
-    required int64 bytesUsed = 7;
-}
-
-message KeyContainerIDList {
-    required string key = 1;
-    repeated ContainerID container = 2;
-}
-
-message KeyIntValue {
-    required string key = 1;
-    optional int64 value = 2;
-}
-
-message ReplicationManagerReportProto {
-    required int64 timestamp = 1;
-    repeated KeyIntValue stat = 2;
-    repeated KeyContainerIDList statSample = 3;
-}
diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml
index 0b5071a858..d7a5e720b5 100644
--- a/hadoop-hdds/server-scm/pom.xml
+++ b/hadoop-hdds/server-scm/pom.xml
@@ -106,6 +106,16 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>assertj-core</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.openjdk.jmh</groupId>
+      <artifactId>jmh-core</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.openjdk.jmh</groupId>
+      <artifactId>jmh-generator-annprocess</artifactId>
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>org.mockito</groupId>
       <artifactId>mockito-core</artifactId>
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
index 78a87b8820..3c5fdc0f2e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
@@ -442,7 +442,7 @@ public class DeletedBlockLogImpl
       commitTransactions(ackProto.getResultsList(),
           UUID.fromString(ackProto.getDnId()));
       metrics.incrBlockDeletionCommandSuccess();
-    } else if (status == CommandStatus.Status.FAILED) {
+    } else if (status == CommandStatus.Status.FAILED){
       metrics.incrBlockDeletionCommandFailure();
     } else {
       LOG.error("Delete Block Command is not executed yet.");
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaCount.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaCount.java
index 3dab4ad83f..26368b46e4 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaCount.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaCount.java
@@ -100,15 +100,15 @@ public class ContainerReplicaCount {
 
   @Override
   public String toString() {
-    return "Container State: " + container.getState() +
-        " Replica Count: " + replica.size() +
-        " Healthy Count: " + healthyCount +
-        " Decommission Count: " + decommissionCount +
-        " Maintenance Count: " + maintenanceCount +
-        " inFlightAdd Count: " + inFlightAdd +
-        " inFightDel Count: " + inFlightDel +
-        " ReplicationFactor: " + repFactor +
-        " minMaintenance Count: " + minHealthyForMaintenance;
+    return "Container State: " +container.getState()+
+        " Replica Count: "+replica.size()+
+        " Healthy Count: "+healthyCount+
+        " Decommission Count: "+decommissionCount+
+        " Maintenance Count: "+maintenanceCount+
+        " inFlightAdd Count: "+inFlightAdd+
+        " inFightDel Count: "+inFlightDel+
+        " ReplicationFactor: "+repFactor+
+        " minMaintenance Count: "+minHealthyForMaintenance;
   }
 
   /**
@@ -269,14 +269,4 @@ public class ContainerReplicaCount {
         .allMatch(r -> ReplicationManager.compareState(
             container.getState(), r.getState()));
   }
-
-  /**
-   * Returns true is there are no replicas of a container available, ie the
-   * set of container replica passed in the constructor has zero entries.
-   *
-   * @return true if there are no replicas, false otherwise.
-   */
-  public boolean isMissing() {
-    return replica.size() == 0;
-  }
 }
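
Note: this hunk drops the isMissing() helper, so callers can no longer ask
ContainerReplicaCount whether a container has lost every replica. A minimal
sketch of the equivalent check done at the call site, assuming the caller still
holds the Set<ContainerReplica> that was passed to the constructor:

    import java.util.Set;

    final class ReplicaChecks {
      // An empty replica set is exactly what the removed isMissing() reported.
      static boolean hasNoReplicas(Set<?> replicas) {
        return replicas.isEmpty();
      }
    }
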
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
index 8a50884321..32804d7a8d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
@@ -164,7 +164,7 @@ public class ContainerReportHandler extends AbstractContainerReportHandler
       try {
         processContainerReplica(datanodeDetails, replicaProto, publisher);
       } catch (ContainerNotFoundException e) {
-        if (unknownContainerHandleAction.equals(
+        if(unknownContainerHandleAction.equals(
             UNKNOWN_CONTAINER_ACTION_WARN)) {
           LOG.error("Received container report for an unknown container" +
               " {} from datanode {}.", replicaProto.getContainerID(),
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
index 59e73a3651..47842c0ad5 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
@@ -87,7 +87,6 @@ import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.hdds.utils.db.Table;
 import static org.apache.hadoop.ozone.ClientVersions.CURRENT_VERSION;
-import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport.HealthState;
 
 import com.google.protobuf.GeneratedMessage;
 import static org.apache.hadoop.hdds.conf.ConfigTag.OZONE;
@@ -255,11 +254,6 @@ public class ReplicationManager implements SCMService {
    */
   private final MoveScheduler moveScheduler;
 
-  /**
-   * Report object that is refreshed each time replication Manager runs.
-   */
-  private ReplicationManagerReport containerReport;
-
   /**
    * Constructs ReplicationManager instance with the given configuration.
    *
@@ -292,7 +286,6 @@ public class ReplicationManager implements SCMService {
     this.inflightMoveFuture = new ConcurrentHashMap<>();
     this.minHealthyForMaintenance = rmConf.getMaintenanceReplicaMinimum();
     this.clock = clock;
-    this.containerReport = new ReplicationManagerReport();
 
     this.waitTimeInMillis = conf.getTimeDuration(
         HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT,
@@ -370,29 +363,16 @@ public class ReplicationManager implements SCMService {
    * This is intended to be used in tests.
    */
   public synchronized void processAll() {
-    if (!shouldRun()) {
-      LOG.info("Replication Manager is not ready to run until {}ms after " +
-          "safemode exit", waitTimeInMillis);
-      return;
-    }
     final long start = clock.millis();
     final List<ContainerInfo> containers =
         containerManager.getContainers();
-    ReplicationManagerReport report = new ReplicationManagerReport();
-    for (ContainerInfo c : containers) {
-      processContainer(c, report);
-    }
-    report.setComplete();
-    containerReport = report;
+    containers.forEach(this::processContainer);
+
     LOG.info("Replication Monitor Thread took {} milliseconds for" +
             " processing {} containers.", clock.millis() - start,
         containers.size());
   }
 
-  public ReplicationManagerReport getContainerReport() {
-    return containerReport;
-  }
-
   /**
    * ReplicationMonitor thread runnable. This wakes up at configured
    * interval and processes all the containers in the system.
@@ -418,9 +398,7 @@ public class ReplicationManager implements SCMService {
    *
    * @param container ContainerInfo
    */
-  @SuppressWarnings("checkstyle:methodlength")
-  private void processContainer(ContainerInfo container,
-      ReplicationManagerReport report) {
+  private void processContainer(ContainerInfo container) {
     if (!shouldRun()) {
       return;
     }
@@ -432,7 +410,6 @@ public class ReplicationManager implements SCMService {
         final Set<ContainerReplica> replicas = containerManager
             .getContainerReplicas(id);
         final LifeCycleState state = container.getState();
-        report.increment(state);
 
         /*
          * We don't take any action if the container is in OPEN state and
@@ -441,8 +418,6 @@ public class ReplicationManager implements SCMService {
          */
         if (state == LifeCycleState.OPEN) {
           if (!isOpenContainerHealthy(container, replicas)) {
-            report.incrementAndSample(
-                HealthState.OPEN_UNHEALTHY, container.containerID());
             eventPublisher.fireEvent(SCMEvents.CLOSE_CONTAINER, id);
           }
           return;
@@ -467,14 +442,10 @@ public class ReplicationManager implements SCMService {
          * If the container is in QUASI_CLOSED state, check and close the
          * container if possible.
          */
-        if (state == LifeCycleState.QUASI_CLOSED) {
-          if (canForceCloseContainer(container, replicas)) {
-            forceCloseContainer(container, replicas);
-            return;
-          } else {
-            report.incrementAndSample(HealthState.QUASI_CLOSED_STUCK,
-                container.containerID());
-          }
+        if (state == LifeCycleState.QUASI_CLOSED &&
+            canForceCloseContainer(container, replicas)) {
+          forceCloseContainer(container, replicas);
+          return;
         }
 
         /*
@@ -487,7 +458,7 @@ public class ReplicationManager implements SCMService {
         updateInflightAction(container, inflightReplication,
             action -> replicas.stream()
                 .anyMatch(r -> r.getDatanodeDetails().equals(action.datanode)),
-            () -> metrics.incrNumReplicationCmdsTimeout(),
+            ()-> metrics.incrNumReplicationCmdsTimeout(),
             action -> updateCompletedReplicationMetrics(container, action));
 
         updateInflightAction(container, inflightDeletion,
@@ -527,8 +498,6 @@ public class ReplicationManager implements SCMService {
          * exact number of replicas in the same state.
          */
         if (isContainerEmpty(container, replicas)) {
-          report.incrementAndSample(
-              HealthState.EMPTY, container.containerID());
           /*
            *  If container is empty, schedule task to delete the container.
            */
@@ -540,22 +509,8 @@ public class ReplicationManager implements SCMService {
          * Check if the container is under replicated and take appropriate
          * action.
          */
-        boolean sufficientlyReplicated = replicaSet.isSufficientlyReplicated();
-        boolean placementSatisfied = placementStatus.isPolicySatisfied();
-        if (!sufficientlyReplicated || !placementSatisfied) {
-          if (!sufficientlyReplicated) {
-            report.incrementAndSample(
-                HealthState.UNDER_REPLICATED, container.containerID());
-            if (replicaSet.isMissing()) {
-              report.incrementAndSample(HealthState.MISSING,
-                  container.containerID());
-            }
-          }
-          if (!placementSatisfied) {
-            report.incrementAndSample(HealthState.MIS_REPLICATED,
-                container.containerID());
-
-          }
+        if (!replicaSet.isSufficientlyReplicated()
+            || !placementStatus.isPolicySatisfied()) {
           handleUnderReplicatedContainer(container,
               replicaSet, placementStatus);
           return;
@@ -566,8 +521,6 @@ public class ReplicationManager implements SCMService {
          * action.
          */
         if (replicaSet.isOverReplicated()) {
-          report.incrementAndSample(HealthState.OVER_REPLICATED,
-              container.containerID());
           handleOverReplicatedContainer(container, replicaSet);
           return;
         }
@@ -578,8 +531,6 @@ public class ReplicationManager implements SCMService {
        are not in the same state as the container itself.
        */
         if (!replicaSet.isHealthy()) {
-          report.incrementAndSample(HealthState.UNHEALTHY,
-              container.containerID());
           handleUnstableContainer(container, replicas);
         }
       }
@@ -624,7 +575,7 @@ public class ReplicationManager implements SCMService {
       final List<InflightAction> actions = inflightActions.get(id);
 
       Iterator<InflightAction> iter = actions.iterator();
-      while (iter.hasNext()) {
+      while(iter.hasNext()) {
         try {
           InflightAction a = iter.next();
           NodeStatus status = nodeManager.getNodeStatus(a.datanode);
@@ -919,7 +870,7 @@ public class ReplicationManager implements SCMService {
    */
   private boolean isPolicySatisfiedAfterMove(ContainerInfo cif,
                     DatanodeDetails srcDn, DatanodeDetails targetDn,
-                    final List<ContainerReplica> replicas) {
+                    final List<ContainerReplica> replicas){
     Set<ContainerReplica> movedReplicas =
         replicas.stream().collect(Collectors.toSet());
     movedReplicas.removeIf(r -> r.getDatanodeDetails().equals(srcDn));
@@ -1157,7 +1108,7 @@ public class ReplicationManager implements SCMService {
 
       if (replicaSet.isSufficientlyReplicated()
           && placementStatus.isPolicySatisfied()) {
-        LOG.info("The container {} with replicas {} is sufficiently " +
+        LOG.info("The container {} with replicas {} is sufficiently "+
             "replicated and is not mis-replicated",
             container.getContainerID(), replicaSet);
         return;
@@ -1348,8 +1299,8 @@ public class ReplicationManager implements SCMService {
     ContainerReplicaCount replicaCount =
         getContainerReplicaCount(cif, replicaSet);
 
-    if (!replicaSet.stream()
-        .anyMatch(r -> r.getDatanodeDetails().equals(srcDn))) {
+    if(!replicaSet.stream()
+        .anyMatch(r -> r.getDatanodeDetails().equals(srcDn))){
       // if the target is present but source disappears somehow,
       // we can consider the move successful.
       compleleteMoveFutureWithResult(cid, MoveResult.COMPLETED);
@@ -1654,7 +1605,7 @@ public class ReplicationManager implements SCMService {
     try {
       return nodeManager.getNodeStatus(dn);
     } catch (NodeNotFoundException e) {
-      throw new IllegalStateException("Unable to find NodeStatus for " + dn, e);
+      throw new IllegalStateException("Unable to find NodeStatus for "+dn, e);
     }
   }
 
@@ -1944,7 +1895,7 @@ public class ReplicationManager implements SCMService {
       try {
         cid = ContainerID.getFromProtobuf(contianerIDProto);
         mp = MoveDataNodePair.getFromProtobuf(mdnpp);
-        if (!inflightMove.containsKey(cid)) {
+        if(!inflightMove.containsKey(cid)) {
           transactionBuffer.addToBuffer(moveTable, cid, mp);
           inflightMove.putIfAbsent(cid, mp);
         }
@@ -2055,8 +2006,8 @@ public class ReplicationManager implements SCMService {
       boolean isTgtExist = replicas.stream()
           .anyMatch(r -> r.getDatanodeDetails().equals(v.getTgt()));
 
-      if (isSrcExist) {
-        if (isTgtExist) {
+      if(isSrcExist) {
+        if(isTgtExist) {
           //the former scm leader may or may not send the deletion command
          //before reelection. Here, we just try to send the command again.
           deleteSrcDnForMove(cif, replicas);
@@ -2081,8 +2032,8 @@ public class ReplicationManager implements SCMService {
    * complete the CompletableFuture of the container in the given Map with
    * a given MoveResult.
    */
-  private void compleleteMoveFutureWithResult(ContainerID cid, MoveResult mr) {
-    if (inflightMoveFuture.containsKey(cid)) {
+  private void compleleteMoveFutureWithResult(ContainerID cid, MoveResult mr){
+    if(inflightMoveFuture.containsKey(cid)) {
       inflightMoveFuture.get(cid).complete(mr);
       inflightMoveFuture.remove(cid);
     }
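
Note: the bulk of this hunk removes the per-run ReplicationManagerReport, which
counted every container lifecycle state and kept a bounded sample of container
IDs per health state (UNDER_REPLICATED, MIS_REPLICATED, MISSING, and so on). A
rough sketch of that counting pattern, using hypothetical names rather than the
project's classes:

    import java.util.ArrayList;
    import java.util.EnumMap;
    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.atomic.LongAdder;

    final class HealthReport<S extends Enum<S>> {
      private static final int SAMPLE_LIMIT = 100;
      private final Map<S, LongAdder> stats;
      private final Map<S, List<Long>> samples;

      HealthReport(Class<S> states) {
        stats = new EnumMap<>(states);
        samples = new EnumMap<>(states);
        for (S s : states.getEnumConstants()) {
          stats.put(s, new LongAdder());
          samples.put(s, new ArrayList<>());
        }
      }

      void incrementAndSample(S state, long containerId) {
        stats.get(state).increment();
        List<Long> sample = samples.get(state);
        if (sample.size() < SAMPLE_LIMIT) {
          sample.add(containerId);  // remember only the first offenders
        }
      }

      long getStat(S state) {
        return stats.get(state).longValue();
      }
    }
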
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java
index 018f0dfd25..a975f04cfc 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java
@@ -70,7 +70,7 @@ public abstract class AbstractFindTargetGreedy implements FindTargetStrategy {
     potentialTargets = pt;
   }
 
-  private void setUpperLimit(Double upperLimit) {
+  private void setUpperLimit(Double upperLimit){
     this.upperLimit = upperLimit;
   }
 
@@ -199,12 +199,12 @@ public abstract class AbstractFindTargetGreedy implements FindTargetStrategy {
    */
   @Override
   public void increaseSizeEntering(DatanodeDetails target, long size) {
-    if (sizeEnteringNode.containsKey(target)) {
+    if(sizeEnteringNode.containsKey(target)) {
       long totalEnteringSize = sizeEnteringNode.get(target) + size;
       sizeEnteringNode.put(target, totalEnteringSize);
       potentialTargets.removeIf(
           c -> c.getDatanodeDetails().equals(target));
-      if (totalEnteringSize < config.getMaxSizeEnteringTarget()) {
+      if(totalEnteringSize < config.getMaxSizeEnteringTarget()) {
         //reorder
         potentialTargets.add(nodeManager.getUsageInfo(target));
       }
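
Note: increaseSizeEntering above shows the standard idiom for mutating the sort
key of an element in an ordered collection: remove the entry, update the key,
then re-insert (here only while the node stays under maxSizeEnteringTarget). A
self-contained sketch of the same idiom with illustrative names:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.TreeSet;

    final class EnteringSizeTracker {
      private final Map<String, Long> sizeEntering = new HashMap<>();
      // Targets ordered by how much data is already flowing into them.
      private final TreeSet<String> potentialTargets = new TreeSet<>((a, b) -> {
        int c = Long.compare(sizeEntering.getOrDefault(a, 0L),
            sizeEntering.getOrDefault(b, 0L));
        return c != 0 ? c : a.compareTo(b);
      });

      void increaseSizeEntering(String target, long size, long maxEntering) {
        potentialTargets.remove(target);      // remove before the key changes
        long total = sizeEntering.merge(target, size, Long::sum);
        if (total < maxEntering) {
          potentialTargets.add(target);       // re-insert to restore ordering
        }
      }
    }
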
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java
index 995a5da111..d7d3b6617f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java
@@ -74,7 +74,7 @@ public class ContainerBalancer {
   private long maxSizeToMovePerIteration;
   private int countDatanodesInvolvedPerIteration;
   private long sizeMovedPerIteration;
-  private int iterations;
+  private int idleIteration;
   private List<DatanodeUsageInfo> unBalancedNodes;
   private List<DatanodeUsageInfo> overUtilizedNodes;
   private List<DatanodeUsageInfo> underUtilizedNodes;
@@ -155,12 +155,9 @@ public class ContainerBalancer {
         return false;
       }
 
-      this.config = balancerConfiguration;
-      if (!validateConfiguration(config)) {
-        return false;
-      }
-      ozoneConfiguration.setFromObject(balancerConfiguration);
       balancerRunning = true;
+      this.config = balancerConfiguration;
+      validateConfiguration(config);
       LOG.info("Starting Container Balancer...{}", this);
 
       //we should start a new balancer thread async
@@ -179,12 +176,23 @@ public class ContainerBalancer {
    * Balances the cluster.
    */
   private void balance() {
-    this.iterations = config.getIterations();
-    if (this.iterations == -1) {
+    this.idleIteration = config.getIdleIteration();
+    if(this.idleIteration == -1) {
       //run balancer infinitely
-      this.iterations = Integer.MAX_VALUE;
+      this.idleIteration = Integer.MAX_VALUE;
+    }
+    this.threshold = config.getThreshold();
+    this.maxDatanodesRatioToInvolvePerIteration =
+        config.getMaxDatanodesRatioToInvolvePerIteration();
+    this.maxSizeToMovePerIteration = config.getMaxSizeToMovePerIteration();
+    if (config.getNetworkTopologyEnable()) {
+      findTargetStrategy = new FindTargetGreedyByNetworkTopology(
+          containerManager, placementPolicy, nodeManager, networkTopology);
+    } else {
+      findTargetStrategy = new FindTargetGreedyByUsageInfo(containerManager,
+          placementPolicy, nodeManager);
     }
-    for (int i = 0; i < iterations && balancerRunning; i++) {
+    for (int i = 0; i < idleIteration && balancerRunning; i++) {
       // stop balancing if the iteration could not be initialized
       if (!initializeIteration()) {
         stop();
@@ -207,7 +215,7 @@ public class ContainerBalancer {
 
       // wait for configured time before starting next iteration, unless
       // this was the final iteration
-      if (i != iterations - 1) {
+      if (i != idleIteration - 1) {
         synchronized (this) {
           try {
             wait(config.getBalancingInterval().toMillis());
@@ -249,17 +257,6 @@ public class ContainerBalancer {
       }
       return false;
     }
-    this.threshold = config.getThresholdAsRatio();
-    this.maxDatanodesRatioToInvolvePerIteration =
-        config.getMaxDatanodesRatioToInvolvePerIteration();
-    this.maxSizeToMovePerIteration = config.getMaxSizeToMovePerIteration();
-    if (config.getNetworkTopologyEnable()) {
-      findTargetStrategy = new FindTargetGreedyByNetworkTopology(
-          containerManager, placementPolicy, nodeManager, networkTopology);
-    } else {
-      findTargetStrategy = new FindTargetGreedyByUsageInfo(containerManager,
-          placementPolicy, nodeManager);
-    }
     this.excludeNodes = config.getExcludeNodes();
     this.includeNodes = config.getIncludeNodes();
     // include/exclude nodes from balancing according to configs
@@ -523,7 +520,7 @@ public class ContainerBalancer {
   }
 
   /**
-   * Checks if limits maxDatanodesPercentageToInvolvePerIteration and
+   * Checks if limits maxDatanodesRatioToInvolvePerIteration and
    * maxSizeToMovePerIteration have not been hit.
    *
    * @return {@link IterationResult#MAX_DATANODES_TO_INVOLVE_REACHED} if reached
@@ -769,7 +766,7 @@ public class ContainerBalancer {
     LOG.info("Container Balancer stopped successfully.");
   }
 
-  private boolean validateConfiguration(ContainerBalancerConfiguration conf) {
+  private void validateConfiguration(ContainerBalancerConfiguration conf) {
     // maxSizeEnteringTarget and maxSizeLeavingSource should by default be
     // greater than container size
     long size = (long) ozoneConfiguration.getStorageSize(
@@ -779,12 +776,10 @@ public class ContainerBalancer {
     if (conf.getMaxSizeEnteringTarget() <= size) {
       LOG.info("MaxSizeEnteringTarget should be larger than " +
           "ozone.scm.container.size");
-      return false;
     }
     if (conf.getMaxSizeLeavingSource() <= size) {
       LOG.info("MaxSizeLeavingSource should be larger than " +
           "ozone.scm.container.size");
-      return false;
     }
 
     // balancing interval should be greater than DUFactory refresh period
@@ -793,9 +788,7 @@ public class ContainerBalancer {
     if (conf.getBalancingInterval().toMillis() <= balancingInterval) {
       LOG.info("balancing.iteration.interval should be larger than " +
           "hdds.datanode.du.refresh.period.");
-      return false;
     }
-    return true;
   }
 
   public void setNodeManager(NodeManager nodeManager) {
@@ -873,7 +866,7 @@ public class ContainerBalancer {
     return countDatanodesInvolvedPerIteration;
   }
 
-  public long getSizeMovedPerIteration() {
+  long getSizeMovedPerIteration() {
     return sizeMovedPerIteration;
   }
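
Note: two details of the restored balancer flow are easy to miss. The configured
iteration count keeps -1 as a run-forever sentinel, mapped to Integer.MAX_VALUE
so the loop bound stays simple, and validateConfiguration goes back to logging
advisory messages instead of vetoing startup. A one-method sketch of the
sentinel mapping:

    static int effectiveIterations(int configured) {
      // -1 means "balance forever"; MAX_VALUE avoids special-casing the loop.
      return configured == -1 ? Integer.MAX_VALUE : configured;
    }
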
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java
index 11a8a98dbe..a51e0bd567 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java
@@ -43,25 +43,27 @@ public final class ContainerBalancerConfiguration {
       LoggerFactory.getLogger(ContainerBalancerConfiguration.class);
 
   @Config(key = "utilization.threshold", type = ConfigType.AUTO, defaultValue =
-      "10", tags = {ConfigTag.BALANCER},
-      description = "Threshold is a percentage in the range of 0 to 100. A " +
+      "0.1", tags = {ConfigTag.BALANCER},
+      description = "Threshold is a fraction in the range of 0 to 1. A " +
           "cluster is considered balanced if for each datanode, the " +
           "utilization of the datanode (used space to capacity ratio) differs" +
           " from the utilization of the cluster (used space to capacity ratio" +
-          " of the entire cluster) no more than the threshold.")
-  private String threshold = "10";
+          " of the entire cluster) no more than the threshold value.")
+  private String threshold = "0.1";
 
-  @Config(key = "datanodes.involved.max.percentage.per.iteration", type =
-      ConfigType.INT, defaultValue = "20", tags = {ConfigTag.BALANCER},
-      description = "Maximum percentage of healthy, in service datanodes " +
-          "that can be involved in balancing in one iteration.")
-  private int maxDatanodesPercentageToInvolvePerIteration = 20;
+  @Config(key = "datanodes.involved.max.ratio.per.iteration", type =
+      ConfigType.AUTO,
+      defaultValue = "0.2", tags = {ConfigTag.BALANCER}, description = "The " +
+      "ratio of maximum number of datanodes that should be involved in " +
+      "balancing in one iteration to the total number of healthy, in service " +
+      "nodes known to container balancer.")
+  private String maxDatanodesRatioToInvolvePerIteration = "0.2";
 
   @Config(key = "size.moved.max.per.iteration", type = ConfigType.SIZE,
-      defaultValue = "500GB", tags = {ConfigTag.BALANCER},
+      defaultValue = "30GB", tags = {ConfigTag.BALANCER},
       description = "The maximum size of data in bytes that will be moved " +
           "by Container Balancer in one iteration.")
-  private long maxSizeToMovePerIteration = 500 * OzoneConsts.GB;
+  private long maxSizeToMovePerIteration = 30 * OzoneConsts.GB;
 
   @Config(key = "size.entering.target.max", type = ConfigType.SIZE,
       defaultValue = "26GB", tags = {ConfigTag.BALANCER}, description = "The " +
@@ -79,11 +81,10 @@ public final class ContainerBalancerConfiguration {
       " (or default) ozone.scm.container.size.")
   private long maxSizeLeavingSource;
 
-  @Config(key = "iterations", type = ConfigType.INT,
+  @Config(key = "idle.iterations", type = ConfigType.INT,
       defaultValue = "10", tags = {ConfigTag.BALANCER},
-      description = "The number of iterations that Container Balancer will " +
-          "run for.")
-  private int iterations = 10;
+      description = "The idle iteration count of Container Balancer.")
+  private int idleIterations = 10;
 
   @Config(key = "exclude.containers", type = ConfigType.STRING, defaultValue =
       "", tags = {ConfigTag.BALANCER}, description = "List of container IDs " +
@@ -92,7 +93,7 @@ public final class ContainerBalancerConfiguration {
 
   @Config(key = "move.timeout", type = ConfigType.TIME, defaultValue = "30m",
       tags = {ConfigTag.BALANCER}, description =
-      "The amount of time to allow a single container to move " +
+      "The amount of time in minutes to allow a single container to move " +
           "from source to target.")
   private long moveTimeout = Duration.ofMinutes(30).toMillis();
 
@@ -100,7 +101,7 @@ public final class ContainerBalancerConfiguration {
       defaultValue = "70m", tags = {
       ConfigTag.BALANCER}, description = "The interval period between each " +
       "iteration of Container Balancer.")
-  private long balancingInterval = Duration.ofMinutes(70).toMillis();
+  private long balancingInterval;
 
   @Config(key = "include.datanodes", type = ConfigType.STRING, defaultValue =
       "", tags = {ConfigTag.BALANCER}, description = "A list of Datanode " +
@@ -126,61 +127,46 @@ public final class ContainerBalancerConfiguration {
   /**
    * Gets the threshold value for Container Balancer.
    *
-   * @return percentage value in the range 0 to 100
+   * @return a fraction in the range 0 to 1
    */
   public double getThreshold() {
     return Double.parseDouble(threshold);
   }
 
-  public double getThresholdAsRatio() {
-    return Double.parseDouble(threshold) / 100;
-  }
-
   /**
    * Sets the threshold value for Container Balancer.
    *
-   * @param threshold a percentage value in the range 0 to 100
+   * @param threshold a fraction in the range 0 to 1
    */
   public void setThreshold(double threshold) {
-    if (threshold < 0d || threshold >= 100d) {
+    if (threshold < 0 || threshold > 1) {
       throw new IllegalArgumentException(
-          "Threshold must be a percentage(double) in the range 0 to 100.");
+          "Threshold must be a fraction in the range 0 to 1.");
     }
     this.threshold = String.valueOf(threshold);
   }
 
   /**
-   * Gets the iteration count for Container Balancer. A value of -1 means
-   * infinite number of iterations.
+   * Gets the idle iteration value for Container Balancer.
    *
-   * @return a value greater than 0, or -1
+   * @return an idle iteration count larger than 0
    */
-  public int getIterations() {
-    return iterations;
+  public int getIdleIteration() {
+    return idleIterations;
   }
 
   /**
-   * Sets the number of iterations for Container Balancer.
+   * Sets the idle iteration value for Container Balancer.
    *
-   * @param count a value greater than 0, or -1 for running balancer infinitely
+   * @param count an idle iteration count larger than 0
    */
-  public void setIterations(int count) {
+  public void setIdleIteration(int count) {
     if (count < -1 || 0 == count) {
       throw new IllegalArgumentException(
-          "Iteration count must be greater than 0, or " +
-              "-1(for running balancer infinitely).");
+          "Idle iteration count must be larger than 0 or " +
+              "-1(for infinitely running).");
     }
-    this.iterations = count;
-  }
-
-  /**
-   * Gets the maximum percentage of healthy, in-service datanodes that will be
-   * involved in balancing in one iteration.
-   *
-   * @return percentage as an integer from 0 up to and including 100
-   */
-  public int getMaxDatanodesPercentageToInvolvePerIteration() {
-    return maxDatanodesPercentageToInvolvePerIteration;
+    this.idleIterations = count;
   }
 
   /**
@@ -202,36 +188,37 @@ public final class ContainerBalancerConfiguration {
   }
 
   /**
-   * Gets the ratio of maximum datanodes involved in balancing to the total
-   * number of healthy, in-service datanodes known to SCM.
+   * Gets the ratio of maximum number of datanodes that will be involved in
+   * balancing by Container Balancer in one iteration to the total number of
+   * healthy, in-service nodes known to balancer.
    *
-   * @return ratio as a double from 0 up to and including 1
+   * @return maximum datanodes to involve divided by total healthy,
+   * in-service nodes
    */
   public double getMaxDatanodesRatioToInvolvePerIteration() {
-    return maxDatanodesPercentageToInvolvePerIteration / 100d;
+    return Double.parseDouble(maxDatanodesRatioToInvolvePerIteration);
   }
 
   /**
-   * Sets the maximum percentage of healthy, in-service datanodes that will be
-   * involved in balancing in one iteration.
+   * Sets the ratio of maximum number of datanodes that will be involved in
+   * balancing by Container Balancer in one iteration to the total number of
+   * healthy, in-service nodes known to balancer.
    *
-   * @param maxDatanodesPercentageToInvolvePerIteration number of datanodes
-   *                                                    to involve divided by
-   *                                                    total number of
-   *                                                    healthy, in-service
-   *                                                    datanodes multiplied
-   *                                                    by 100
+   * @param maxDatanodesRatioToInvolvePerIteration number of datanodes to
+   *                                               involve divided by total
+   *                                               number of healthy, in
+   *                                               service nodes
    */
-  public void setMaxDatanodesPercentageToInvolvePerIteration(
-      int maxDatanodesPercentageToInvolvePerIteration) {
-    if (maxDatanodesPercentageToInvolvePerIteration < 0 ||
-        maxDatanodesPercentageToInvolvePerIteration > 100) {
-      throw new IllegalArgumentException(String.format("Argument %d is " +
-              "illegal. Percentage must be from 0 up to and including 100.",
-          maxDatanodesPercentageToInvolvePerIteration));
+  public void setMaxDatanodesRatioToInvolvePerIteration(
+      double maxDatanodesRatioToInvolvePerIteration) {
+    if (maxDatanodesRatioToInvolvePerIteration < 0 ||
+        maxDatanodesRatioToInvolvePerIteration > 1) {
+      throw new IllegalArgumentException("Max datanodes to involve ratio must" +
+          " be a double greater than equal to zero and lesser than equal to " +
+          "one.");
     }
-    this.maxDatanodesPercentageToInvolvePerIteration =
-        maxDatanodesPercentageToInvolvePerIteration;
+    this.maxDatanodesRatioToInvolvePerIteration =
+        String.valueOf(maxDatanodesRatioToInvolvePerIteration);
   }
 
   /**
@@ -360,12 +347,12 @@ public final class ContainerBalancerConfiguration {
     return String.format("Container Balancer Configuration values:%n" +
             "%-50s %s%n" +
             "%-50s %s%n" +
-            "%-50s %d%n" +
-            "%-50s %dGB%n" +
-            "%-50s %dGB%n" +
+            "%-50s %s%n" +
+            "%-50s %dGB%n"+
+            "%-50s %dGB%n"+
             "%-50s %dGB%n", "Key", "Value", "Threshold",
-        threshold, "Max Datanodes to Involve per Iteration(percent)",
-        maxDatanodesPercentageToInvolvePerIteration,
+        threshold, "Max Datanodes to Involve per Iteration(ratio)",
+        maxDatanodesRatioToInvolvePerIteration,
         "Max Size to Move per Iteration",
         maxSizeToMovePerIteration / OzoneConsts.GB,
         "Max Size Entering Target per Iteration",
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java
index 540d26356d..591461d887 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java
@@ -33,7 +33,7 @@ import java.util.UUID;
  * The selection criteria for selecting source datanodes , the containers of
  * which will be moved out.
  */
-public class FindSourceGreedy implements FindSourceStrategy {
+public class FindSourceGreedy implements FindSourceStrategy{
   private static final Logger LOG =
       LoggerFactory.getLogger(FindSourceGreedy.class);
   private Map<DatanodeDetails, Long> sizeLeavingNode;
@@ -84,7 +84,7 @@ public class FindSourceGreedy implements FindSourceStrategy {
   @Override
   public void increaseSizeLeaving(DatanodeDetails dui, long size) {
     Long currentSize = sizeLeavingNode.get(dui);
-    if (currentSize != null) {
+    if(currentSize != null) {
       sizeLeavingNode.put(dui, currentSize + size);
       //reorder according to the latest sizeLeavingNode
       potentialSources.add(nodeManager.getUsageInfo(dui));
@@ -114,7 +114,7 @@ public class FindSourceGreedy implements FindSourceStrategy {
    * data nodes.
    */
   @Override
-  public void removeCandidateSourceDataNode(DatanodeDetails dui) {
+  public void removeCandidateSourceDataNode(DatanodeDetails dui){
     potentialSources.removeIf(a -> a.getDatanodeDetails().equals(dui));
   }
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java
index bf0ea7cb38..c799b02eee 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java
@@ -47,7 +47,7 @@ public final class ContainerPlacementPolicyFactory {
   public static PlacementPolicy getPolicy(
       ConfigurationSource conf, final NodeManager nodeManager,
       NetworkTopology clusterMap, final boolean fallback,
-      SCMContainerPlacementMetrics metrics) throws SCMException {
+      SCMContainerPlacementMetrics metrics) throws SCMException{
     final Class<? extends PlacementPolicy> placementClass = conf
         .getClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
             OZONE_SCM_CONTAINER_PLACEMENT_IMPL_DEFAULT,
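
Note: the factory above resolves the placement policy reflectively from a
configured class name, a default implementation, and a required interface. A
compact stand-in for that conf.getClass(...) idiom using only JDK reflection;
all names here are illustrative:

    import java.lang.reflect.Constructor;

    final class PolicyLoader {
      interface PlacementPolicy { }

      static PlacementPolicy load(String configuredClassName,
          Class<? extends PlacementPolicy> defaultClass) throws Exception {
        Class<?> cls = configuredClassName == null
            ? defaultClass
            : Class.forName(configuredClassName);
        // Fail fast if the configured class does not implement the contract.
        Class<? extends PlacementPolicy> policyCls =
            cls.asSubclass(PlacementPolicy.class);
        Constructor<? extends PlacementPolicy> ctor =
            policyCls.getDeclaredConstructor();
        return ctor.newInstance();
      }
    }
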
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java
index 22bdf21df9..1ca68bd3eb 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.ozone.OzoneConsts;
 /**
  * This class is for maintaining Topology aware container placement statistics.
  */
-@Metrics(about = "SCM Container Placement Metrics", context = OzoneConsts.OZONE)
+@Metrics(about="SCM Container Placement Metrics", context = OzoneConsts.OZONE)
 public class SCMContainerPlacementMetrics implements MetricsSource {
   public static final String SOURCE_NAME =
       SCMContainerPlacementMetrics.class.getSimpleName();
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java
index 2631a1d951..d46713b602 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java
@@ -55,7 +55,7 @@ public final class SCMContainerPlacementRackAware
   private final NetworkTopology networkTopology;
   private boolean fallback;
   private static final int RACK_LEVEL = 1;
-  private static final int MAX_RETRY = 3;
+  private static final int MAX_RETRY= 3;
   private final SCMContainerPlacementMetrics metrics;
   // Used to check the placement policy is validated in the parent class
   private static final int REQUIRED_RACKS = 2;
@@ -118,7 +118,7 @@ public final class SCMContainerPlacementRackAware
       mutableFavoredNodes.addAll(favoredNodes);
       mutableFavoredNodes.removeAll(excludedNodes);
     }
-    int favoredNodeNum = mutableFavoredNodes == null ? 0 :
+    int favoredNodeNum = mutableFavoredNodes == null? 0 :
         mutableFavoredNodes.size();
 
     List<DatanodeDetails> chosenNodes = new ArrayList<>();
@@ -195,7 +195,7 @@ public final class SCMContainerPlacementRackAware
       // in the same rack, then choose nodes on different racks, otherwise,
       // choose one on the same rack as one of excluded nodes, remaining chosen
       // are on different racks.
-      for (int i = 0; i < excludedNodesCount; i++) {
+      for(int i = 0; i < excludedNodesCount; i++) {
         for (int j = i + 1; j < excludedNodesCount; j++) {
           if (networkTopology.isSameParent(
               excludedNodes.get(i), excludedNodes.get(j))) {
@@ -257,7 +257,7 @@ public final class SCMContainerPlacementRackAware
     int maxRetry = MAX_RETRY;
     List<String> excludedNodesForCapacity = null;
     boolean isFallbacked = false;
-    while (true) {
+    while(true) {
       metrics.incrDatanodeChooseAttemptCount();
       DatanodeDetails node = null;
       if (affinityNodes != null) {
@@ -348,8 +348,8 @@ public final class SCMContainerPlacementRackAware
     Preconditions.checkArgument(chosenNodes != null);
     List<DatanodeDetails> excludedNodeList = excludedNodes != null ?
         excludedNodes : chosenNodes;
-    int favoredNodeNum = favoredNodes == null ? 0 : favoredNodes.size();
-    while (true) {
+    int favoredNodeNum = favoredNodes == null? 0 : favoredNodes.size();
+    while(true) {
       DatanodeDetails favoredNode = favoredNodeNum > favorIndex ?
           favoredNodes.get(favorIndex) : null;
       DatanodeDetails chosenNode;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMMetrics.java
index f9d2ade8fd..51948291a4 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMMetrics.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMMetrics.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
 /**
  * This class is for maintaining StorageContainerManager statistics.
  */
-@Metrics(about = "Storage Container Manager Metrics", context = "dfs")
+@Metrics(about="Storage Container Manager Metrics", context="dfs")
 public class SCMMetrics {
   public static final String SOURCE_NAME =
       SCMMetrics.class.getSimpleName();
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerMetrics.java
index 0f828aef1a..9a70f6974b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerMetrics.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerMetrics.java
@@ -16,10 +16,7 @@
  */
 package org.apache.hadoop.hdds.scm.container.replication;
 
-import com.google.common.base.CaseFormat;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.ReplicationManager;
-import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport;
 import org.apache.hadoop.metrics2.MetricsCollector;
 import org.apache.hadoop.metrics2.MetricsInfo;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
@@ -33,13 +30,6 @@ import org.apache.hadoop.metrics2.lib.MutableCounterLong;
 import org.apache.hadoop.metrics2.lib.MutableRate;
 import org.apache.hadoop.ozone.OzoneConsts;
 
-import java.util.Collections;
-import java.util.LinkedHashMap;
-import java.util.Map;
-
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
-import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport.HealthState;
-
 /**
  * Class contains metrics related to ReplicationManager.
  */
@@ -61,28 +51,6 @@ public final class ReplicationManagerMetrics implements MetricsSource {
       "InflightMove",
       "Tracked inflight container move requests.");
 
-  // Setup metric names and descriptions for Container Lifecycle states
-  private static final Map<LifeCycleState, MetricsInfo> LIFECYCLE_STATE_METRICS
-      = Collections.unmodifiableMap(
-          new LinkedHashMap<LifeCycleState, MetricsInfo>() {{
-            for (LifeCycleState s : LifeCycleState.values()) {
-              String name = CaseFormat.UPPER_UNDERSCORE
-                  .to(CaseFormat.UPPER_CAMEL, s.toString());
-              String metric = "Num" + name + "Containers";
-              String description = "Containers in " + name + " state";
-              put(s, Interns.info(metric, description));
-            }
-          }});
-
-  // Setup metric names and descriptions for
-  private static final Map<HealthState, MetricsInfo>
-      CONTAINER_HEALTH_STATE_METRICS = Collections.unmodifiableMap(
-          new LinkedHashMap<HealthState, MetricsInfo>() {{
-            for (HealthState s :  HealthState.values()) {
-              put(s, Interns.info(s.getMetricName(), s.getDescription()));
-            }
-          }});
-
   @Metric("Number of replication commands sent.")
   private MutableCounterLong numReplicationCmdsSent;
 
@@ -142,16 +110,6 @@ public final class ReplicationManagerMetrics implements MetricsSource {
         .addGauge(INFLIGHT_DELETION, getInflightDeletion())
         .addGauge(INFLIGHT_MOVE, getInflightMove());
 
-    ReplicationManagerReport report = replicationManager.getContainerReport();
-    for (Map.Entry<HddsProtos.LifeCycleState, MetricsInfo> e :
-        LIFECYCLE_STATE_METRICS.entrySet()) {
-      builder.addGauge(e.getValue(), report.getStat(e.getKey()));
-    }
-    for (Map.Entry<ReplicationManagerReport.HealthState, MetricsInfo> e :
-        CONTAINER_HEALTH_STATE_METRICS.entrySet()) {
-      builder.addGauge(e.getValue(), report.getStat(e.getKey()));
-    }
-
     numReplicationCmdsSent.snapshot(builder, all);
     numReplicationCmdsCompleted.snapshot(builder, all);
     numReplicationCmdsTimeout.snapshot(builder, all);
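
Note: the removed gauge registration derived one metric per enum constant,
converting UPPER_UNDERSCORE names to UpperCamel with Guava's CaseFormat. A tiny
runnable demo of that naming step (the enum value is just an example):

    import com.google.common.base.CaseFormat;

    public final class MetricNameDemo {
      public static void main(String[] args) {
        // "QUASI_CLOSED" -> "QuasiClosed" -> gauge "NumQuasiClosedContainers"
        String name = CaseFormat.UPPER_UNDERSCORE
            .to(CaseFormat.UPPER_CAMEL, "QUASI_CLOSED");
        System.out.println("Num" + name + "Containers");
      }
    }
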
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
index c254dd723e..bbf1c700d1 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
@@ -358,7 +358,7 @@ public class ContainerStateMap {
 
     final ContainerQueryKey queryKey =
         new ContainerQueryKey(state, owner, repConfig);
-    if (resultCache.containsKey(queryKey)) {
+    if(resultCache.containsKey(queryKey)){
       return resultCache.get(queryKey);
     }
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java
index edea6816ae..5023e93a9e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java
@@ -216,7 +216,7 @@ public final class HASecurityUtils {
       // Persist scm cert serial ID.
       scmStorageConfig.setScmCertSerialId(subSCMCertHolder.getSerialNumber()
           .toString());
-    } catch (InterruptedException | ExecutionException | IOException |
+    } catch (InterruptedException | ExecutionException| IOException |
         CertificateException  e) {
       LOG.error("Error while fetching/storing SCM signed certificate.", e);
       Thread.currentThread().interrupt();
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/RatisUtil.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/RatisUtil.java
index 4154b62125..e949850f6c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/RatisUtil.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/RatisUtil.java
@@ -133,7 +133,7 @@ public final class RatisUtil {
                 ScmConfigKeys.OZONE_SCM_HA_RATIS_LEADER_ELECTION_TIMEOUT,
                 ScmConfigKeys.
                         OZONE_SCM_HA_RATIS_LEADER_ELECTION_TIMEOUT_DEFAULT,
-                TimeUnit.MILLISECONDS) + 200L,
+                TimeUnit.MILLISECONDS)+200L,
             TimeUnit.MILLISECONDS));
     Rpc.setSlownessTimeout(properties, TimeDuration.valueOf(
             ozoneConf.getTimeDuration(
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAInvocationHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAInvocationHandler.java
index bb12df6ec0..b07ee54147 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAInvocationHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAInvocationHandler.java
@@ -68,7 +68,7 @@ public class SCMHAInvocationHandler implements InvocationHandler {
               invokeLocal(method, args);
       LOG.debug("Call: {} took {} ms", method, Time.monotonicNow() - startTime);
       return result;
-    } catch (InvocationTargetException iEx) {
+    } catch(InvocationTargetException iEx) {
       throw iEx.getCause();
     }
   }
@@ -88,8 +88,7 @@ public class SCMHAInvocationHandler implements InvocationHandler {
    */
   private Object invokeRatis(Method method, Object[] args)
       throws Exception {
-    LOG.trace("Invoking method {} on target {}", method, ratisHandler);
-    // TODO: Add metric here to track time taken by Ratis
+    long startTime = Time.monotonicNowNanos();
     Preconditions.checkNotNull(ratisHandler);
     SCMRatisRequest scmRatisRequest = SCMRatisRequest.of(requestType,
         method.getName(), method.getParameterTypes(), args);
@@ -100,7 +99,7 @@ public class SCMHAInvocationHandler implements InvocationHandler {
     // via ratis. So, in this special scenario we use RaftClient.
     final SCMRatisResponse response;
     if (method.getName().equals("storeValidCertificate") &&
-        args[args.length - 1].equals(HddsProtos.NodeType.SCM)) {
+        args[args.length -1].equals(HddsProtos.NodeType.SCM)) {
       response =
           HASecurityUtils.submitScmCertsToRatis(
               ratisHandler.getDivision().getGroup(),
@@ -111,6 +110,8 @@ public class SCMHAInvocationHandler implements InvocationHandler {
       response = ratisHandler.submitRequest(
           scmRatisRequest);
     }
+    LOG.info("Invoking method {} on target {}, cost {}us",
+        method, ratisHandler, (Time.monotonicNowNanos() - startTime) / 1000.0);
 
     if (response.isSuccess()) {
       return response.getResult();
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java
index a69f2cbbb4..3821575459 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java
@@ -73,7 +73,6 @@ public class SCMHAManagerImpl implements SCMHAManager {
       final StorageContainerManager scm) throws IOException {
     this.conf = conf;
     this.scm = scm;
-    this.exitManager = new ExitManager();
     if (SCMHAUtils.isSCMHAEnabled(conf)) {
       this.transactionBuffer = new SCMHADBTransactionBufferImpl(scm);
       this.ratisServer = new SCMRatisServerImpl(conf, scm,
@@ -259,7 +258,7 @@ public class SCMHAManagerImpl implements SCMHAManager {
       throw e;
     }
 
-    File dbBackup;
+    File dbBackup = null;
     try {
       dbBackup = HAUtils
           .replaceDBWithCheckpoint(lastAppliedIndex, oldDBLocation,
@@ -267,41 +266,29 @@ public class SCMHAManagerImpl implements SCMHAManager {
       LOG.info("Replaced DB with checkpoint, term: {}, index: {}",
           term, lastAppliedIndex);
     } catch (Exception e) {
-      // If we are not able to install latest checkpoint we should throw
-      // this exception. In this way reinitialize can throw exception to
-      // ratis to handle properly.
       LOG.error("Failed to install Snapshot as SCM failed to replace"
-          + " DB with downloaded checkpoint. Checkpoint transaction {}", e,
-          checkpointTxnInfo.getTransactionIndex());
-      throw e;
+          + " DB with downloaded checkpoint. Reloading old SCM state.", e);
     }
-
     // Reload the DB store with the new checkpoint.
+    // Restart (unpause) the state machine and update its last applied index
+    // to the installed checkpoint's snapshot index.
     try {
       reloadSCMState();
       LOG.info("Reloaded SCM state with Term: {} and Index: {}", term,
           lastAppliedIndex);
     } catch (Exception ex) {
-      LOG.info("Failed to reload SCM state with Term: {} and Index: {}", term,
-          lastAppliedIndex);
-      // revert to the old db, since the new db may be a corrupted one
-      // so that SCM can restart from the old db.
       try {
+        // revert to the old db, since the new db may be a corrupted one,
+        // so that SCM can restart from the old db.
         if (dbBackup != null) {
-          dbBackup =
-              HAUtils.replaceDBWithCheckpoint(lastAppliedIndex, oldDBLocation,
+          dbBackup = HAUtils
+              .replaceDBWithCheckpoint(lastAppliedIndex, oldDBLocation,
                   dbBackup.toPath(), OzoneConsts.SCM_DB_BACKUP_PREFIX);
-          LOG.error("Replacing SCM state with Term : {} and Index:",
-              termIndex.getTerm(), termIndex.getTerm());
-          // This is being done to check before stop with old db
-          // try to reload and then finally terminate and also test has
-          // assumption for re-verify after corrupt DB loading without
-          // reloadSCMState call test fails with NPE when finding db location.
-          reloadSCMState();
+          startServices();
         }
       } finally {
-        String errorMsg = "Failed to reload SCM state and instantiate " +
-            "services.";
+        String errorMsg =
+            "Failed to reload SCM state and instantiate services.";
         exitManager.exitSystem(1, errorMsg, ex, LOG);
       }
     }
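
Note: the failure handling restored here follows a backup-swap-rollback shape:
keep the old DB aside, move the downloaded checkpoint into place, and if
reloading fails, swap the backup in again before giving up. A minimal
filesystem sketch of that shape, with hypothetical paths and names:

    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    final class DbSwap {
      // Replace `live` with `candidate` and hand back a backup of the old
      // state; rolling back is just another swap with the backup.
      static Path swap(Path live, Path candidate) throws Exception {
        Path backup = live.resolveSibling(live.getFileName() + ".backup");
        Files.move(live, backup, StandardCopyOption.REPLACE_EXISTING);
        Files.move(candidate, live);
        return backup;
      }
    }
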
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java
index b48dfb616d..f63930056b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java
@@ -258,8 +258,7 @@ public class SCMRatisServerImpl implements SCMRatisServer {
               peer.getAddress().concat(isLocal ?
                   ":".concat(RaftProtos.RaftPeerRole.LEADER.toString()) :
                   ":".concat(RaftProtos.RaftPeerRole.FOLLOWER.toString()))
-                  .concat(":".concat(peer.getId().toString()))
-                  .concat(":".concat(peerInetAddress.getHostAddress()))));
+                  .concat(":".concat(peer.getId().toString()))));
     }
     return ratisRoles;
   }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java
index 9aeda10225..8fa1866d5b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java
@@ -357,7 +357,7 @@ public class SCMStateMachine extends BaseStateMachine {
   }
 
   @Override
-  public void reinitialize() throws IOException {
+  public void reinitialize() {
     Preconditions.checkNotNull(installingDBCheckpoint);
     DBCheckpoint checkpoint = installingDBCheckpoint;
 
@@ -369,8 +369,8 @@ public class SCMStateMachine extends BaseStateMachine {
       termIndex =
           scm.getScmHAManager().installCheckpoint(checkpoint);
     } catch (Exception e) {
-      LOG.error("Failed to reinitialize SCMStateMachine.", e);
-      throw new IOException(e);
+      LOG.error("Failed to reinitialize SCMStateMachine.");
+      return;
     }
 
     // re-initialize the DBTransactionBuffer and update the lastAppliedIndex.
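
Note: the change above also flips the failure contract of reinitialize(): the
pre-revert code wrapped any checkpoint-install failure in an IOException so the
Ratis layer could react, while the restored code logs and returns. A sketch of
the propagation style being removed (illustrative names):

    import java.io.IOException;

    final class Reinit {
      interface Step {
        void run() throws Exception;
      }

      static void reinitialize(Step installCheckpoint) throws IOException {
        try {
          installCheckpoint.run();
        } catch (Exception e) {
          // Propagate so the caller can pause/terminate instead of running
          // on with stale state.
          throw new IOException("Failed to reinitialize state machine", e);
        }
      }
    }
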
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/CodecFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/CodecFactory.java
index 9fb771b7a7..95d906e738 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/CodecFactory.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/CodecFactory.java
@@ -47,7 +47,7 @@ public final class CodecFactory {
     codecs.put(X509Certificate.class, new X509CertificateCodec());
   }
 
-  private CodecFactory() { }
+  private CodecFactory() {}
 
   public static Codec getCodec(Class<?> type)
       throws InvalidProtocolBufferException {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java
index de7fcb0b74..799e128202 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java
@@ -238,11 +238,11 @@ public class SCMMetadataStoreImpl implements SCMMetadataStore {
 
   @Override
   public TableIterator getAllCerts(CertificateStore.CertType certType) {
-    if (certType == CertificateStore.CertType.VALID_CERTS) {
+    if(certType == CertificateStore.CertType.VALID_CERTS) {
       return validCertsTable.iterator();
     }
 
-    if (certType == CertificateStore.CertType.REVOKED_CERTS) {
+    if(certType == CertificateStore.CertType.REVOKED_CERTS) {
       return revokedCertsTable.iterator();
     }
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java
index bf2559b8ab..9bfa7d6c4c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java
@@ -44,7 +44,7 @@ public class X509CertificateCodec implements Codec<X509Certificate> {
   @Override
   public X509Certificate fromPersistedFormat(byte[] rawData)
       throws IOException {
-    try {
+    try{
       String s = new String(rawData, StandardCharsets.UTF_8);
       return CertificateCodec.getX509Certificate(s);
     } catch (CertificateException exp) {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java
index aa930251c4..eb6dc0d424 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java
@@ -91,7 +91,7 @@ public class CommandQueue {
     try {
       Commands cmds = commandMap.remove(datanodeUuid);
       List<SCMCommand> cmdList = null;
-      if (cmds != null) {
+      if(cmds != null) {
         cmdList = cmds.getCommands();
         commandsInQueue -= cmdList.size() > 0 ? cmdList.size() : 0;
         // A post condition really.
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java
index 47d7c53469..27a84deaff 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java
@@ -85,20 +85,20 @@ public class NodeDecommissionManager {
       return port;
     }
 
-    private void parseHostname() throws InvalidHostStringException {
+    private void parseHostname() throws InvalidHostStringException{
       try {
         // A URI *must* have a scheme, so just create a fake one
-        URI uri = new URI("empty://" + rawHostname.trim());
+        URI uri = new URI("empty://"+rawHostname.trim());
         this.hostname = uri.getHost();
         this.port = uri.getPort();
 
         if (this.hostname == null) {
-          throw new InvalidHostStringException("The string " + rawHostname +
+          throw new InvalidHostStringException("The string "+rawHostname+
               " does not contain a value hostname or hostname:port definition");
         }
       } catch (URISyntaxException e) {
         throw new InvalidHostStringException(
-            "Unable to parse the hoststring " + rawHostname, e);
+            "Unable to parse the hoststring "+rawHostname, e);
       }
     }
   }
@@ -138,7 +138,7 @@ public class NodeDecommissionManager {
         results.add(found.get(0));
       } else if (found.size() > 1) {
         DatanodeDetails match = null;
-        for (DatanodeDetails dn : found) {
+        for(DatanodeDetails dn : found) {
           if (validateDNPortMatch(host.getPort(), dn)) {
             match = dn;
             break;
@@ -231,7 +231,7 @@ public class NodeDecommissionManager {
        // NodeNotFoundException here except if the node is removed in the
         // very short window between validation and starting decom. Therefore
         // log a warning and ignore the exception
-        LOG.warn("The host {} was not found in SCM. Ignoring the request to " +
+        LOG.warn("The host {} was not found in SCM. Ignoring the request to "+
             "decommission it", dn.getHostName());
         errors.add(new DatanodeAdminError(dn.getHostName(),
             "The host was not found in SCM"));
@@ -274,12 +274,12 @@ public class NodeDecommissionManager {
           dn, NodeOperationalState.DECOMMISSIONING);
       monitor.startMonitoring(dn);
     } else if (nodeStatus.isDecommission()) {
-      LOG.info("Start Decommission called on node {} in state {}. Nothing to " +
+      LOG.info("Start Decommission called on node {} in state {}. Nothing to "+
           "do.", dn, opState);
     } else {
       LOG.error("Cannot decommission node {} in state {}", dn, opState);
-      throw new InvalidNodeStateException("Cannot decommission node " +
-          dn + " in state " + opState);
+      throw new InvalidNodeStateException("Cannot decommission node "+
+          dn +" in state "+ opState);
     }
   }
 
@@ -296,7 +296,7 @@ public class NodeDecommissionManager {
        // NodeNotFoundException here except if the node is removed in the
         // very short window between validation and starting decom. Therefore
         // log a warning and ignore the exception
-        LOG.warn("Host {} was not found in SCM. Ignoring the request to " +
+        LOG.warn("Host {} was not found in SCM. Ignoring the request to "+
             "recommission it.", dn.getHostName());
         errors.add(new DatanodeAdminError(dn.getHostName(),
             "The host was not found in SCM"));
@@ -306,7 +306,7 @@ public class NodeDecommissionManager {
   }
 
   public synchronized void recommission(DatanodeDetails dn)
-      throws NodeNotFoundException {
+      throws NodeNotFoundException{
     NodeStatus nodeStatus = getNodeStatus(dn);
     NodeOperationalState opState = nodeStatus.getOperationalState();
     if (opState != NodeOperationalState.IN_SERVICE) {
@@ -315,7 +315,7 @@ public class NodeDecommissionManager {
       monitor.stopMonitoring(dn);
       LOG.info("Queued node {} for recommission", dn);
     } else {
-      LOG.info("Recommission called on node {} with state {}. " +
+      LOG.info("Recommission called on node {} with state {}. "+
           "Nothing to do.", dn, opState);
     }
   }
@@ -333,7 +333,7 @@ public class NodeDecommissionManager {
        // NodeNotFoundException here except if the node is removed in the
         // very short window between validation and starting decom. Therefore
         // log a warning and ignore the exception
-        LOG.warn("The host {} was not found in SCM. Ignoring the request to " +
+        LOG.warn("The host {} was not found in SCM. Ignoring the request to "+
             "start maintenance on it", dn.getHostName());
       } catch (InvalidNodeStateException e) {
         errors.add(new DatanodeAdminError(dn.getHostName(), e.getMessage()));
@@ -360,12 +360,12 @@ public class NodeDecommissionManager {
       monitor.startMonitoring(dn);
       LOG.info("Starting Maintenance for node {}", dn);
     } else if (nodeStatus.isMaintenance()) {
-      LOG.info("Starting Maintenance called on node {} with state {}. " +
+      LOG.info("Starting Maintenance called on node {} with state {}. "+
           "Nothing to do.", dn, opState);
     } else {
       LOG.error("Cannot start maintenance on node {} in state {}", dn, opState);
-      throw new InvalidNodeStateException("Cannot start maintenance on node " +
-          dn + " in state " + opState);
+      throw new InvalidNodeStateException("Cannot start maintenance on node "+
+          dn +" in state "+ opState);
     }
   }
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
index 6127bb0b4d..4e1a9649ea 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
@@ -324,7 +324,7 @@ public interface NodeManager extends StorageContainerNodeProtocol,
     return null;
   }
 
-  default HDDSLayoutVersionManager getLayoutVersionManager() {
+  default HDDSLayoutVersionManager getLayoutVersionManager(){
     return null;
   }
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
index 85dd6a0673..e752454e00 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
@@ -728,7 +728,7 @@ public class NodeStateManager implements Runnable, Closeable {
         (lastHbTime) -> lastHbTime < staleNodeDeadline;
 
     try {
-      for (DatanodeInfo node : nodeStateMap.getAllDatanodeInfos()) {
+      for(DatanodeInfo node : nodeStateMap.getAllDatanodeInfos()) {
         NodeStatus status = nodeStateMap.getNodeStatus(node.getUuid());
         switch (status.getHealth()) {
         case HEALTHY:
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java
index 03dd2e2b6d..a9164c7297 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java
@@ -209,8 +209,8 @@ public class NodeStatus {
 
   @Override
   public String toString() {
-    return "OperationalState: " + operationalState + " Health: " + health +
-        " OperationStateExpiry: " + opStateExpiryEpochSeconds;
+    return "OperationalState: "+operationalState+" Health: "+health+
+        " OperationStateExpiry: "+opStateExpiryEpochSeconds;
   }
 
 }
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index 8899b13b6f..68c2697237 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -268,7 +268,7 @@ public class SCMNodeManager implements NodeManager {
    */
   @Override
   public void setNodeOperationalState(DatanodeDetails datanodeDetails,
-      NodeOperationalState newState) throws NodeNotFoundException {
+      NodeOperationalState newState) throws NodeNotFoundException{
     setNodeOperationalState(datanodeDetails, newState, 0);
   }
 
@@ -283,7 +283,7 @@ public class SCMNodeManager implements NodeManager {
   @Override
   public void setNodeOperationalState(DatanodeDetails datanodeDetails,
       NodeOperationalState newState, long opStateExpiryEpocSec)
-      throws NodeNotFoundException {
+      throws NodeNotFoundException{
     nodeStateManager.setNodeOperationalState(
         datanodeDetails, newState, opStateExpiryEpocSec);
   }
@@ -612,7 +612,7 @@ public class SCMNodeManager implements NodeManager {
         // send Finalize command multiple times.
         scmNodeEventPublisher.fireEvent(SCMEvents.DATANODE_COMMAND,
             new CommandForDatanode<>(datanodeDetails.getUuid(), finalizeCmd));
-      } catch (NotLeaderException ex) {
+      } catch(NotLeaderException ex) {
         LOG.warn("Skip sending finalize upgrade command since current SCM is" +
             "not leader.", ex);
       }
@@ -764,7 +764,7 @@ public class SCMNodeManager implements NodeManager {
     for (DatanodeInfo dni : nodeStateManager.getAllNodes()) {
       NodeStatus status = dni.getNodeStatus();
       nodes.get(status.getOperationalState().name())
-          .compute(status.getHealth().name(), (k, v) -> v + 1);
+          .compute(status.getHealth().name(), (k, v) -> v+1);
     }
     return nodes;
   }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java
index b727580d1b..6eb73595d2 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java
@@ -129,12 +129,12 @@ public final class SCMNodeMetrics implements MetricsSource {
      *     ...
      */
     MetricsRecordBuilder metrics = collector.addRecord(registry.info());
-    for (Map.Entry<String, Map<String, Integer>> e : nodeCount.entrySet()) {
-      for (Map.Entry<String, Integer> h : e.getValue().entrySet()) {
+    for(Map.Entry<String, Map<String, Integer>> e : nodeCount.entrySet()) {
+      for(Map.Entry<String, Integer> h : e.getValue().entrySet()) {
         metrics.addGauge(
             Interns.info(
-                StringUtils.camelize(e.getKey() + "_" + h.getKey() + "_nodes"),
-                "Number of " + e.getKey() + " " + h.getKey() + " datanodes"),
+                StringUtils.camelize(e.getKey()+"_"+h.getKey()+"_nodes"),
+                "Number of "+e.getKey()+" "+h.getKey()+" datanodes"),
             h.getValue());
       }
     }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
index ed45ed06eb..1b0e5b56e7 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
@@ -136,7 +136,7 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
... 27009 lines suppressed ...

