Posted to commits@ozone.apache.org by el...@apache.org on 2020/07/01 16:48:51 UTC

[hadoop-ozone] branch master updated: HDDS-2413. Set configuration variables from annotated java objects (#1106)

This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 0300feb  HDDS-2413. Set configuration variables from annotated java objects (#1106)
0300feb is described below

commit 0300febe2cf04bbe2769aab51bd0253b4af99a25
Author: Doroszlai, Attila <64...@users.noreply.github.com>
AuthorDate: Wed Jul 1 18:48:35 2020 +0200

    HDDS-2413. Set configuration variables from annotated java objects (#1106)
---
 .../hadoop/hdds/scm/XceiverClientManager.java      |  4 +-
 .../hadoop/hdds/scm/client/HddsClientUtils.java    |  3 +-
 .../hadoop/hdds/conf/OzoneConfiguration.java       | 21 +++++-
 .../java/org/apache/hadoop/hdds/fs/DUFactory.java  | 19 ++---
 .../hdds/fs/DedicatedDiskSpaceUsageFactory.java    |  3 +-
 .../hadoop/hdds/fs/SpaceUsageCheckFactory.java     |  3 +-
 .../org/apache/hadoop/hdds/ratis/RatisHelper.java  |  3 +-
 .../utils/LegacyHadoopConfigurationSource.java     |  4 +-
 .../hadoop/hdds/conf/SimpleConfiguration.java      |  2 +-
 .../hdds/conf/SimpleConfigurationParent.java       |  6 +-
 .../hadoop/hdds/conf/TestOzoneConfiguration.java   | 85 ++++++++++++++++++++--
 .../org/apache/hadoop/hdds/fs/TestDUFactory.java   | 14 ++--
 .../hdds/conf/ConfigurationReflectionUtil.java     | 80 +++++++++++++++++++-
 .../hadoop/hdds/conf/ConfigurationSource.java      |  7 +-
 .../hadoop/hdds/conf/ConfigurationTarget.java      | 54 ++++++++++++++
 .../hdds/conf/MutableConfigurationSource.java}     | 15 +---
 .../hadoop/ozone/HddsDatanodeHttpServer.java       |  7 +-
 .../server/ratis/ContainerStateMachine.java        |  3 +-
 .../container/common/TestBlockDeletingService.java |  3 +-
 .../ozoneimpl/TestContainerScrubberMetrics.java    |  5 +-
 .../hdds/conf/DatanodeRatisServerConfig.java       | 36 ++++-----
 .../hadoop/hdds/server/http/BaseHttpServer.java    | 10 +--
 .../apache/hadoop/hdds/server/http/HttpConfig.java |  6 +-
 .../hadoop/hdds/server/http/HttpServer2.java       |  7 +-
 .../hdds/scm/container/ReplicationManager.java     | 15 ++--
 .../server/StorageContainerManagerHttpServer.java  |  6 +-
 .../hdds/scm/container/TestReplicationManager.java |  4 +-
 .../hdds/scm/cli/ContainerOperationClient.java     |  4 +-
 .../hadoop/ozone/client/OzoneClientFactory.java    |  3 +-
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |  4 +-
 .../main/java/org/apache/hadoop/ozone/OmUtils.java |  7 +-
 .../ozone/om/ha/OMFailoverProxyProvider.java       |  2 +-
 .../hadoop/ozone/om/helpers/TestOzoneAclUtil.java  | 12 +--
 .../apache/hadoop/ozone/MiniOzoneChaosCluster.java |  9 ++-
 .../hadoop/fs/ozone/contract/OzoneContract.java    | 15 ++--
 .../ozone/contract/rooted/RootedOzoneContract.java | 15 ++--
 .../hadoop/hdds/scm/pipeline/TestNodeFailure.java  | 20 ++---
 .../apache/hadoop/ozone/MiniOzoneClusterImpl.java  |  9 ++-
 .../ozone/client/rpc/Test2WayCommitInRatis.java    | 15 ++--
 .../rpc/TestBlockOutputStreamWithFailures.java     | 15 ++--
 ...estBlockOutputStreamWithFailuresFlushDelay.java | 15 ++--
 .../hadoop/ozone/client/rpc/TestCommitWatcher.java | 15 ++--
 .../rpc/TestContainerReplicationEndToEnd.java      | 22 +++---
 .../TestContainerStateMachineFailureOnRead.java    | 25 ++-----
 .../rpc/TestContainerStateMachineFailures.java     | 15 ++--
 .../client/rpc/TestDeleteWithSlowFollower.java     | 25 ++-----
 .../client/rpc/TestFailureHandlingByClient.java    | 15 ++--
 .../rpc/TestFailureHandlingByClientFlushDelay.java | 15 ++--
 .../rpc/TestMultiBlockWritesWithDnFailures.java    | 15 ++--
 .../client/rpc/TestValidateBCSIDOnRestart.java     | 15 ++--
 .../ozone/client/rpc/TestWatchForCommit.java       | 15 ++--
 .../hadoop/ozone/freon/TestDataValidate.java       | 15 ++--
 .../freon/TestDataValidateWithDummyContainers.java |  4 -
 .../ozone/freon/TestFreonWithDatanodeRestart.java  | 15 ++--
 .../ozone/freon/TestFreonWithPipelineDestroy.java  | 15 ++--
 .../hadoop/ozone/freon/TestRandomKeyGenerator.java | 15 ++--
 .../ozone/om/TestOzoneManagerRocksDBLogging.java   |  6 +-
 .../apache/hadoop/ozone/recon/TestReconTasks.java  |  8 +-
 .../ozone/recon/TestReconWithOzoneManagerHA.java   |  8 +-
 .../hadoop/ozone/om/OzoneManagerHttpServer.java    |  8 +-
 .../om/snapshot/OzoneManagerSnapshotProvider.java  |  4 +-
 .../org/apache/hadoop/ozone/om/TestOMStorage.java  |  6 +-
 .../ozone/recon/codegen/ReconSqlDbConfig.java      |  7 --
 .../ozone/recon/fsck/ContainerHealthTask.java      |  4 +-
 .../hadoop/ozone/recon/scm/PipelineSyncTask.java   |  4 +-
 .../hadoop/ozone/recon/tasks/ReconTaskConfig.java  | 26 +++----
 .../ozone/recon/fsck/TestContainerHealthTask.java  |  3 +-
 .../hadoop/ozone/s3/S3GatewayHttpServer.java       |  4 +-
 .../org/apache/hadoop/ozone/admin/OzoneAdmin.java  |  4 +-
 .../apache/hadoop/ozone/freon/FreonHttpServer.java |  6 +-
 70 files changed, 524 insertions(+), 385 deletions(-)
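
For orientation: the patch makes annotated config objects writable back into a Configuration, not only readable from it. A minimal sketch of the round trip, with illustrative values and DUFactory.Conf from the diff below:

    // assumes: import java.time.Duration;
    //          import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    //          import org.apache.hadoop.hdds.fs.DUFactory;
    OzoneConfiguration conf = new OzoneConfiguration();

    // read side (pre-existing): inject configured values into the object
    DUFactory.Conf du = conf.getObject(DUFactory.Conf.class);

    // write side (new here): persist the object's values back under its
    // @ConfigGroup prefix (hdds.datanode.du)
    du.setRefreshPeriod(Duration.ofHours(2));
    conf.setFromObject(du);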

diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
index 8cc6e8d..430e6e2 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.hdds.conf.Config;
 import org.apache.hadoop.hdds.conf.ConfigGroup;
 import org.apache.hadoop.hdds.conf.ConfigType;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
@@ -86,8 +85,7 @@ public class XceiverClientManager implements Closeable {
    * @param conf configuration
    */
   public XceiverClientManager(ConfigurationSource conf) throws IOException {
-    this(conf, OzoneConfiguration.of(conf).getObject(ScmClientConfig.class),
-        null);
+    this(conf, conf.getObject(ScmClientConfig.class), null);
   }
 
   public XceiverClientManager(ConfigurationSource conf,
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
index 29480a5..bec5a47 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
@@ -32,7 +32,6 @@ import java.util.concurrent.TimeoutException;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.annotation.InterfaceStability;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.io.retry.RetryPolicies;
@@ -250,7 +249,7 @@ public final class HddsClientUtils {
    * Standalone and Ratis client.
    */
   public static int getMaxOutstandingRequests(ConfigurationSource config) {
-    return OzoneConfiguration.of(config)
+    return config
         .getObject(RatisClientConfig.RaftConfig.class)
         .getMaxOutstandingRequests();
   }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
index 7176b05..9cfe0f6 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
@@ -47,7 +47,7 @@ import com.google.common.base.Preconditions;
  */
 @InterfaceAudience.Private
 public class OzoneConfiguration extends Configuration
-    implements ConfigurationSource {
+    implements MutableConfigurationSource {
   static {
     activate();
   }
@@ -72,6 +72,25 @@ public class OzoneConfiguration extends Configuration
         : new OzoneConfiguration(conf);
   }
 
+  /**
+   * @return a new config object of type {@code T} configured with defaults
+   * and any overrides from XML
+   */
+  public static <T> T newInstanceOf(Class<T> configurationClass) {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    return conf.getObject(configurationClass);
+  }
+
+  /**
+   * @return a new {@code OzoneConfiguration} instance set from the given
+   * {@code configObject}
+   */
+  public static <T> OzoneConfiguration fromObject(T configObject) {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setFromObject(configObject);
+    return conf;
+  }
+
   public OzoneConfiguration() {
     OzoneConfiguration.activate();
     loadDefaults();
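
The two new static helpers are thin conveniences; a hedged usage sketch (ContainerScrubberConfiguration is one of the config classes switched to newInstanceOf later in this patch; its import path is assumed):

    // defaults plus any ozone-site.xml overrides, without threading an
    // OzoneConfiguration instance through the caller
    ContainerScrubberConfiguration scrubConf =
        OzoneConfiguration.newInstanceOf(ContainerScrubberConfiguration.class);

    // a Configuration pre-populated from an annotated object
    OzoneConfiguration fromScrubConf = OzoneConfiguration.fromObject(scrubConf);
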
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DUFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DUFactory.java
index 5b57b34..19eea03 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DUFactory.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DUFactory.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.hdds.conf.ConfigGroup;
 import org.apache.hadoop.hdds.conf.ConfigTag;
 import org.apache.hadoop.hdds.conf.ConfigType;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 
 /**
  * Uses DU for all volumes.  Saves used value in cache file.
@@ -35,14 +34,12 @@ public class DUFactory implements SpaceUsageCheckFactory {
   private static final String DU_CACHE_FILE = "scmUsed";
   private static final String EXCLUDE_PATTERN = "*.tmp.*";
 
-  private static final String CONFIG_PREFIX = "hdds.datanode.du";
-
   private Conf conf;
 
   @Override
   public SpaceUsageCheckFactory setConfiguration(
       ConfigurationSource configuration) {
-    conf = OzoneConfiguration.of(configuration).getObject(Conf.class);
+    conf = configuration.getObject(Conf.class);
     return this;
   }
 
@@ -61,13 +58,11 @@ public class DUFactory implements SpaceUsageCheckFactory {
   /**
    * Configuration for {@link DUFactory}.
    */
-  @ConfigGroup(prefix = CONFIG_PREFIX)
+  @ConfigGroup(prefix = "hdds.datanode.du")
   public static class Conf {
 
-    private static final String REFRESH_PERIOD = "refresh.period";
-
     @Config(
-        key = REFRESH_PERIOD,
+        key = "refresh.period",
         defaultValue = "1h",
         type = ConfigType.TIME,
         tags = { ConfigTag.DATANODE },
@@ -76,16 +71,12 @@ public class DUFactory implements SpaceUsageCheckFactory {
     )
     private long refreshPeriod;
 
-    public void setRefreshPeriod(long millis) {
-      refreshPeriod = millis;
+    public void setRefreshPeriod(Duration duration) {
+      refreshPeriod = duration.toMillis();
     }
 
     public Duration getRefreshPeriod() {
       return Duration.ofMillis(refreshPeriod);
     }
-
-    static String configKeyForRefreshPeriod() {
-      return CONFIG_PREFIX + "." + REFRESH_PERIOD;
-    }
   }
 }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DedicatedDiskSpaceUsageFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DedicatedDiskSpaceUsageFactory.java
index 3ed74c9..2292a73 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DedicatedDiskSpaceUsageFactory.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DedicatedDiskSpaceUsageFactory.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.hdds.conf.ConfigGroup;
 import org.apache.hadoop.hdds.conf.ConfigTag;
 import org.apache.hadoop.hdds.conf.ConfigType;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 
 /**
  * Uses DedicatedDiskSpaceUsage for all volumes.  Does not save results since
@@ -40,7 +39,7 @@ public class DedicatedDiskSpaceUsageFactory implements SpaceUsageCheckFactory {
   @Override
   public SpaceUsageCheckFactory setConfiguration(
       ConfigurationSource configuration) {
-    conf = OzoneConfiguration.of(configuration).getObject(Conf.class);
+    conf = configuration.getObject(Conf.class);
     return this;
   }
 
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageCheckFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageCheckFactory.java
index 0205de5..ec13bb9 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageCheckFactory.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageCheckFactory.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.hdds.conf.Config;
 import org.apache.hadoop.hdds.conf.ConfigGroup;
 import org.apache.hadoop.hdds.conf.ConfigTag;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -70,7 +69,7 @@ public interface SpaceUsageCheckFactory {
    * instantiated.
    */
   static SpaceUsageCheckFactory create(ConfigurationSource config) {
-    Conf conf = OzoneConfiguration.of(config).getObject(Conf.class);
+    Conf conf = config.getObject(Conf.class);
     Class<? extends SpaceUsageCheckFactory> aClass = null;
     String className = conf.getClassName();
     if (className != null && !className.isEmpty()) {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
index ef8eb9d..47523bc 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
@@ -30,7 +30,6 @@ import java.util.stream.Collectors;
 
 import org.apache.hadoop.hdds.StringUtils;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -314,7 +313,7 @@ public final class RatisHelper {
    * ---------------------------------------------------------------------------
    */
   public static RetryPolicy createRetryPolicy(ConfigurationSource conf) {
-    RatisClientConfig ratisClientConfig = OzoneConfiguration.of(conf)
+    RatisClientConfig ratisClientConfig = conf
         .getObject(RatisClientConfig.class);
     ExponentialBackoffRetry exponentialBackoffRetry =
         createExponentialBackoffPolicy(ratisClientConfig);
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LegacyHadoopConfigurationSource.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LegacyHadoopConfigurationSource.java
index 44f2f9e..fcf6313 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LegacyHadoopConfigurationSource.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LegacyHadoopConfigurationSource.java
@@ -22,11 +22,13 @@ import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.MutableConfigurationSource;
 
 /**
  * Configuration source to wrap Hadoop Configuration object.
  */
-public class LegacyHadoopConfigurationSource implements ConfigurationSource {
+public class LegacyHadoopConfigurationSource
+    implements MutableConfigurationSource {
 
   private Configuration configuration;
 
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfiguration.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfiguration.java
index d518aa8..9a8702f 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfiguration.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfiguration.java
@@ -45,7 +45,7 @@ public class SimpleConfiguration extends SimpleConfigurationParent {
   @Config(key = "wait", type = ConfigType.TIME, timeUnit =
       TimeUnit.SECONDS, defaultValue = "30m", description = "Wait time (To "
       + "test TIME config type)", tags = ConfigTag.MANAGEMENT)
-  private long waitTime = 1;
+  private long waitTime;
 
   @PostConstruct
   public void validate() {
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfigurationParent.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfigurationParent.java
index 92f4c12..85ef7ba 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfigurationParent.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfigurationParent.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -29,4 +29,8 @@ public class SimpleConfigurationParent {
   public boolean isEnabled() {
     return enabled;
   }
+
+  public void setEnabled(boolean enabled) {
+    this.enabled = enabled;
+  }
 }
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java
index 5ab16ab..47c73df 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -74,8 +74,8 @@ public class TestOzoneConfiguration {
 
       Path fileResource = new Path(coreDefault.getAbsolutePath());
       conf.addResource(fileResource);
-      Assert.assertEquals(conf.getAllPropertiesByTag("MYCUSTOMTAG")
-          .getProperty("dfs.random.key"), "XYZ");
+      Assert.assertEquals("XYZ", conf.getAllPropertiesByTag("MYCUSTOMTAG")
+          .getProperty("dfs.random.key"));
     }
 
     try (BufferedWriter out = new BufferedWriter(new FileWriter(coreSite))) {
@@ -112,7 +112,7 @@ public class TestOzoneConfiguration {
 
     Assert.assertEquals("host", configuration.getBindHost());
     Assert.assertEquals("address", configuration.getClientAddress());
-    Assert.assertEquals(true, configuration.isEnabled());
+    Assert.assertTrue(configuration.isEnabled());
     Assert.assertEquals(5555, configuration.getPort());
     Assert.assertEquals(600, configuration.getWaitTime());
   }
@@ -124,17 +124,88 @@ public class TestOzoneConfiguration {
     SimpleConfiguration configuration =
         ozoneConfiguration.getObject(SimpleConfiguration.class);
 
-    Assert.assertEquals(true, configuration.isEnabled());
+    Assert.assertTrue(configuration.isEnabled());
     Assert.assertEquals(9878, configuration.getPort());
   }
 
+  @Test
+  public void setConfigFromObject() {
+    // GIVEN
+    SimpleConfiguration object = new SimpleConfiguration();
+    object.setBindHost("host");
+    object.setClientAddress("address");
+    object.setEnabled(true);
+    object.setPort(5555);
+    object.setWaitTime(600);
+
+    OzoneConfiguration subject = new OzoneConfiguration();
+
+    // WHEN
+    subject.setFromObject(object);
+
+    // THEN
+    Assert.assertEquals(object.getBindHost(),
+        subject.get("test.scm.client.bind.host"));
+    Assert.assertEquals(object.getClientAddress(),
+        subject.get("test.scm.client.address"));
+    Assert.assertEquals(object.isEnabled(),
+        subject.getBoolean("test.scm.client.enabled", false));
+    Assert.assertEquals(object.getPort(),
+        subject.getInt("test.scm.client.port", 0));
+    Assert.assertEquals(TimeUnit.SECONDS.toMinutes(object.getWaitTime()),
+        subject.getTimeDuration("test.scm.client.wait", 0, TimeUnit.MINUTES));
+  }
+
+  @Test
+  public void setConfigFromObjectWithConfigDefaults() {
+    // GIVEN
+    OzoneConfiguration subject = new OzoneConfiguration();
+    SimpleConfiguration object = subject.getObject(SimpleConfiguration.class);
+
+    // WHEN
+    subject.setFromObject(object);
+
+    // THEN
+    Assert.assertEquals("0.0.0.0",
+        subject.get("test.scm.client.bind.host"));
+    Assert.assertEquals("localhost",
+        subject.get("test.scm.client.address"));
+    Assert.assertTrue(
+        subject.getBoolean("test.scm.client.enabled", false));
+    Assert.assertEquals(9878,
+        subject.getInt("test.scm.client.port", 123));
+    Assert.assertEquals(TimeUnit.MINUTES.toSeconds(30),
+        subject.getTimeDuration("test.scm.client.wait", 555, TimeUnit.SECONDS));
+  }
+
+  @Test
+  public void setConfigFromObjectWithObjectDefaults() {
+    // GIVEN
+    SimpleConfiguration object = new SimpleConfiguration();
+    OzoneConfiguration subject = new OzoneConfiguration();
+
+    // WHEN
+    subject.setFromObject(object);
+
+    // THEN
+    Assert.assertEquals("0.0.0.0",
+        subject.get("test.scm.client.bind.host"));
+    Assert.assertEquals("localhost",
+        subject.get("test.scm.client.address"));
+    Assert.assertFalse(
+        subject.getBoolean("test.scm.client.enabled", false));
+    Assert.assertEquals(0,
+        subject.getInt("test.scm.client.port", 123));
+    Assert.assertEquals(0,
+        subject.getTimeDuration("test.scm.client.wait", 555, TimeUnit.SECONDS));
+  }
+
   @Test(expected = NumberFormatException.class)
   public void postConstructValidation() {
     OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
     ozoneConfiguration.setInt("test.scm.client.port", -3);
 
-    SimpleConfiguration configuration =
-        ozoneConfiguration.getObject(SimpleConfiguration.class);
+    ozoneConfiguration.getObject(SimpleConfiguration.class);
   }
 
 
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDUFactory.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDUFactory.java
index 7a8701e..0aae44c 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDUFactory.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDUFactory.java
@@ -21,10 +21,9 @@ import java.io.File;
 import java.time.Duration;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.junit.Test;
 
-import static org.apache.hadoop.hdds.fs.DUFactory.Conf.configKeyForRefreshPeriod;
 import static org.apache.hadoop.test.GenericTestUtils.getTestDir;
-import org.junit.Test;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertSame;
 
@@ -40,16 +39,21 @@ public class TestDUFactory {
 
   @Test
   public void testParams() {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(configKeyForRefreshPeriod(), "1h");
     File dir = getTestDir(getClass().getSimpleName());
+    Duration refresh = Duration.ofHours(1);
+
+    OzoneConfiguration conf = new OzoneConfiguration();
+
+    DUFactory.Conf duConf = conf.getObject(DUFactory.Conf.class);
+    duConf.setRefreshPeriod(refresh);
+    conf.setFromObject(duConf);
 
     SpaceUsageCheckParams params = new DUFactory()
         .setConfiguration(conf)
         .paramsFor(dir);
 
     assertSame(dir, params.getDir());
-    assertEquals(Duration.ofHours(1), params.getRefresh());
+    assertEquals(refresh, params.getRefresh());
     assertSame(DU.class, params.getSource().getClass());
     assertSame(SaveSpaceUsageToFile.class, params.getPersistence().getClass());
   }
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationReflectionUtil.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationReflectionUtil.java
index 8d3b4f2..48f0549 100644
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationReflectionUtil.java
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationReflectionUtil.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -21,6 +21,8 @@ import java.lang.reflect.Field;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.lang.reflect.Modifier;
+import java.util.Deque;
+import java.util.LinkedList;
 
 /**
  * Reflection utilities for configuration injection.
@@ -162,4 +164,80 @@ public final class ConfigurationReflectionUtil {
       }
     }
   }
+
+  public static <T> void updateConfiguration(ConfigurationTarget config,
+      T object, String prefix) {
+
+    Class<?> configClass = object.getClass();
+    Deque<Class<?>> classes = new LinkedList<>();
+    classes.addLast(configClass);
+    Class<?> superclass = configClass.getSuperclass();
+    while (superclass != null) {
+      classes.addFirst(superclass);
+      superclass = superclass.getSuperclass();
+    }
+
+    for (Class<?> cl : classes) {
+      updateConfigurationFromObject(config, cl, object, prefix);
+    }
+  }
+
+  private static <T> void updateConfigurationFromObject(
+      ConfigurationTarget config, Class<?> configClass, T configObject,
+      String prefix) {
+
+    for (Field field : configClass.getDeclaredFields()) {
+      if (field.isAnnotationPresent(Config.class)) {
+        Config configAnnotation = field.getAnnotation(Config.class);
+        String fieldLocation = configClass + "." + field.getName();
+        String key = prefix + "." + configAnnotation.key();
+        ConfigType type = configAnnotation.type();
+
+        if (type == ConfigType.AUTO) {
+          type = detectConfigType(field.getType(), fieldLocation);
+        }
+
+        //Note: default value is handled by ozone-default.xml. Here we can
+        //use any default.
+        boolean accessChanged = false;
+        try {
+          if (!field.isAccessible()) {
+            field.setAccessible(true);
+            accessChanged = true;
+          }
+          switch (type) {
+          case STRING:
+            Object value = field.get(configObject);
+            if (value != null) {
+              config.set(key, String.valueOf(value));
+            }
+            break;
+          case INT:
+            config.setInt(key, field.getInt(configObject));
+            break;
+          case BOOLEAN:
+            config.setBoolean(key, field.getBoolean(configObject));
+            break;
+          case LONG:
+            config.setLong(key, field.getLong(configObject));
+            break;
+          case TIME:
+            config.setTimeDuration(key, field.getLong(configObject),
+                configAnnotation.timeUnit());
+            break;
+          default:
+            throw new ConfigurationException(
+                "Unsupported ConfigType " + type + " on " + fieldLocation);
+          }
+        } catch (IllegalAccessException e) {
+          throw new ConfigurationException(
+              "Can't inject configuration to " + fieldLocation, e);
+        } finally {
+          if (accessChanged) {
+            field.setAccessible(false);
+          }
+        }
+      }
+    }
+  }
 }
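
The resulting key for each annotated field is the @ConfigGroup prefix joined with the @Config key, rendered according to its ConfigType. A sketch with a hypothetical config class (names are illustrative, not from this patch):

    @ConfigGroup(prefix = "example.service")
    public class ExampleConf {

      @Config(key = "limit", defaultValue = "10", type = ConfigType.INT,
          tags = { ConfigTag.MANAGEMENT }, description = "Example limit.")
      private int limit = 10;

      public void setLimit(int limit) {
        this.limit = limit;
      }
    }

    // ConfigurationReflectionUtil.updateConfiguration(target,
    //     new ExampleConf(), "example.service")
    // ends up calling target.setInt("example.service.limit", 10)
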
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java
index 85d2b0b..9f4d879 100644
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java
@@ -25,7 +25,7 @@ import java.util.Map;
 import java.util.concurrent.TimeUnit;
 
 /**
- * Lightweight interface to defined the contract of the Configuration objects.
+ * Defines read-only contract of Configuration objects.
  */
 public interface ConfigurationSource {
 
@@ -37,11 +37,6 @@ public interface ConfigurationSource {
 
   char[] getPassword(String key) throws IOException;
 
-  @Deprecated
-    //TODO: user read only configs and don't use it to store actual port
-    // numbers.
-  void set(String key, String value);
-
   default String get(String key, String defaultValue) {
     String value = get(key);
     return value != null ? value : defaultValue;
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationTarget.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationTarget.java
new file mode 100644
index 0000000..12903cf
--- /dev/null
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationTarget.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.conf;
+
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.hdds.conf.TimeDurationUtil.ParsedTimeDuration;
+
+/**
+ * Defines write contract of Configuration objects.
+ */
+public interface ConfigurationTarget {
+
+  void set(String key, String value);
+
+  default void setInt(String name, int value) {
+    set(name, Integer.toString(value));
+  }
+
+  default void setLong(String name, long value) {
+    set(name, Long.toString(value));
+  }
+
+  default void setBoolean(String name, boolean value) {
+    set(name, Boolean.toString(value));
+  }
+
+  default void setTimeDuration(String name, long value, TimeUnit unit) {
+    set(name, value + ParsedTimeDuration.unitFor(unit).suffix());
+  }
+
+  default <T> void setFromObject(T object) {
+    ConfigGroup configGroup =
+        object.getClass().getAnnotation(ConfigGroup.class);
+    String prefix = configGroup.prefix();
+    ConfigurationReflectionUtil.updateConfiguration(this, object, prefix);
+  }
+
+}
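
Only set(String, String) is abstract, so a target can sit on top of any key/value store. A minimal hypothetical implementation, e.g. for tests (not part of this patch):

    import java.util.HashMap;
    import java.util.Map;

    public class InMemoryConfigurationTarget implements ConfigurationTarget {

      private final Map<String, String> values = new HashMap<>();

      @Override
      public void set(String key, String value) {
        values.put(key, value);
      }

      public String get(String key) {
        return values.get(key);
      }
    }
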
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfigurationParent.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/MutableConfigurationSource.java
similarity index 73%
copy from hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfigurationParent.java
copy to hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/MutableConfigurationSource.java
index 92f4c12..98b9a3e 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfigurationParent.java
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/MutableConfigurationSource.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -18,15 +18,8 @@
 package org.apache.hadoop.hdds.conf;
 
 /**
- * Parent class for the example configuration.
+ * Configuration that can be both read and written.
  */
-public class SimpleConfigurationParent {
-
-  @Config(key = "enabled", defaultValue = "true", description = "Example "
-      + "boolean config.", tags = ConfigTag.MANAGEMENT)
-  private boolean enabled;
-
-  public boolean isEnabled() {
-    return enabled;
-  }
+public interface MutableConfigurationSource
+    extends ConfigurationSource, ConfigurationTarget {
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeHttpServer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeHttpServer.java
index f533a26..27f30a5 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeHttpServer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeHttpServer.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -20,7 +20,7 @@ package org.apache.hadoop.ozone;
 import java.io.IOException;
 
 import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.MutableConfigurationSource;
 import org.apache.hadoop.hdds.server.http.BaseHttpServer;
 
 /**
@@ -30,7 +30,8 @@ import org.apache.hadoop.hdds.server.http.BaseHttpServer;
  */
 public class HddsDatanodeHttpServer extends BaseHttpServer {
 
-  public HddsDatanodeHttpServer(OzoneConfiguration conf) throws IOException {
+  public HddsDatanodeHttpServer(MutableConfigurationSource conf)
+      throws IOException {
     super(conf, "hddsDatanode");
   }
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index 859cd20..b1c8370 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -38,7 +38,6 @@ import java.util.stream.Collectors;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.conf.StorageUnit;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Container2BCSIDMapProto;
@@ -162,7 +161,7 @@ public class ContainerStateMachine extends BaseStateMachine {
     metrics = CSMMetrics.create(gid);
     this.writeChunkFutureMap = new ConcurrentHashMap<>();
     applyTransactionCompletionMap = new ConcurrentHashMap<>();
-    int numPendingRequests = OzoneConfiguration.of(conf)
+    int numPendingRequests = conf
         .getObject(DatanodeRatisServerConfig.class)
         .getLeaderNumPendingRequests();
     int pendingRequestsByteLimit = (int) conf.getStorageSize(
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
index 52daeff..b8843de 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
@@ -33,6 +33,7 @@ import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.hdds.StringUtils;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.MutableConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -127,7 +128,7 @@ public class TestBlockDeletingService {
    * creates some fake chunk files for testing.
    */
   private void createToDeleteBlocks(ContainerSet containerSet,
-      ConfigurationSource conf, int numOfContainers,
+      MutableConfigurationSource conf, int numOfContainers,
       int numOfBlocksPerContainer,
       int numOfChunksPerBlock) throws IOException {
     for (int x = 0; x < numOfContainers; x++) {
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScrubberMetrics.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScrubberMetrics.java
index e8b45ca..74b5fe5 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScrubberMetrics.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScrubberMetrics.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.ozone.container.ozoneimpl;
 
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdfs.util.Canceler;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
@@ -35,6 +34,7 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.concurrent.atomic.AtomicLong;
 
+import static org.apache.hadoop.hdds.conf.OzoneConfiguration.newInstanceOf;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
@@ -67,8 +67,7 @@ public class TestContainerScrubberMetrics {
 
   @Before
   public void setup() {
-    conf = new OzoneConfiguration()
-        .getObject(ContainerScrubberConfiguration.class);
+    conf = newInstanceOf(ContainerScrubberConfiguration.class);
     conf.setMetadataScanInterval(0);
     conf.setDataScanInterval(0);
     controller = mockContainerController();
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisServerConfig.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisServerConfig.java
index 5778780..39f58bb 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisServerConfig.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisServerConfig.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.hdds.conf;
 
+import java.time.Duration;
+
 import static org.apache.hadoop.hdds.conf.ConfigTag.DATANODE;
 import static org.apache.hadoop.hdds.conf.ConfigTag.OZONE;
 import static org.apache.hadoop.hdds.conf.ConfigTag.PERFORMANCE;
@@ -30,19 +32,19 @@ import static org.apache.hadoop.hdds.ratis.RatisHelper.HDDS_DATANODE_RATIS_SERVE
 @ConfigGroup(prefix = HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY)
 public class DatanodeRatisServerConfig {
 
-  public static final String RATIS_SERVER_REQUEST_TIMEOUT_KEY =
+  private static final String RATIS_SERVER_REQUEST_TIMEOUT_KEY =
       "rpc.request.timeout";
 
-  public static final String RATIS_SERVER_WATCH_REQUEST_TIMEOUT_KEY =
+  private static final String RATIS_SERVER_WATCH_REQUEST_TIMEOUT_KEY =
       "watch.timeout";
 
-  public static final String RATIS_SERVER_NO_LEADER_TIMEOUT_KEY =
+  private static final String RATIS_SERVER_NO_LEADER_TIMEOUT_KEY =
       "Notification.no-leader.timeout";
 
-  public static final String RATIS_FOLLOWER_SLOWNESS_TIMEOUT_KEY =
+  private static final String RATIS_FOLLOWER_SLOWNESS_TIMEOUT_KEY =
       "rpcslowness.timeout";
 
-  public static final String RATIS_LEADER_NUM_PENDING_REQUESTS_KEY =
+  private static final String RATIS_LEADER_NUM_PENDING_REQUESTS_KEY =
       "write.element-limit";
 
   @Config(key = RATIS_SERVER_REQUEST_TIMEOUT_KEY,
@@ -52,14 +54,14 @@ public class DatanodeRatisServerConfig {
       description = "The timeout duration of the ratis write request " +
           "on Ratis Server."
   )
-  private long requestTimeOut = 60 * 1000;
+  private long requestTimeOut = Duration.ofSeconds(60).toMillis();
 
   public long getRequestTimeOut() {
     return requestTimeOut;
   }
 
-  public void setRequestTimeOut(long requestTimeOut) {
-    this.requestTimeOut = requestTimeOut;
+  public void setRequestTimeOut(Duration duration) {
+    this.requestTimeOut = duration.toMillis();
   }
 
   @Config(key = RATIS_SERVER_WATCH_REQUEST_TIMEOUT_KEY,
@@ -70,14 +72,14 @@ public class DatanodeRatisServerConfig {
           "Timeout for the watch request in Ratis server to acknowledge a " +
           "particular request is replayed to all servers."
   )
-  private long watchTimeOut = 180 * 1000;
+  private long watchTimeOut = Duration.ofSeconds(180).toMillis();
 
   public long getWatchTimeOut() {
     return watchTimeOut;
   }
 
-  public void setWatchTimeOut(long watchTimeOut) {
-    this.watchTimeOut = watchTimeOut;
+  public void setWatchTimeOut(Duration duration) {
+    this.watchTimeOut = duration.toMillis();
   }
 
   @Config(key = RATIS_SERVER_NO_LEADER_TIMEOUT_KEY,
@@ -88,14 +90,14 @@ public class DatanodeRatisServerConfig {
           " that leader has not been elected for a long time and leader " +
           "changes its role to Candidate."
   )
-  private long noLeaderTimeout = 300 * 1000;
+  private long noLeaderTimeout = Duration.ofSeconds(300).toMillis();
 
   public long getNoLeaderTimeout() {
     return noLeaderTimeout;
   }
 
-  public void setNoLeaderTimeout(long noLeaderTimeout) {
-    this.noLeaderTimeout = noLeaderTimeout;
+  public void setNoLeaderTimeout(Duration duration) {
+    this.noLeaderTimeout = duration.toMillis();
   }
 
   @Config(key = RATIS_FOLLOWER_SLOWNESS_TIMEOUT_KEY,
@@ -106,14 +108,14 @@ public class DatanodeRatisServerConfig {
           "notified that follower is slow. StateMachine will close down the " +
           "pipeline."
   )
-  private long followerSlownessTimeout = 300 * 1000;
+  private long followerSlownessTimeout = Duration.ofSeconds(300).toMillis();
 
   public long getFollowerSlownessTimeout() {
     return followerSlownessTimeout;
   }
 
-  public void setFollowerSlownessTimeout(long followerSlownessTimeout) {
-    this.followerSlownessTimeout = followerSlownessTimeout;
+  public void setFollowerSlownessTimeout(Duration duration) {
+    this.followerSlownessTimeout = duration.toMillis();
   }
 
   @Config(key = RATIS_LEADER_NUM_PENDING_REQUESTS_KEY,
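
With the setters taking Duration, call sites pick whatever unit reads best while the class keeps storing milliseconds internally. A hedged sketch of the new call pattern (values illustrative; conf assumed to be an OzoneConfiguration):

    // assumes: import java.time.Duration;
    DatanodeRatisServerConfig ratisServerConf =
        conf.getObject(DatanodeRatisServerConfig.class);
    ratisServerConf.setWatchTimeOut(Duration.ofSeconds(180));
    ratisServerConf.setFollowerSlownessTimeout(Duration.ofMinutes(5));
    conf.setFromObject(ratisServerConf);
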
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java
index 80cc960..d5bf8f6 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.HddsConfServlet;
 import org.apache.hadoop.hdds.conf.HddsPrometheusConfig;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.MutableConfigurationSource;
 import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.NetUtils;
@@ -70,7 +70,7 @@ public abstract class BaseHttpServer {
       "org.eclipse.jetty.webapp.basetempdir";
 
   private HttpServer2 httpServer;
-  private final ConfigurationSource conf;
+  private final MutableConfigurationSource conf;
 
   private InetSocketAddress httpAddress;
   private InetSocketAddress httpsAddress;
@@ -84,7 +84,7 @@ public abstract class BaseHttpServer {
 
   private boolean profilerSupport;
 
-  public BaseHttpServer(ConfigurationSource conf, String name)
+  public BaseHttpServer(MutableConfigurationSource conf, String name)
       throws IOException {
     this.name = name;
     this.conf = conf;
@@ -150,7 +150,7 @@ public abstract class BaseHttpServer {
         httpServer.getWebAppContext().getServletContext()
             .setAttribute(PROMETHEUS_SINK, prometheusMetricsSink);
         HddsPrometheusConfig prometheusConfig =
-            OzoneConfiguration.of(conf).getObject(HddsPrometheusConfig.class);
+            conf.getObject(HddsPrometheusConfig.class);
         String token = prometheusConfig.getPrometheusEndpointToken();
         if (StringUtils.isNotEmpty(token)) {
           httpServer.getWebAppContext().getServletContext()
@@ -188,7 +188,7 @@ public abstract class BaseHttpServer {
    * Recon to initialize their HTTP / HTTPS server.
    */
   public static HttpServer2.Builder newHttpServer2BuilderForOzone(
-      ConfigurationSource conf, final InetSocketAddress httpAddr,
+      MutableConfigurationSource conf, final InetSocketAddress httpAddr,
       final InetSocketAddress httpsAddr, String name) throws IOException {
     HttpConfig.Policy policy = getHttpPolicy(conf);
 
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpConfig.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpConfig.java
index f340bdf..306040d 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpConfig.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpConfig.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdds.server.http;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.annotation.InterfaceStability;
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.MutableConfigurationSource;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 
 /**
@@ -61,12 +61,12 @@ public final class HttpConfig {
     }
   }
 
-  public static Policy getHttpPolicy(ConfigurationSource conf) {
+  public static Policy getHttpPolicy(MutableConfigurationSource conf) {
     String policyStr = conf.get(OzoneConfigKeys.OZONE_HTTP_POLICY_KEY,
         OzoneConfigKeys.OZONE_HTTP_POLICY_DEFAULT);
     HttpConfig.Policy policy = HttpConfig.Policy.fromString(policyStr);
     if (policy == null) {
-      throw new HadoopIllegalArgumentException("Unregonized value '"
+      throw new HadoopIllegalArgumentException("Unrecognized value '"
           + policyStr + "' for " + OzoneConfigKeys.OZONE_HTTP_POLICY_KEY);
     }
     conf.set(OzoneConfigKeys.OZONE_HTTP_POLICY_KEY, policy.name());
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java
index 2d74898..3a2c49b 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.annotation.InterfaceStability;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.MutableConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
 import org.apache.hadoop.http.FilterContainer;
@@ -199,7 +200,7 @@ public final class HttpServer2 implements FilterContainer {
   public static class Builder {
     private ArrayList<URI> endpoints = Lists.newArrayList();
     private String name;
-    private ConfigurationSource conf;
+    private MutableConfigurationSource conf;
     private ConfigurationSource sslConf;
     private String[] pathSpecs;
     private AccessControlList adminsAcl;
@@ -298,7 +299,7 @@ public final class HttpServer2 implements FilterContainer {
       return this;
     }
 
-    public Builder setConf(ConfigurationSource configuration) {
+    public Builder setConf(MutableConfigurationSource configuration) {
       this.conf = configuration;
       return this;
     }
@@ -576,7 +577,7 @@ public final class HttpServer2 implements FilterContainer {
   }
 
   private void initializeWebServer(String name, String hostName,
-      ConfigurationSource conf, String[] pathSpecs,
+      MutableConfigurationSource conf, String[] pathSpecs,
       String authFilterConfigPrefix,
       boolean securityEnabled) throws IOException {
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
index e5d7160..617e231 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdds.scm.container;
 
 import java.io.IOException;
+import java.time.Duration;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashSet;
@@ -929,7 +930,7 @@ public class ReplicationManager
             "cluster. This property is used to configure the interval in " +
             "which that thread runs."
     )
-    private long interval = 5 * 60 * 1000;
+    private long interval = Duration.ofSeconds(300).toMillis();
 
     /**
      * Timeout for container replication & deletion command issued by
@@ -942,16 +943,14 @@ public class ReplicationManager
         description = "Timeout for the container replication/deletion commands "
             + "sent  to datanodes. After this timeout the command will be "
             + "retried.")
-    private long eventTimeout = 30 * 60 * 1000;
+    private long eventTimeout = Duration.ofMinutes(30).toMillis();
 
-
-    public void setInterval(long interval) {
-      this.interval = interval;
+    public void setInterval(Duration interval) {
+      this.interval = interval.toMillis();
     }
 
-
-    public void setEventTimeout(long eventTimeout) {
-      this.eventTimeout = eventTimeout;
+    public void setEventTimeout(Duration timeout) {
+      this.eventTimeout = timeout.toMillis();
     }
 
     public long getInterval() {
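
ReplicationManagerConfiguration follows the same convention: the setters now take Duration while the getters still return long millisecond values, so existing read paths are unchanged. A hedged sketch (conf assumed to be an OzoneConfiguration):

    // assumes: import java.time.Duration;
    ReplicationManagerConfiguration rmConf =
        conf.getObject(ReplicationManagerConfiguration.class);
    rmConf.setInterval(Duration.ofMinutes(5));
    rmConf.setEventTimeout(Duration.ofMinutes(30));
    long intervalMs = rmConf.getInterval();   // still a long, in milliseconds
    conf.setFromObject(rmConf);
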
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
index b644978..8a94462 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with this
  * work for additional information regarding copyright ownership.  The ASF
@@ -19,7 +19,7 @@ package org.apache.hadoop.hdds.scm.server;
 
 import java.io.IOException;
 
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.MutableConfigurationSource;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.server.http.BaseHttpServer;
 
@@ -28,7 +28,7 @@ import org.apache.hadoop.hdds.server.http.BaseHttpServer;
  */
 public class StorageContainerManagerHttpServer extends BaseHttpServer {
 
-  public StorageContainerManagerHttpServer(ConfigurationSource conf)
+  public StorageContainerManagerHttpServer(MutableConfigurationSource conf)
       throws IOException {
     super(conf, "scm");
   }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java
index ba4403a..51692c3 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java
@@ -641,10 +641,8 @@ public class TestReplicationManager {
 
   @Test
   public void testGeneratedConfig() {
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
-
     ReplicationManagerConfiguration rmc =
-        ozoneConfiguration.getObject(ReplicationManagerConfiguration.class);
+        OzoneConfiguration.newInstanceOf(ReplicationManagerConfiguration.class);
 
     //default is not included in ozone-site.xml but generated from annotation
     //to the ozone-site-generated.xml which should be loaded by the
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
index 9c1ef1c..96cd530 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
@@ -107,8 +107,8 @@ public class ContainerOperationClient implements ScmClient {
       String caCertificate =
           scmSecurityProtocolClient.getCACertificate();
       manager = new XceiverClientManager(conf,
-          OzoneConfiguration.of(conf).getObject(XceiverClientManager
-              .ScmClientConfig.class), caCertificate);
+          conf.getObject(XceiverClientManager.ScmClientConfig.class),
+          caCertificate);
     } else {
       manager = new XceiverClientManager(conf);
     }
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
index 1b2865d..2f7b107 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
@@ -24,6 +24,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.MutableConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -82,7 +83,7 @@ public final class OzoneClientFactory {
    * @throws IOException
    */
   public static OzoneClient getRpcClient(String omHost, Integer omRpcPort,
-      ConfigurationSource config)
+      MutableConfigurationSource config)
       throws IOException {
     Preconditions.checkNotNull(omHost);
     Preconditions.checkNotNull(omRpcPort);
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 46f7651..56c867d 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.hdds.client.OzoneQuota;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.conf.StorageUnit;
 import org.apache.hadoop.hdds.protocol.StorageType;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
@@ -179,8 +178,7 @@ public class RpcClient implements ClientProtocol {
     }
 
     this.xceiverClientManager = new XceiverClientManager(conf,
-        OzoneConfiguration.of(conf).getObject(XceiverClientManager.
-            ScmClientConfig.class), caCertPem);
+        conf.getObject(XceiverClientManager.ScmClientConfig.class), caCertPem);
 
     int configuredChunkSize = (int) conf
         .getStorageSize(ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY,
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
index b84bdaf..6eb8b18 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
@@ -33,9 +33,7 @@ import java.util.Map;
 import java.util.Optional;
 import java.util.OptionalInt;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.ozone.conf.OMClientConfig;
@@ -499,9 +497,8 @@ public final class OmUtils {
   /**
    * Return OM Client Rpc Time out.
    */
-  public static long getOMClientRpcTimeOut(Configuration configuration) {
-    return OzoneConfiguration.of(configuration)
-        .getObject(OMClientConfig.class).getRpcTimeOut();
+  public static long getOMClientRpcTimeOut(ConfigurationSource configuration) {
+    return configuration.getObject(OMClientConfig.class).getRpcTimeOut();
   }
 
   /**
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java
index b0991a6..3b2692d 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java
@@ -174,7 +174,7 @@ public class OMFailoverProxyProvider implements
         ProtobufRpcEngine.class);
     return RPC.getProxy(OzoneManagerProtocolPB.class, omVersion, omAddress, ugi,
         hadoopConf, NetUtils.getDefaultSocketFactory(hadoopConf),
-            (int) OmUtils.getOMClientRpcTimeOut(hadoopConf));
+            (int) OmUtils.getOMClientRpcTimeOut(conf));
 
   }
 
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java
index b1a4e45..6e7c052 100644
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.ozone.om.helpers;
 
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
@@ -31,6 +30,7 @@ import java.util.Arrays;
 import java.util.BitSet;
 import java.util.List;
 
+import static org.apache.hadoop.hdds.conf.OzoneConfiguration.newInstanceOf;
 import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
 import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.GROUP;
 import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER;
@@ -43,7 +43,7 @@ import static org.junit.Assert.assertTrue;
 public class TestOzoneAclUtil {
 
   private static final List<OzoneAcl> DEFAULT_ACLS =
-      getDefaultAcls(new OzoneConfiguration());
+      getDefaultAcls();
 
   private static final OzoneAcl USER1 = new OzoneAcl(USER, "user1",
       ACLType.READ_ACL, ACCESS);
@@ -56,7 +56,7 @@ public class TestOzoneAclUtil {
 
   @Test
   public void testAddAcl() throws IOException {
-    List<OzoneAcl> currentAcls = getDefaultAcls(new OzoneConfiguration());
+    List<OzoneAcl> currentAcls = getDefaultAcls();
     assertTrue(currentAcls.size() > 0);
 
     // Add new permission to existing acl entry.
@@ -88,7 +88,7 @@ public class TestOzoneAclUtil {
     addAndVerifyAcl(currentAcls, USER1, false, 0);
     removeAndVerifyAcl(currentAcls, USER1, false, 0);
 
-    currentAcls = getDefaultAcls(new OzoneConfiguration());
+    currentAcls = getDefaultAcls();
     assertTrue(currentAcls.size() > 0);
 
     // Add new permission to existing acl entry.
@@ -166,7 +166,7 @@ public class TestOzoneAclUtil {
    * @return list of ozoneAcls.
    * @throws IOException
    * */
-  private static List<OzoneAcl> getDefaultAcls(OzoneConfiguration conf) {
+  private static List<OzoneAcl> getDefaultAcls() {
     List<OzoneAcl> ozoneAcls = new ArrayList<>();
     //User ACL
     UserGroupInformation ugi;
@@ -176,7 +176,7 @@ public class TestOzoneAclUtil {
       ugi = UserGroupInformation.createRemoteUser("user0");
     }
 
-    OzoneAclConfig aclConfig = conf.getObject(OzoneAclConfig.class);
+    OzoneAclConfig aclConfig = newInstanceOf(OzoneAclConfig.class);
     IAccessAuthorizer.ACLType userRights = aclConfig.getUserDefaultRights();
     IAccessAuthorizer.ACLType groupRights = aclConfig.getGroupDefaultRights();
 
diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
index 5a0b68d..2a5cf24 100644
--- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
+++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.ozone;
 
+import java.time.Duration;
 import java.util.Arrays;
 import java.util.List;
 import java.util.HashSet;
@@ -32,6 +33,7 @@ import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.ozone.failure.FailureManager;
@@ -229,8 +231,11 @@ public class MiniOzoneChaosCluster extends MiniOzoneHAClusterImpl {
           OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY,
           2);
       conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, 2);
-      conf.setInt("hdds.scm.replication.thread.interval", 10 * 1000);
-      conf.setInt("hdds.scm.replication.event.timeout", 20 * 1000);
+      ReplicationManagerConfiguration replicationConf =
+          conf.getObject(ReplicationManagerConfiguration.class);
+      replicationConf.setInterval(Duration.ofSeconds(10));
+      replicationConf.setEventTimeout(Duration.ofSeconds(20));
+      conf.setFromObject(replicationConf);
       conf.setInt(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 100);
       conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP, 100);
       conf.setInt(OMConfigKeys.OZONE_OM_RATIS_LOG_PURGE_GAP, 100);
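
The hunk above shows the general pattern this commit applies to the tests: read the annotated configuration object, mutate it through its setters, and write it back with setFromObject instead of setting raw string keys such as "hdds.scm.replication.thread.interval". A minimal sketch of the round trip, limited to the calls shown in this hunk:

    ReplicationManagerConfiguration replicationConf =
        conf.getObject(ReplicationManagerConfiguration.class);  // typed view of the former hdds.scm.replication.* keys
    replicationConf.setInterval(Duration.ofSeconds(10));
    replicationConf.setEventTimeout(Duration.ofSeconds(20));
    conf.setFromObject(replicationConf);  // copies the fields back into the OzoneConfiguration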
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
index e419598..7335a93 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.fs.ozone.contract;
 
 import java.io.IOException;
+import java.time.Duration;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.conf.Configuration;
@@ -65,15 +66,11 @@ class OzoneContract extends AbstractFSContract {
 
   public static void createCluster() throws IOException {
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.RATIS_SERVER_REQUEST_TIMEOUT_KEY,
-            3, TimeUnit.SECONDS);
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.
-                            RATIS_SERVER_WATCH_REQUEST_TIMEOUT_KEY,
-            10, TimeUnit.SECONDS);
+    DatanodeRatisServerConfig ratisServerConfig =
+        conf.getObject(DatanodeRatisServerConfig.class);
+    ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3));
+    ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10));
+    conf.setFromObject(ratisServerConfig);
     conf.setTimeDuration(
             RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY + "." +
                     "rpc.request.timeout",
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/RootedOzoneContract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/RootedOzoneContract.java
index c2833f5..c90a7ba 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/RootedOzoneContract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/RootedOzoneContract.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.fs.ozone.contract.rooted;
 
 import java.io.IOException;
+import java.time.Duration;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.conf.Configuration;
@@ -62,15 +63,11 @@ class RootedOzoneContract extends AbstractFSContract {
 
   public static void createCluster() throws IOException {
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.RATIS_SERVER_REQUEST_TIMEOUT_KEY,
-            3, TimeUnit.SECONDS);
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.
-                            RATIS_SERVER_WATCH_REQUEST_TIMEOUT_KEY,
-            10, TimeUnit.SECONDS);
+    DatanodeRatisServerConfig ratisServerConfig =
+        conf.getObject(DatanodeRatisServerConfig.class);
+    ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3));
+    ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10));
+    conf.setFromObject(ratisServerConfig);
     conf.setTimeDuration(
             RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY + "." +
                     "rpc.request.timeout",
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java
index a75072e..2bb1fb1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hdds.scm.pipeline;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.ratis.RatisHelper;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig;
@@ -32,8 +31,8 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 
 import java.io.IOException;
+import java.time.Duration;
 import java.util.List;
-import java.util.concurrent.TimeUnit;
 import org.junit.Rule;
 import org.junit.rules.Timeout;
 
@@ -61,14 +60,11 @@ public class TestNodeFailure {
   @BeforeClass
   public static void init() throws Exception {
     final OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setTimeDuration(
-        RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-        DatanodeRatisServerConfig.RATIS_FOLLOWER_SLOWNESS_TIMEOUT_KEY,
-        10, TimeUnit.SECONDS);
-    conf.setTimeDuration(
-        RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-        DatanodeRatisServerConfig.RATIS_SERVER_NO_LEADER_TIMEOUT_KEY,
-        10, TimeUnit.SECONDS);
+    DatanodeRatisServerConfig ratisServerConfig =
+        conf.getObject(DatanodeRatisServerConfig.class);
+    ratisServerConfig.setFollowerSlownessTimeout(Duration.ofSeconds(10));
+    ratisServerConfig.setNoLeaderTimeout(Duration.ofSeconds(10));
+    conf.setFromObject(ratisServerConfig);
     conf.set(HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL, "2s");
 
     cluster = MiniOzoneCluster.newBuilder(conf)
@@ -84,7 +80,7 @@ public class TestNodeFailure {
         HddsProtos.ReplicationType.RATIS,
         HddsProtos.ReplicationFactor.THREE);
 
-    timeForFailure = (int) conf.getObject(DatanodeRatisServerConfig.class)
+    timeForFailure = (int) ratisServerConfig
         .getFollowerSlownessTimeout();
   }
 
@@ -133,4 +129,4 @@ public class TestNodeFailure {
       }
     }, 1000, 1000 * 60);
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index 5baa65b..1c1fcc6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -29,7 +29,6 @@ import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_DB
 import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_HTTP_ADDRESS_KEY;
 import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR;
 import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SCM_DB_DIR;
-import static org.hadoop.ozone.recon.codegen.ReconSqlDbConfig.ConfigKeys.OZONE_RECON_SQL_DB_JDBC_URL;
 
 import java.io.File;
 import java.io.IOException;
@@ -80,6 +79,7 @@ import org.apache.hadoop.ozone.recon.ReconServer;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.hadoop.ozone.recon.codegen.ReconSqlDbConfig;
 import org.junit.rules.TemporaryFolder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -804,8 +804,11 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster {
           .getAbsolutePath());
       conf.set(OZONE_RECON_SCM_DB_DIR,
           tempNewFolder.getAbsolutePath());
-      conf.set(OZONE_RECON_SQL_DB_JDBC_URL, "jdbc:derby:" +
-          tempNewFolder.getAbsolutePath() + "/ozone_recon_derby.db");
+
+      ReconSqlDbConfig dbConfig = conf.getObject(ReconSqlDbConfig.class);
+      dbConfig.setJdbcUrl("jdbc:derby:" + tempNewFolder.getAbsolutePath()
+          + "/ozone_recon_derby.db");
+      conf.setFromObject(dbConfig);
 
       conf.set(OZONE_RECON_HTTP_ADDRESS_KEY, "0.0.0.0:0");
       conf.set(OZONE_RECON_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java
index 26dee9d..087376b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java
@@ -43,6 +43,7 @@ import org.junit.Assert;
 import org.junit.Test;
 
 import java.io.IOException;
+import java.time.Duration;
 import java.util.concurrent.TimeUnit;
 
 import org.junit.Rule;
@@ -91,15 +92,11 @@ public class Test2WayCommitInRatis {
             60, TimeUnit.SECONDS);
     conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 60000,
             TimeUnit.SECONDS);
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.RATIS_SERVER_REQUEST_TIMEOUT_KEY,
-            3, TimeUnit.SECONDS);
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.
-                            RATIS_SERVER_WATCH_REQUEST_TIMEOUT_KEY,
-            10, TimeUnit.SECONDS);
+    DatanodeRatisServerConfig ratisServerConfig =
+        conf.getObject(DatanodeRatisServerConfig.class);
+    ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3));
+    ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10));
+    conf.setFromObject(ratisServerConfig);
     conf.setTimeDuration(
             RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." +
                     "rpc.request.timeout",
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java
index 7a79611..40756b0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java
@@ -47,6 +47,7 @@ import org.junit.Test;
 
 import java.io.IOException;
 import java.io.OutputStream;
+import java.time.Duration;
 import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 
@@ -102,15 +103,11 @@ public class TestBlockOutputStreamWithFailures {
     conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4,
         StorageUnit.MB);
     conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 3);
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.RATIS_SERVER_REQUEST_TIMEOUT_KEY,
-            3, TimeUnit.SECONDS);
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.
-                            RATIS_SERVER_WATCH_REQUEST_TIMEOUT_KEY,
-            3, TimeUnit.SECONDS);
+    DatanodeRatisServerConfig ratisServerConfig =
+        conf.getObject(DatanodeRatisServerConfig.class);
+    ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3));
+    ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3));
+    conf.setFromObject(ratisServerConfig);
     conf.setTimeDuration(
             RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." +
                     "rpc.request.timeout",
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailuresFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailuresFlushDelay.java
index f3937ca..6fa54ed 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailuresFlushDelay.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailuresFlushDelay.java
@@ -45,6 +45,7 @@ import org.junit.rules.Timeout;
 
 import java.io.IOException;
 import java.io.OutputStream;
+import java.time.Duration;
 import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 
@@ -96,15 +97,11 @@ public class TestBlockOutputStreamWithFailuresFlushDelay {
     conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4,
         StorageUnit.MB);
     conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 3);
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.RATIS_SERVER_REQUEST_TIMEOUT_KEY,
-            3, TimeUnit.SECONDS);
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.
-                            RATIS_SERVER_WATCH_REQUEST_TIMEOUT_KEY,
-            3, TimeUnit.SECONDS);
+    DatanodeRatisServerConfig ratisServerConfig =
+        conf.getObject(DatanodeRatisServerConfig.class);
+    ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3));
+    ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3));
+    conf.setFromObject(ratisServerConfig);
     conf.setTimeDuration(
             RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." +
                     "rpc.request.timeout",
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java
index 012ba6a..b9b7e5e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java
@@ -52,6 +52,7 @@ import org.junit.Before;
 import org.junit.Test;
 
 import java.io.IOException;
+import java.time.Duration;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.UUID;
@@ -106,15 +107,11 @@ public class TestCommitWatcher {
             10, TimeUnit.SECONDS);
     conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 1000,
             TimeUnit.SECONDS);
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.RATIS_SERVER_REQUEST_TIMEOUT_KEY,
-            3, TimeUnit.SECONDS);
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.
-                            RATIS_SERVER_WATCH_REQUEST_TIMEOUT_KEY,
-            3, TimeUnit.SECONDS);
+    DatanodeRatisServerConfig ratisServerConfig =
+        conf.getObject(DatanodeRatisServerConfig.class);
+    ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3));
+    ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3));
+    conf.setFromObject(ratisServerConfig);
     conf.setTimeDuration(
             RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." +
                     "rpc.request.timeout",
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
index 5681a0e..2c7f818 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
@@ -23,11 +23,11 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.ratis.RatisHelper;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.ozone.HddsDatanodeService;
@@ -50,6 +50,7 @@ import org.junit.Test;
 
 import java.io.File;
 import java.io.IOException;
+import java.time.Duration;
 import java.util.HashMap;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
@@ -99,16 +100,15 @@ public class TestContainerReplicationEndToEnd {
         2 * containerReportInterval, TimeUnit.MILLISECONDS);
     conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 1000,
         TimeUnit.SECONDS);
-    conf.setTimeDuration(
-        RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-        DatanodeRatisServerConfig.RATIS_FOLLOWER_SLOWNESS_TIMEOUT_KEY,
-        1000, TimeUnit.SECONDS);
-    conf.setTimeDuration(
-        RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-        DatanodeRatisServerConfig.RATIS_SERVER_NO_LEADER_TIMEOUT_KEY,
-        1000, TimeUnit.SECONDS);
-    conf.setLong("hdds.scm.replication.thread.interval",
-        containerReportInterval);
+    DatanodeRatisServerConfig ratisServerConfig =
+        conf.getObject(DatanodeRatisServerConfig.class);
+    ratisServerConfig.setFollowerSlownessTimeout(Duration.ofSeconds(1000));
+    ratisServerConfig.setNoLeaderTimeout(Duration.ofSeconds(1000));
+    conf.setFromObject(ratisServerConfig);
+    ReplicationManagerConfiguration replicationConf =
+        conf.getObject(ReplicationManagerConfiguration.class);
+    replicationConf.setInterval(Duration.ofMillis(containerReportInterval));
+    conf.setFromObject(replicationConf);
     conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 2);
 
     conf.setQuietMode(false);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java
index 50a4bcf..7af57de 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.ozone.client.rpc;
 
 import java.io.File;
+import java.time.Duration;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Optional;
@@ -89,23 +90,13 @@ public class TestContainerStateMachineFailureOnRead {
     conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 1200, TimeUnit.SECONDS);
     conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 1000,
         TimeUnit.SECONDS);
-    conf.setTimeDuration(
-        RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-            DatanodeRatisServerConfig.RATIS_FOLLOWER_SLOWNESS_TIMEOUT_KEY,
-        1000, TimeUnit.SECONDS);
-    conf.setTimeDuration(
-        RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-            DatanodeRatisServerConfig.RATIS_SERVER_NO_LEADER_TIMEOUT_KEY,
-        1000, TimeUnit.SECONDS);
-    conf.setTimeDuration(
-        RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-            DatanodeRatisServerConfig.RATIS_SERVER_REQUEST_TIMEOUT_KEY,
-        3, TimeUnit.SECONDS);
-    conf.setTimeDuration(
-        RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-            DatanodeRatisServerConfig.
-                RATIS_SERVER_WATCH_REQUEST_TIMEOUT_KEY,
-        3, TimeUnit.SECONDS);
+    DatanodeRatisServerConfig ratisServerConfig =
+        conf.getObject(DatanodeRatisServerConfig.class);
+    ratisServerConfig.setFollowerSlownessTimeout(Duration.ofSeconds(1000));
+    ratisServerConfig.setNoLeaderTimeout(Duration.ofSeconds(1000));
+    ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3));
+    ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3));
+    conf.setFromObject(ratisServerConfig);
     conf.setTimeDuration(
         RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." +
             "rpc.request.timeout",
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
index 1e44562..fba3ba1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
@@ -61,6 +61,7 @@ import org.junit.Test;
 import java.io.File;
 import java.io.IOException;
 import java.nio.file.Path;
+import java.time.Duration;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -124,15 +125,11 @@ public class TestContainerStateMachineFailures {
             + ".client.request.write.timeout", 10, TimeUnit.SECONDS);
     conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY
             + ".client.request.watch.timeout", 10, TimeUnit.SECONDS);
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.RATIS_SERVER_REQUEST_TIMEOUT_KEY,
-            3, TimeUnit.SECONDS);
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.
-                            RATIS_SERVER_WATCH_REQUEST_TIMEOUT_KEY,
-            10, TimeUnit.SECONDS);
+    DatanodeRatisServerConfig ratisServerConfig =
+        conf.getObject(DatanodeRatisServerConfig.class);
+    ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3));
+    ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10));
+    conf.setFromObject(ratisServerConfig);
     conf.setTimeDuration(
             RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." +
                     "rpc.request.timeout",
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java
index dc4ee8c..cac16f1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.client.rpc;
 
 import java.io.File;
 import java.io.IOException;
+import java.time.Duration;
 import java.util.HashMap;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
@@ -112,23 +113,13 @@ public class TestDeleteWithSlowFollower {
         TimeUnit.SECONDS);
     conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 1000,
         TimeUnit.SECONDS);
-    conf.setTimeDuration(
-        RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-            DatanodeRatisServerConfig.RATIS_FOLLOWER_SLOWNESS_TIMEOUT_KEY,
-        1000, TimeUnit.SECONDS);
-    conf.setTimeDuration(
-        RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-        DatanodeRatisServerConfig.RATIS_SERVER_NO_LEADER_TIMEOUT_KEY,
-        1000, TimeUnit.SECONDS);
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.RATIS_SERVER_REQUEST_TIMEOUT_KEY,
-            3, TimeUnit.SECONDS);
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.
-                            RATIS_SERVER_WATCH_REQUEST_TIMEOUT_KEY,
-            3, TimeUnit.SECONDS);
+    DatanodeRatisServerConfig ratisServerConfig =
+        conf.getObject(DatanodeRatisServerConfig.class);
+    ratisServerConfig.setFollowerSlownessTimeout(Duration.ofSeconds(1000));
+    ratisServerConfig.setNoLeaderTimeout(Duration.ofSeconds(1000));
+    ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3));
+    ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3));
+    conf.setFromObject(ratisServerConfig);
     conf.setTimeDuration(
             RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." +
                     "rpc.request.timeout",
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
index a311a83..e35a393 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.ozone.client.rpc;
 
 import java.io.IOException;
+import java.time.Duration;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
@@ -106,15 +107,11 @@ public class TestFailureHandlingByClient {
     conf.setBoolean(
         OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, true);
     conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 2);
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.RATIS_SERVER_REQUEST_TIMEOUT_KEY,
-            3, TimeUnit.SECONDS);
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.
-                            RATIS_SERVER_WATCH_REQUEST_TIMEOUT_KEY,
-            3, TimeUnit.SECONDS);
+    DatanodeRatisServerConfig ratisServerConfig =
+        conf.getObject(DatanodeRatisServerConfig.class);
+    ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3));
+    ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3));
+    conf.setFromObject(ratisServerConfig);
     conf.setTimeDuration(
             RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." +
                     "rpc.request.timeout",
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
index a5090eb..8f145b4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
@@ -49,6 +49,7 @@ import org.junit.*;
 import org.junit.rules.Timeout;
 
 import java.io.IOException;
+import java.time.Duration;
 import java.util.Collections;
 import java.util.List;
 import java.util.UUID;
@@ -104,15 +105,11 @@ public class TestFailureHandlingByClientFlushDelay {
     conf.setBoolean(
         OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, true);
     conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 2);
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.RATIS_SERVER_REQUEST_TIMEOUT_KEY,
-            3, TimeUnit.SECONDS);
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.
-                            RATIS_SERVER_WATCH_REQUEST_TIMEOUT_KEY,
-            3, TimeUnit.SECONDS);
+    DatanodeRatisServerConfig ratisServerConfig =
+        conf.getObject(DatanodeRatisServerConfig.class);
+    ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3));
+    ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3));
+    conf.setFromObject(ratisServerConfig);
     conf.setTimeDuration(
             RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." +
                     "rpc.request.timeout",
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
index eab6ad7..1ca073d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
@@ -46,6 +46,7 @@ import org.junit.Assert;
 import org.junit.Test;
 
 import java.io.IOException;
+import java.time.Duration;
 import java.util.List;
 import java.util.UUID;
 import java.util.concurrent.TimeUnit;
@@ -88,15 +89,11 @@ public class TestMultiBlockWritesWithDnFailures {
     blockSize = 4 * chunkSize;
     conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS);
     conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 100, TimeUnit.SECONDS);
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.RATIS_SERVER_REQUEST_TIMEOUT_KEY,
-            3, TimeUnit.SECONDS);
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.
-                            RATIS_SERVER_WATCH_REQUEST_TIMEOUT_KEY,
-            3, TimeUnit.SECONDS);
+    DatanodeRatisServerConfig ratisServerConfig =
+        conf.getObject(DatanodeRatisServerConfig.class);
+    ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3));
+    ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3));
+    conf.setFromObject(ratisServerConfig);
     conf.setTimeDuration(
             RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." +
                     "rpc.request.timeout",
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java
index 4ac9aa5..63e63af 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java
@@ -57,6 +57,7 @@ import org.junit.Test;
 import java.io.File;
 import java.io.IOException;
 import java.nio.file.Path;
+import java.time.Duration;
 import java.util.HashMap;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
@@ -109,15 +110,11 @@ public class TestValidateBCSIDOnRestart {
             + ".client.request.write.timeout", 10, TimeUnit.SECONDS);
     conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY
             + ".client.request.watch.timeout", 10, TimeUnit.SECONDS);
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.RATIS_SERVER_REQUEST_TIMEOUT_KEY,
-            3, TimeUnit.SECONDS);
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.
-                            RATIS_SERVER_WATCH_REQUEST_TIMEOUT_KEY,
-            10, TimeUnit.SECONDS);
+    DatanodeRatisServerConfig ratisServerConfig =
+        conf.getObject(DatanodeRatisServerConfig.class);
+    ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3));
+    ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10));
+    conf.setFromObject(ratisServerConfig);
     conf.setTimeDuration(
             RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY + "." +
                     "rpc.request.timeout",
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
index 7e1b4fb..131d289 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
@@ -49,6 +49,7 @@ import org.junit.Test;
 
 import java.io.IOException;
 import java.io.OutputStream;
+import java.time.Duration;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Random;
@@ -103,15 +104,11 @@ public class TestWatchForCommit {
             + ".client.request.write.timeout", 10, TimeUnit.SECONDS);
     conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY
             + ".client.request.watch.timeout", 10, TimeUnit.SECONDS);
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.RATIS_SERVER_REQUEST_TIMEOUT_KEY,
-            3, TimeUnit.SECONDS);
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.
-                            RATIS_SERVER_WATCH_REQUEST_TIMEOUT_KEY,
-            3, TimeUnit.SECONDS);
+    DatanodeRatisServerConfig ratisServerConfig =
+        conf.getObject(DatanodeRatisServerConfig.class);
+    ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3));
+    ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3));
+    conf.setFromObject(ratisServerConfig);
     conf.setTimeDuration(
             RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." +
                     "rpc.request.timeout",
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java
index 1310585..154bd55 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.junit.Assert;
 import org.junit.Test;
 
+import java.time.Duration;
 import java.util.concurrent.TimeUnit;
 
 /**
@@ -38,15 +39,11 @@ public abstract class TestDataValidate {
   private static MiniOzoneCluster cluster = null;
 
   static void startCluster(OzoneConfiguration conf) throws Exception {
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.RATIS_SERVER_REQUEST_TIMEOUT_KEY,
-            3, TimeUnit.SECONDS);
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.
-                            RATIS_SERVER_WATCH_REQUEST_TIMEOUT_KEY,
-            10, TimeUnit.SECONDS);
+    DatanodeRatisServerConfig ratisServerConfig =
+        conf.getObject(DatanodeRatisServerConfig.class);
+    ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3));
+    ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10));
+    conf.setFromObject(ratisServerConfig);
     conf.setTimeDuration(
             RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." +
                     "rpc.request.timeout",
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidateWithDummyContainers.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidateWithDummyContainers.java
index 4119666..7415c1b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidateWithDummyContainers.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidateWithDummyContainers.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.ozone.freon;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.container.ozoneimpl.ContainerScrubberConfiguration;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Rule;
@@ -49,9 +48,6 @@ public class TestDataValidateWithDummyContainers
   @BeforeClass
   public static void init() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
-    ContainerScrubberConfiguration sc =
-        conf.getObject(ContainerScrubberConfiguration.class);
-    sc.setEnabled(false);
     conf.setBoolean(HddsConfigKeys.HDDS_CONTAINER_PERSISTDATA, false);
     conf.setBoolean(OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED,
         false);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeRestart.java
index fa25361..8984b66 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeRestart.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeRestart.java
@@ -32,6 +32,7 @@ import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+import java.time.Duration;
 import java.util.concurrent.TimeUnit;
 
 import org.junit.Rule;
@@ -68,15 +69,11 @@ public class TestFreonWithDatanodeRestart {
         TimeUnit.SECONDS);
     conf.setTimeDuration(HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL, 1,
         TimeUnit.SECONDS);
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.RATIS_SERVER_REQUEST_TIMEOUT_KEY,
-            3, TimeUnit.SECONDS);
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.
-                            RATIS_SERVER_WATCH_REQUEST_TIMEOUT_KEY,
-            3, TimeUnit.SECONDS);
+    DatanodeRatisServerConfig ratisServerConfig =
+        conf.getObject(DatanodeRatisServerConfig.class);
+    ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3));
+    ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3));
+    conf.setFromObject(ratisServerConfig);
     conf.setTimeDuration(
             RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." +
                     "rpc.request.timeout",
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java
index cc2fd7c..61ac6af 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java
@@ -37,6 +37,7 @@ import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+import java.time.Duration;
 import java.util.concurrent.TimeUnit;
 
 /**
@@ -61,15 +62,11 @@ public class TestFreonWithPipelineDestroy {
             1, TimeUnit.SECONDS);
     conf.setTimeDuration(HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL,
             1, TimeUnit.SECONDS);
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.RATIS_SERVER_REQUEST_TIMEOUT_KEY,
-            3, TimeUnit.SECONDS);
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.
-                            RATIS_SERVER_WATCH_REQUEST_TIMEOUT_KEY,
-            3, TimeUnit.SECONDS);
+    DatanodeRatisServerConfig ratisServerConfig =
+        conf.getObject(DatanodeRatisServerConfig.class);
+    ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3));
+    ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3));
+    conf.setFromObject(ratisServerConfig);
     conf.setTimeDuration(
             RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." +
                     "rpc.request.timeout",
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java
index b39139d..ddb27ce 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.ozone.freon;
 
+import java.time.Duration;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.hdds.client.ReplicationFactor;
@@ -51,15 +52,11 @@ public class TestRandomKeyGenerator {
   @BeforeClass
   public static void init() throws Exception {
     conf = new OzoneConfiguration();
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.RATIS_SERVER_REQUEST_TIMEOUT_KEY,
-            3, TimeUnit.SECONDS);
-    conf.setTimeDuration(
-            RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
-                    DatanodeRatisServerConfig.
-                            RATIS_SERVER_WATCH_REQUEST_TIMEOUT_KEY,
-            3, TimeUnit.SECONDS);
+    DatanodeRatisServerConfig ratisServerConfig =
+        conf.getObject(DatanodeRatisServerConfig.class);
+    ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3));
+    ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3));
+    conf.setFromObject(ratisServerConfig);
     conf.setTimeDuration(
             RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." +
                     "rpc.request.timeout",
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRocksDBLogging.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRocksDBLogging.java
index 2fa87b9..57c7061 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRocksDBLogging.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRocksDBLogging.java
@@ -23,6 +23,7 @@ import java.util.concurrent.TimeoutException;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
+import org.apache.hadoop.hdds.utils.db.RocksDBConfiguration;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
@@ -38,6 +39,7 @@ import org.junit.rules.Timeout;
 public class TestOzoneManagerRocksDBLogging {
   private MiniOzoneCluster cluster = null;
   private OzoneConfiguration conf;
+  private RocksDBConfiguration dbConf;
 
   @Rule
   public Timeout timeout = new Timeout(60000);
@@ -48,6 +50,7 @@ public class TestOzoneManagerRocksDBLogging {
   @Before
   public void init() throws Exception {
     conf = new OzoneConfiguration();
+    dbConf = conf.getObject(RocksDBConfiguration.class);
     enableRocksDbLogging(false);
     cluster =  MiniOzoneCluster.newBuilder(conf)
         .build();
@@ -80,7 +83,8 @@ public class TestOzoneManagerRocksDBLogging {
   }
 
   private void enableRocksDbLogging(boolean b) {
-    conf.setBoolean("hadoop.hdds.db.rocksdb.logging.enabled", b);
+    dbConf.setRocksdbLoggingEnabled(b);
+    conf.setFromObject(dbConf);
   }
 
   private static void waitForRocksDbLog()
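
One detail worth noting in the hunk above: mutating the RocksDBConfiguration object alone does not change the live configuration; setFromObject has to be called to copy the field back, which is why enableRocksDbLogging does both steps. A minimal sketch of the toggle, using only calls that appear in this file:

    RocksDBConfiguration dbConf = conf.getObject(RocksDBConfiguration.class);
    dbConf.setRocksdbLoggingEnabled(true);   // mutates the in-memory object only
    conf.setFromObject(dbConf);              // required to push the value back into conf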
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java
index 84fa45d..0dd89e0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java
@@ -24,6 +24,7 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.R
 import static org.apache.hadoop.ozone.container.ozoneimpl.TestOzoneContainer.runTestOzoneContainerViaDataNode;
 import static org.junit.Assert.assertEquals;
 
+import java.time.Duration;
 import java.util.List;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -36,6 +37,7 @@ import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.recon.scm.ReconContainerManager;
 import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade;
+import org.apache.hadoop.ozone.recon.tasks.ReconTaskConfig;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.hadoop.ozone.recon.schema.ContainerSchemaDefinition;
 import org.hadoop.ozone.recon.schema.tables.pojos.UnhealthyContainers;
@@ -68,7 +70,11 @@ public class TestReconTasks {
     conf = new OzoneConfiguration();
     conf.set(HDDS_CONTAINER_REPORT_INTERVAL, "5s");
     conf.set(HDDS_PIPELINE_REPORT_INTERVAL, "5s");
-    conf.set("ozone.recon.task.missingcontainer.interval", "15s");
+
+    ReconTaskConfig taskConfig = conf.getObject(ReconTaskConfig.class);
+    taskConfig.setMissingContainerTaskInterval(Duration.ofSeconds(15));
+    conf.setFromObject(taskConfig);
+
     conf.set("ozone.scm.stale.node.interval", "10s");
     conf.set("ozone.scm.dead.node.interval", "20s");
     cluster =  MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java
index eb2185f..be146fc 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java
@@ -26,6 +26,7 @@ import java.util.concurrent.atomic.AtomicReference;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.RocksDBConfiguration;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
@@ -62,9 +63,12 @@ public class TestReconWithOzoneManagerHA {
   public void setup() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, Boolean.TRUE.toString());
+
     // Sync to disk enabled
-    conf.set("hadoop.hdds.db.rocksdb.writeoption.sync",
-        Boolean.TRUE.toString());
+    RocksDBConfiguration dbConf = conf.getObject(RocksDBConfiguration.class);
+    dbConf.setSyncOption(true);
+    conf.setFromObject(dbConf);
+
     cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf)
         .setClusterId(UUID.randomUUID().toString())
         .setScmId(UUID.randomUUID().toString())
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java
index cd1c085..f24c00d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with this
  * work for additional information regarding copyright ownership.  The ASF
@@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.om;
 
 import java.io.IOException;
 
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.MutableConfigurationSource;
 import org.apache.hadoop.hdds.server.http.BaseHttpServer;
 import org.apache.hadoop.ozone.OzoneConsts;
 
@@ -31,8 +31,8 @@ import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OM_SERVICE_LIST_HTTP_END
  */
 public class OzoneManagerHttpServer extends BaseHttpServer {
 
-  public OzoneManagerHttpServer(ConfigurationSource conf, OzoneManager om)
-      throws IOException {
+  public OzoneManagerHttpServer(MutableConfigurationSource conf,
+      OzoneManager om) throws IOException {
     super(conf, "ozoneManager");
     addServlet("serviceList", OZONE_OM_SERVICE_LIST_HTTP_ENDPOINT,
         ServiceListJSONServlet.class);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java
index 52fe253..1c78251 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java
@@ -31,7 +31,7 @@ import java.util.Map;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.MutableConfigurationSource;
 import org.apache.hadoop.hdds.server.http.HttpConfig;
 import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
 import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
@@ -71,7 +71,7 @@ public class OzoneManagerSnapshotProvider {
 
   private static final String OM_SNAPSHOT_DB = "om.snapshot.db";
 
-  public OzoneManagerSnapshotProvider(ConfigurationSource conf,
+  public OzoneManagerSnapshotProvider(MutableConfigurationSource conf,
       File omRatisSnapshotDir, List<OMNodeDetails> peerNodes) {
 
     LOG.info("Initializing OM Snapshot Provider");
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMStorage.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMStorage.java
index 48873cb..53ae1e7 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMStorage.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMStorage.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.om;
 import java.io.File;
 
 import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.MutableConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.test.GenericTestUtils;
 
@@ -46,7 +46,7 @@ public class TestOMStorage {
     final File testDir = createTestDir();
     final File dbDir = new File(testDir, "omDbDir");
     final File metaDir = new File(testDir, "metaDir");   // should be ignored.
-    final ConfigurationSource conf = new OzoneConfiguration();
+    final MutableConfigurationSource conf = new OzoneConfiguration();
     conf.set(OMConfigKeys.OZONE_OM_DB_DIRS, dbDir.getPath());
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath());
 
@@ -66,7 +66,7 @@ public class TestOMStorage {
   public void testGetOmDbDirWithFallback() {
     final File testDir = createTestDir();
     final File metaDir = new File(testDir, "metaDir");
-    final ConfigurationSource conf = new OzoneConfiguration();
+    final MutableConfigurationSource conf = new OzoneConfiguration();
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath());
 
     try {
diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/ReconSqlDbConfig.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/ReconSqlDbConfig.java
index 704d26b..0c649c8 100644
--- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/ReconSqlDbConfig.java
+++ b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/ReconSqlDbConfig.java
@@ -229,11 +229,4 @@ public class ReconSqlDbConfig {
     this.sqlDbDialect = sqlDbDialect;
   }
 
-  /**
-   * Class to hold config keys related to Recon SQL DB.
-   */
-  public static class ConfigKeys {
-    public static final String OZONE_RECON_SQL_DB_JDBC_URL =
-        "ozone.recon.sql.db.jdbc.url";
-  }
 }
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java
index 8bd296f..315dd5c 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java
@@ -22,7 +22,6 @@ import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
-import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.hdds.scm.PlacementPolicy;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
@@ -66,8 +65,7 @@ public class ContainerHealthTask extends ReconScmTask {
     this.containerSchemaManager = containerSchemaManager;
     this.placementPolicy = placementPolicy;
     this.containerManager = containerManager;
-    this.interval = TimeUnit.SECONDS.toMillis(
-        reconTaskConfig.getMissingContainerTaskInterval());
+    interval = reconTaskConfig.getMissingContainerTaskInterval().toMillis();
   }
 
   public synchronized void run() {
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/PipelineSyncTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/PipelineSyncTask.java
index ecd1b45..d5de2bc 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/PipelineSyncTask.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/PipelineSyncTask.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.ozone.recon.scm;
 
 import java.util.List;
-import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider;
@@ -49,8 +48,7 @@ public class PipelineSyncTask extends ReconScmTask {
     super(reconTaskStatusDao);
     this.scmClient = scmClient;
     this.reconPipelineManager = pipelineManager;
-    this.interval = TimeUnit.SECONDS.toMillis(
-        reconTaskConfig.getPipelineSyncTaskInterval());
+    this.interval = reconTaskConfig.getPipelineSyncTaskInterval().toMillis();
   }
 
   @Override
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskConfig.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskConfig.java
index 688e3ac..c05143e 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskConfig.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskConfig.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.ozone.recon.tasks;
 
-import java.util.concurrent.TimeUnit;
+import java.time.Duration;
 
 import org.apache.hadoop.hdds.conf.Config;
 import org.apache.hadoop.hdds.conf.ConfigGroup;
@@ -32,38 +32,38 @@ import org.apache.hadoop.hdds.conf.ConfigType;
 public class ReconTaskConfig {
 
   @Config(key = "pipelinesync.interval",
-      type = ConfigType.TIME, timeUnit = TimeUnit.SECONDS,
+      type = ConfigType.TIME,
       defaultValue = "600s",
       tags = { ConfigTag.RECON, ConfigTag.OZONE },
       description = "The time interval of periodic sync of pipeline state " +
           "from SCM to Recon."
   )
-  private long pipelineSyncTaskInterval;
+  private long pipelineSyncTaskInterval = Duration.ofMinutes(10).toMillis();
 
-  public long getPipelineSyncTaskInterval() {
-    return pipelineSyncTaskInterval;
+  public Duration getPipelineSyncTaskInterval() {
+    return Duration.ofMillis(pipelineSyncTaskInterval);
   }
 
-  public void setPipelineSyncTaskInterval(long pipelineSyncTaskInterval) {
-    this.pipelineSyncTaskInterval = pipelineSyncTaskInterval;
+  public void setPipelineSyncTaskInterval(Duration interval) {
+    this.pipelineSyncTaskInterval = interval.toMillis();
   }
 
   @Config(key = "missingcontainer.interval",
-      type = ConfigType.TIME, timeUnit = TimeUnit.SECONDS,
+      type = ConfigType.TIME,
       defaultValue = "300s",
       tags = { ConfigTag.RECON, ConfigTag.OZONE },
       description = "The time interval of the periodic check for " +
           "containers with zero replicas in the cluster as reported by " +
           "Datanodes."
   )
-  private long missingContainerTaskInterval;
+  private long missingContainerTaskInterval = Duration.ofMinutes(5).toMillis();
 
-  public long getMissingContainerTaskInterval() {
-    return missingContainerTaskInterval;
+  public Duration getMissingContainerTaskInterval() {
+    return Duration.ofMillis(missingContainerTaskInterval);
   }
 
-  public void setMissingContainerTaskInterval(long interval) {
-    this.missingContainerTaskInterval = interval;
+  public void setMissingContainerTaskInterval(Duration interval) {
+    this.missingContainerTaskInterval = interval.toMillis();
   }
 
 }
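
With the explicit timeUnit dropped from the annotations, ReconTaskConfig now exposes its intervals as java.time.Duration. A brief sketch of how a caller might obtain and tune the config follows; the getter/setter signatures come from the hunk above, while obtaining the group via conf.getObject(ReconTaskConfig.class) is an assumption based on the @Config/ConfigGroup imports shown there, and the surrounding class/main wiring is illustrative only.

    import java.time.Duration;

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.recon.tasks.ReconTaskConfig;

    public final class ReconTaskConfigExample {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();

        // Assumed: ReconTaskConfig is a registered config group, so it can be
        // materialized the same way as RocksDBConfiguration earlier in the patch.
        ReconTaskConfig taskConfig = conf.getObject(ReconTaskConfig.class);

        // Intervals are Durations now; no TimeUnit bookkeeping at the call site.
        taskConfig.setPipelineSyncTaskInterval(Duration.ofMinutes(5));
        long millis = taskConfig.getMissingContainerTaskInterval().toMillis();
        System.out.println("missing-container check every " + millis + " ms");
      }
    }
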
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java
index 4dd31e7..d97b143 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java
@@ -24,6 +24,7 @@ import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
 import java.io.IOException;
+import java.time.Duration;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashSet;
@@ -123,7 +124,7 @@ public class TestContainerHealthTask extends AbstractReconSqlDBTest {
     long currentTime = System.currentTimeMillis();
     ReconTaskStatusDao reconTaskStatusDao = getDao(ReconTaskStatusDao.class);
     ReconTaskConfig reconTaskConfig = new ReconTaskConfig();
-    reconTaskConfig.setMissingContainerTaskInterval(2);
+    reconTaskConfig.setMissingContainerTaskInterval(Duration.ofSeconds(2));
     ContainerHealthTask containerHealthTask =
         new ContainerHealthTask(scmMock.getContainerManager(),
             reconTaskStatusDao, containerSchemaManager,
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java
index d8c0bf0..3bb10d3 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.s3;
 
 import java.io.IOException;
 
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.MutableConfigurationSource;
 import org.apache.hadoop.hdds.server.http.BaseHttpServer;
 
 /**
@@ -32,7 +32,7 @@ public class S3GatewayHttpServer extends BaseHttpServer {
    */
   public static final int FILTER_PRIORITY_DO_AFTER = 50;
 
-  public S3GatewayHttpServer(ConfigurationSource conf,
+  public S3GatewayHttpServer(MutableConfigurationSource conf,
       String name) throws IOException {
     super(conf, name);
   }
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/OzoneAdmin.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/OzoneAdmin.java
index eebc388..81f8f64 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/OzoneAdmin.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/OzoneAdmin.java
@@ -22,7 +22,7 @@ import java.io.IOException;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.cli.GenericCli;
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.MutableConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient;
@@ -105,7 +105,7 @@ public class OzoneAdmin extends GenericCli implements WithScmClient {
     }
   }
 
-  private void checkAndSetSCMAddressArg(ConfigurationSource conf) {
+  private void checkAndSetSCMAddressArg(MutableConfigurationSource conf) {
     if (StringUtils.isNotEmpty(scm)) {
       conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scm);
     }
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/FreonHttpServer.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/FreonHttpServer.java
index d16a3f2..850506e 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/FreonHttpServer.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/FreonHttpServer.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.freon;
 
 import java.io.IOException;
 
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.MutableConfigurationSource;
 import org.apache.hadoop.hdds.server.http.BaseHttpServer;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 
@@ -27,7 +27,7 @@ import org.apache.hadoop.ozone.OzoneConfigKeys;
  * Http server to provide metrics + profile endpoint.
  */
 public class FreonHttpServer extends BaseHttpServer {
-  public FreonHttpServer(ConfigurationSource conf) throws IOException {
+  public FreonHttpServer(MutableConfigurationSource conf) throws IOException {
     super(conf, "freon");
   }
 

